[lmfit-py] 02/08: Imported Upstream version 0.8.0~rc2+dfsg.1

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Sun Aug 31 12:33:34 UTC 2014


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to branch master
in repository lmfit-py.

commit ea3665143e82ab188e13521d2ad4686c80a1c0f2
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Sun Aug 31 10:37:02 2014 +0200

    Imported Upstream version 0.8.0~rc2+dfsg.1
---
 .travis.yml                                        |  58 +-
 THANKS.txt                                         |  12 +-
 doc/Makefile                                       |  22 +-
 doc/_images/conf_interval1.png                     | Bin 0 -> 21741 bytes
 doc/_images/conf_interval1a.png                    | Bin 0 -> 19853 bytes
 doc/_images/conf_interval2.png                     | Bin 0 -> 16793 bytes
 doc/_images/model_eval.png                         | Bin 0 -> 21967 bytes
 doc/_images/model_fit1.png                         | Bin 0 -> 29579 bytes
 doc/_images/model_fit2.png                         | Bin 0 -> 33474 bytes
 doc/_images/models_doc1.png                        | Bin 0 -> 32292 bytes
 doc/_images/models_doc2.png                        | Bin 0 -> 33112 bytes
 doc/_images/models_nistgauss.png                   | Bin 0 -> 44692 bytes
 doc/_images/models_nistgauss2.png                  | Bin 0 -> 45887 bytes
 doc/_images/models_peak1.png                       | Bin 0 -> 161561 bytes
 doc/_images/models_peak2.png                       | Bin 0 -> 175732 bytes
 doc/_images/models_peak3.png                       | Bin 0 -> 159332 bytes
 doc/_images/models_peak4.png                       | Bin 0 -> 164721 bytes
 doc/_images/models_stepfit.png                     | Bin 0 -> 31966 bytes
 doc/_templates/indexsidebar.html                   |  21 +-
 doc/_templates/layout.html                         |  58 ++
 doc/bounds.rst                                     |  16 +-
 doc/builtin_models.rst                             | 782 +++++++++++++++++
 doc/conf.py                                        |  80 +-
 doc/confidence.rst                                 | 169 ++--
 doc/constraints.rst                                |   6 +-
 doc/contents.rst                                   |  15 +
 doc/fitting.rst                                    |  71 +-
 doc/index.rst                                      | 102 ++-
 doc/installation.rst                               |  75 +-
 doc/intro.rst                                      | 150 ++++
 doc/model.rst                                      | 572 ++++++++++++
 doc/models1d.rst                                   |  80 --
 doc/models1d_doc1.png                              | Bin 29771 -> 0 bytes
 doc/parameters.rst                                 | 160 +---
 doc/{ => sphinx/mathjax}/conf.py                   |  80 +-
 doc/{ => sphinx/pngmath}/conf.py                   |  78 +-
 doc/sphinx/theme/lmfitdoc/layout.html              |  14 +
 doc/sphinx/theme/lmfitdoc/static/contents.png      | Bin 0 -> 202 bytes
 doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t    | 348 ++++++++
 doc/sphinx/theme/lmfitdoc/static/navigation.png    | Bin 0 -> 218 bytes
 doc/sphinx/theme/lmfitdoc/theme.conf               |   4 +
 examples/{NIST_STRD/Gauss2.dat => NIST_Gauss2.dat} | 120 +--
 examples/{simple.py => doc_basic.py}               |   4 +-
 examples/doc_model1.py                             |  25 +
 examples/doc_model2.py                             |  29 +
 examples/doc_nistgauss.py                          |  36 +
 examples/doc_nistgauss2.py                         |  40 +
 examples/doc_stepmodel.py                          |  28 +
 examples/example_anneal.py                         |  63 --
 examples/example_covar.py                          |   8 +-
 examples/example_peakmodel.py                      |   2 +-
 examples/example_stepmodel.py                      |  28 -
 examples/fit_pvoigt.py                             |  10 +-
 examples/fit_pvoigt2.py                            |  18 +-
 examples/fit_pvoigt_NelderMead.py                  |  11 +-
 examples/fit_pvoigt_NelderMead2.py                 |  10 +-
 examples/fit_with_algebraic_constraint.py          |  16 +-
 examples/m1.py                                     |   2 +-
 examples/model1d_doc1.py                           |   2 +-
 examples/model1d_doc2.py                           |   2 +-
 examples/models_doc1.py                            |  47 +
 examples/models_doc2.py                            |  30 +
 examples/peakfit_1.py                              |   6 +-
 examples/{fit_NIST.py => test_NIST_Strd.py}        |  22 +-
 examples/test_peak.dat                             | 404 +++++++++
 examples/use_models1d.py                           |   4 +-
 lmfit/__init__.py                                  |  11 +-
 lmfit/asteval.py                                   | 159 ++--
 lmfit/astutils.py                                  | 123 +--
 lmfit/confidence.py                                |  27 +-
 lmfit/lineshapes.py                                | 243 ++++++
 lmfit/minimizer.py                                 |  69 +-
 lmfit/model.py                                     | 365 +++++---
 lmfit/models.py                                    | 334 +++++++
 lmfit/{models1d.py => old_models1d.py}             |   0
 lmfit/parameter.py                                 |   8 +-
 lmfit/printfuncs.py                                |  60 +-
 lmfit/specified_models.py                          | 196 -----
 lmfit/uncertainties/test_umath.py                  | 294 -------
 lmfit/uncertainties/test_uncertainties.py          | 972 ---------------------
 lmfit/uncertainties/umath.py                       |  22 +-
 lmfit/uncertainties/unumpy/__init__.py             |  82 --
 lmfit/uncertainties/unumpy/core.py                 | 612 -------------
 lmfit/uncertainties/unumpy/test_ulinalg.py         |  87 --
 lmfit/uncertainties/unumpy/test_unumpy.py          | 265 ------
 lmfit/uncertainties/unumpy/ulinalg.py              |  17 -
 lmfit/utilfuncs.py                                 |  86 --
 requirements.txt                                   |   3 +
 setup.py                                           |   2 +-
 {examples => tests}/NISTModels.py                  |   6 +-
 {examples => tests}/NIST_STRD/Bennett5.dat         |   0
 {examples => tests}/NIST_STRD/BoxBOD.dat           |   0
 {examples => tests}/NIST_STRD/Chwirut1.dat         |   0
 {examples => tests}/NIST_STRD/Chwirut2.dat         |   0
 {examples => tests}/NIST_STRD/DanWood.dat          |   0
 {examples => tests}/NIST_STRD/ENSO.dat             |   0
 {examples => tests}/NIST_STRD/Eckerle4.dat         |   0
 {examples => tests}/NIST_STRD/Gauss1.dat           |   0
 {examples => tests}/NIST_STRD/Gauss2.dat           |   0
 {examples => tests}/NIST_STRD/Gauss3.dat           |   0
 {examples => tests}/NIST_STRD/Hahn1.dat            |   0
 {examples => tests}/NIST_STRD/Kirby2.dat           |   0
 {examples => tests}/NIST_STRD/Lanczos1.dat         |   0
 {examples => tests}/NIST_STRD/Lanczos2.dat         |   0
 {examples => tests}/NIST_STRD/Lanczos3.dat         |   0
 {examples => tests}/NIST_STRD/MGH09.dat            |   0
 {examples => tests}/NIST_STRD/MGH10.dat            |   0
 {examples => tests}/NIST_STRD/MGH17.dat            |   0
 {examples => tests}/NIST_STRD/Misra1a.dat          |   0
 {examples => tests}/NIST_STRD/Misra1b.dat          |   0
 {examples => tests}/NIST_STRD/Misra1c.dat          |   0
 {examples => tests}/NIST_STRD/Misra1d.dat          |   0
 {examples => tests}/NIST_STRD/Models               |   0
 {examples => tests}/NIST_STRD/Nelson.dat           |   0
 {examples => tests}/NIST_STRD/Rat42.dat            |   0
 {examples => tests}/NIST_STRD/Rat43.dat            |   0
 {examples => tests}/NIST_STRD/Roszman1.dat         |   0
 {examples => tests}/NIST_STRD/Thurber.dat          |   0
 tests/lmfit_testutils.py                           |  18 +
 tests/test_1variable.py                            |  46 +-
 tests/test_NIST_Strd.py                            | 268 ++++++
 tests/test_algebraic_constraint.py                 |  12 +-
 tests/test_algebraic_constraint2.py                |  24 +-
 tests/test_basicfit.py                             |  45 +
 tests/test_bounds.py                               |  54 ++
 tests/test_model.py                                | 130 ++-
 tests/test_multidatasets.py                        |  76 ++
 tests/test_nose.py                                 |  14 +-
 tests/test_stepmodel.py                            |  59 ++
 129 files changed, 4803 insertions(+), 3926 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 2fac278..bbdfbf2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,55 +3,21 @@
 language: python
 
 python:
-    - 2.6
-
-matrix:
-    include:
-        - python: 2.7
-          env:
-            - PYTHON=python
-            - PYVER=2.x
-        - python: 3.2
-          env:
-            - PYTHON=python3
-            - PYVER=3.x
-    exclude:
-        - python: 2.6
-
-virtualenv:
-    system_site_packages: true
+    - 2.7
+    - 3.3
 
 before_install:
-    - export DISPLAY=:99.0
-    - sh -e /etc/init.d/xvfb start
-
-    - sudo apt-get update
-    - sudo apt-get install $PYTHON-setuptools
-    - sudo apt-get install $PYTHON-numpy
-    - sudo apt-get install $PYTHON-scipy
-    - sudo apt-get install $PYTHON-sphinx
-    - sudo apt-get install $PYTHON-nose
-    - sudo pip -q install --use-mirrors uncertainties asteval
-
-
-    - if [[ $PYVER == '2.x' ]]; then
-    -   sudo apt-get install ipython
-    -   sudo apt-get install $PYTHON-matplotlib;
-    - fi
-    - if [[ $PYVER == '3.x' ]]; then
-    -   sudo apt-get install ipython3
-    -   pip install --use-mirrors matplotlib;
-    - fi
-
+    - wget http://repo.continuum.io/miniconda/Miniconda3-3.5.5-Linux-x86_64.sh -O miniconda.sh
+    - chmod +x miniconda.sh
+    - ./miniconda.sh -b
+    - export PATH=/home/travis/miniconda3/bin:$PATH
+    - conda config --add channels https://conda.binstar.org/dan_blanchard
+    - conda update --yes conda
 
 install:
-    - sudo $PYTHON setup.py install
+    - conda install --yes pip python=$TRAVIS_PYTHON_VERSION numpy scipy pandas matplotlib dateutil nose
+    - python setup.py install
 
 script:
-    # Execute the unit tests
-    - nosetests tests
-    # Generate the docs
-    - if [[ $PYVER == '2.x' ]]; then
-    -   cd doc
-    -   make html
-    - fi
+    - cd tests
+    - nosetests
diff --git a/THANKS.txt b/THANKS.txt
index 53b7e78..f03a5a4 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,15 +1,15 @@
 Many people have contributed to lmfit.
 
 Matthew Newville wrote the original implementation.
-Till Stensitzki wrote the improved estimates of confidence intervals, 
+Till Stensitzki wrote the improved estimates of confidence intervals,
      and contributed many tests, bug fixes, and documentation.
-Daniel B. Allan wrote much of the high level Models, and many 
+Daniel B. Allan wrote much of the high level Models, and many
      improvements to the testing and documentation.
 J. J. Helmus wrote the MINUIT bounds for leastsq, originally in
      leastsqbounds.py, and ported to lmfit.
-E. O. Lebigot wrote the uncertainties package, a version of which is 
-     used here. 
+E. O. Le Bigot wrote the uncertainties package, a version of which is
+     used here.
 
-Additional patches, bug fixes, and suggestions have come from 
-  Christohp Deil, Francois Boulogne, Colin Brosseau, nmearl, 
+Additional patches, bug fixes, and suggestions have come from
+  Christoph Deil, Francois Boulogne, Colin Brosseau, nmearl,
   Gustavo Pasquevich, and Ben Gamari
diff --git a/doc/Makefile b/doc/Makefile
index 6a5951e..d432b5d 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -6,7 +6,8 @@ SPHINXOPTS    =
 SPHINXBUILD   = sphinx-build
 PAPER         =
 BUILDDIR      = _build
-
+JAXMATHCONF   = sphinx/mathjax/conf.py
+PNGMATHCONF   = sphinx/pngmath/conf.py
 INSTALLDIR = /home/newville/public_html/lmfit/
 
 
@@ -15,23 +16,36 @@ PAPEROPT_a4     = -D latex_paper_size=a4
 PAPEROPT_letter = -D latex_paper_size=letter
 ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip
 .PHONY: all install pdf
 
 html:
+	cp conf.py SAVEconf.py
+	cp $(JAXMATHCONF) conf.py
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	cp SAVEconf.py conf.py
 	@echo
 	@echo "html build finished: $(BUILDDIR)/html."
 
+htmlzip: html
+	cp conf.py SAVEconf.py
+	cp $(PNGMATHCONF) conf.py
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
+	cp SAVEconf.py conf.py
+	cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
+
 epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	cp conf.py SAVEconf.py
+	cp $(PNGMATHCONF) conf.py
+	$(SPHINXBUILD) -b epub  $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	cp SAVEconf.py conf.py
 	cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
 
 pdf: latex
 	cd $(BUILDDIR)/latex && make all-pdf
 	cp -pr $(BUILDDIR)/latex/lmfit.pdf $(BUILDDIR)/html/.
 
-all: html epub pdf
+all: html htmlzip epub pdf
 
 install: all
 	cd $(BUILDDIR)/latex && pdflatex lmfit.tex
diff --git a/doc/_images/conf_interval1.png b/doc/_images/conf_interval1.png
new file mode 100644
index 0000000..b2c8432
Binary files /dev/null and b/doc/_images/conf_interval1.png differ
diff --git a/doc/_images/conf_interval1a.png b/doc/_images/conf_interval1a.png
new file mode 100644
index 0000000..7e411fd
Binary files /dev/null and b/doc/_images/conf_interval1a.png differ
diff --git a/doc/_images/conf_interval2.png b/doc/_images/conf_interval2.png
new file mode 100644
index 0000000..98c3a46
Binary files /dev/null and b/doc/_images/conf_interval2.png differ
diff --git a/doc/_images/model_eval.png b/doc/_images/model_eval.png
new file mode 100644
index 0000000..07da9ca
Binary files /dev/null and b/doc/_images/model_eval.png differ
diff --git a/doc/_images/model_fit1.png b/doc/_images/model_fit1.png
new file mode 100644
index 0000000..705891b
Binary files /dev/null and b/doc/_images/model_fit1.png differ
diff --git a/doc/_images/model_fit2.png b/doc/_images/model_fit2.png
new file mode 100644
index 0000000..06e500e
Binary files /dev/null and b/doc/_images/model_fit2.png differ
diff --git a/doc/_images/models_doc1.png b/doc/_images/models_doc1.png
new file mode 100644
index 0000000..44682b3
Binary files /dev/null and b/doc/_images/models_doc1.png differ
diff --git a/doc/_images/models_doc2.png b/doc/_images/models_doc2.png
new file mode 100644
index 0000000..75654ab
Binary files /dev/null and b/doc/_images/models_doc2.png differ
diff --git a/doc/_images/models_nistgauss.png b/doc/_images/models_nistgauss.png
new file mode 100644
index 0000000..4d81921
Binary files /dev/null and b/doc/_images/models_nistgauss.png differ
diff --git a/doc/_images/models_nistgauss2.png b/doc/_images/models_nistgauss2.png
new file mode 100644
index 0000000..cdd202f
Binary files /dev/null and b/doc/_images/models_nistgauss2.png differ
diff --git a/doc/_images/models_peak1.png b/doc/_images/models_peak1.png
new file mode 100644
index 0000000..79b49cf
Binary files /dev/null and b/doc/_images/models_peak1.png differ
diff --git a/doc/_images/models_peak2.png b/doc/_images/models_peak2.png
new file mode 100644
index 0000000..9fc6000
Binary files /dev/null and b/doc/_images/models_peak2.png differ
diff --git a/doc/_images/models_peak3.png b/doc/_images/models_peak3.png
new file mode 100644
index 0000000..6b04fcf
Binary files /dev/null and b/doc/_images/models_peak3.png differ
diff --git a/doc/_images/models_peak4.png b/doc/_images/models_peak4.png
new file mode 100644
index 0000000..e1a732c
Binary files /dev/null and b/doc/_images/models_peak4.png differ
diff --git a/doc/_images/models_stepfit.png b/doc/_images/models_stepfit.png
new file mode 100644
index 0000000..c1b5cd8
Binary files /dev/null and b/doc/_images/models_stepfit.png differ
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index b8b4795..960a7b3 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -1,16 +1,15 @@
-<h3>Downloads</h3>
+<h3>Getting LMFIT</h3>
 <p>Current version: <b>{{ release }}</b></p>
-<p>Downloads:
-<ul>
-  <li>    <a href="http://pypi.python.org/pypi/lmfit/">PyPI (Python.org)</a> <p>
-       try <tt>easy_install -U lmfit</tt>
-</ul>
+<p>Download:   <a href="http://pypi.python.org/pypi/lmfit/">PyPI (Python.org)</a>
+<p>Install:    <tt>pip install lmfit</tt>
 <p>
+<p>Development version: <br> 
+    <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
 
-Development version: <br>     <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
-
-<h3>Documentation</h3>
-
- <a href="lmfit.pdf">  PDF Format</a>
+<h3>Off-line Documentation</h3>
+[<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
+|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
+|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
+]
 <hr> 
 <p>
diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html
new file mode 100644
index 0000000..92a1cb0
--- /dev/null
+++ b/doc/_templates/layout.html
@@ -0,0 +1,58 @@
+{% extends "!layout.html" %}
+
+{%- block extrahead %}
+  <script type="text/x-mathjax-config">
+     MathJax.Hub.Config({
+        "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
+        "HTML-CSS": {scale: 90}
+  });</script>
+{% endblock %}
+
+
+
+{% block rootrellink %}
+   <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
+   <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
+   <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
+   <li><a href="{{ pathto('model') }}"> model</a>|</li>
+   <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
+   <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
+   <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
+   <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
+{% endblock %}
+
+{% block relbar1 %}
+<div>
+<table border=0>
+  <tr><td></td><td width=75% padding=5 align=left>
+       <a href="index.html" style="color: #157"> <font size=+2>LMFIT</font></a>
+     </td><td></td>
+     <td width=8% align=left>
+         <a href="contents.html" style="color: #882222">
+         <font size=+1>Contents</font></a> </td>
+     <td width=8% align=left>
+          <a href="installation.html" style="color: #882222">
+          <font size=+1>Download</font></a></td>
+     <td width=8% align=left>
+        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
+         <font size=+1>Develop</font></a></td>
+  </tr>
+  <tr><td></td><td width=75% padding=5 align=left>
+        <a href="index.html" style="color: #157"> <font size=+1>
+	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
+     </td><td></td>
+     <td width=8% align=left>
+         <a href="intro.html" style="color: #882222">
+         <font size=+1>Introduction</font></a> </td>
+     <td width=8% align=left>
+         <a href="parameters.html" style="color: #882222">
+         <font size=+1>Parameters</font></a> </td>
+     <td width=8% align=left>
+         <a href="model.html" style="color: #882222">
+         <font size=+1>Models</font></a> </td>
+
+  </tr>
+</table>
+</div>
+{{ super() }}
+{% endblock %}
diff --git a/doc/bounds.rst b/doc/bounds.rst
index 1f36a1c..2ef9e65 100644
--- a/doc/bounds.rst
+++ b/doc/bounds.rst
@@ -1,18 +1,16 @@
-.. _scipy.optimize.leastsq: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.leastsq.html
-.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
-.. _MINUIT: http://en.wikipedia.org/wiki/MINUIT
-.. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy
-
-.. _parameter-bounds-label:
-
+.. _bounds_chapter:
 
 =================================
 Bounds Implementation
 =================================
 
+.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
+.. _MINUIT: http://en.wikipedia.org/wiki/MINUIT
+.. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy
+
 This section describes the implementation of :class:`Parameter` bounds.
-The `MINPACK-1`_ implementation used in `scipy.optimize.leastsq`_ for the
-Levenberg-Marquardt algorithm does not explicitly support bounds on
+The `MINPACK-1`_ implementation used in :func:`scipy.optimize.leastsq` for
+the Levenberg-Marquardt algorithm does not explicitly support bounds on
 parameters, and expects to be able to fully explore the available range of
 values for any Parameter.  Simply placing hard constraints (that is,
 resetting the value when it exceeds the desired bounds) prevents the
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
new file mode 100644
index 0000000..9185311
--- /dev/null
+++ b/doc/builtin_models.rst
@@ -0,0 +1,782 @@
+.. _builtin_models_chapter:
+
+====================================================
+Built-in Fitting Models in the :mod:`models` module
+====================================================
+
+Lmfit provides several built-in fitting models in the :mod:`models` module.
+These pre-defined models each subclass from the :class:`Model` class of the
+previous chapter and wrap relatively well-known functional forms, such as
+Gaussians, Lorentzians, and exponentials, that are used in a wide range of
+scientific domains.  In fact, all of these models are based on simple,
+plain Python functions defined in the :mod:`lineshapes` module.  In
+addition to wrapping a function into a :class:`Model`, these models also
+provide a :meth:`guess_starting_values` method that is intended to give a
+reasonable set of starting values, estimated from the data array, so that
+the initial model closely approximates the data to be fit.
+
+.. module:: models
+
+As shown in the previous chapter, a key feature of the :class:`Model` class
+is that models can easily be combined to give a composite
+:class:`Model`.  Thus, while some of the models listed here may seem pretty
+trivial (notably, :class:`ConstantModel` and :class:`LinearModel`), the
+main point of having these is to be able to use them in composite models.
+For example, a Lorentzian plus a linear background might be represented as::
+
+    >>> from lmfit.models import LinearModel, LorentzianModel
+    >>> peak = LorentzianModel()
+    >>> background  = LinearModel()
+    >>> model = peak + background
+
+
+
+
+All the models listed below are one dimensional, with an independent
+variable named ``x``.  Many of these models represent a function with a
+distinct peak, and so share common features.  To maintain uniformity,
+common parameter names are used whenever possible.  Thus, most models have
+a parameter called ``amplitude`` that represents the overall height (or
+area) of a peak or function, a ``center`` parameter that represents a peak
+centroid position, and a ``sigma`` parameter that gives a characteristic
+width.   Some peak shapes also have a parameter ``fwhm``, typically
+constrained by ``sigma`` to give the full width at half maximum.
+
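+For example, a quick way to see these common names on any of the peak
+models (a minimal sketch; ``params`` is the same attribute used in the
+examples later in this chapter, and the exact output is illustrative)::
+
+    >>> from lmfit.models import GaussianModel
+    >>> mod = GaussianModel()
+    >>> sorted(mod.params.keys())
+    ['amplitude', 'center', 'fwhm', 'sigma']
+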
+After the list of built-in models below, a few examples of their use are given.
+
+Peak-like models
+-------------------
+
+There are many peak-like models available.  These include
+:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
+some less commonly used variations.
+
+:class:`GaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. class:: GaussianModel()
+
+A model based on a `Gaussian or normal distribution lineshape
+<http://en.wikipedia.org/wiki/Normal_distribution>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.  In addition, a constrained
+parameter ``fwhm`` is included.
+
+.. math::
+
+  f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
+
+where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The Full-Width at
+Half-Maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
+:math:`2.3548\sigma`.
+
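+As a quick numerical check of that conversion factor::
+
+    >>> import numpy as np
+    >>> 2*np.sqrt(2*np.log(2))
+    2.3548200450309493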
+
+:class:`LorentzianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LorentzianModel()
+
+A model based on a `Lorentzian or Cauchy-Lorentz distribution function
+<http://en.wikipedia.org/wiki/Cauchy_distribution>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.  In addition, a constrained
+parameter ``fwhm`` is included.
+
+.. math::
+
+  f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The Full-Width at
+Half-Maximum is :math:`2\sigma`.
+
+
+:class:`VoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: VoigtModel()
+
+A model based on a `Voigt distribution function
+<http://en.wikipedia.org/wiki/Voigt_profile>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.  A ``gamma`` parameter is also
+available.  By default, it is constrained to have value equal to ``sigma``,
+though this can be varied independently.  In addition, a constrained
+parameter ``fwhm`` is included.  The definition for the Voigt function used
+here is
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
+
+where
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+     z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
+     w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
+   \end{eqnarray*}
+
+and :func:`erfc` is the complementary error function.  As above,
+``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma``
+corresponds  to :math:`\gamma`.
+If ``gamma`` is kept at the default value (constrained to ``sigma``),
+the full width at half maximum is approximately :math:`3.6013\sigma`.
+
+
+:class:`PseudoVoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: PseudoVoigtModel()
+
+A model based on a `pseudo-Voigt distribution function
+<http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation>`_,
+which is a weighted sum of Gaussian and Lorentzian distribution functions
+that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
+and ``sigma`` (:math:`\sigma`), with a weighting parameter ``fraction``
+(:math:`\alpha`) in
+
+.. math::
+
+  f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma\sqrt{2\pi}}
+  e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+
+The :meth:`guess_starting_values` function always gives a starting
+value for ``fraction`` of 0.5.
+
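+To use a different starting value for ``fraction``, it can be overridden
+after guessing, just as ``gamma`` is overridden for the Voigt model in
+Example 1 below (a minimal sketch, assuming data arrays ``x`` and ``y``)::
+
+    >>> from lmfit.models import PseudoVoigtModel
+    >>> mod = PseudoVoigtModel()
+    >>> mod.guess_starting_values(y, x=x)
+    >>> mod.params['fraction'].value = 0.3   # instead of the default 0.5
+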
+:class:`Pearson7Model`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: Pearson7Model()
+
+A model based on a `Pearson VII distribution
+<http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
+This is another Voigt-like distribution function.  It has the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``exponent`` (:math:`p`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, p) = \frac{sA}{\big\{[1 + (\frac{x-\mu}{\sigma})^2] (2^{1/p} -1)  \big\}^p}
+
+where
+
+.. math::
+
+    s = \frac{\Gamma(p) \sqrt{2^{1/p} -1}}{ \sigma\sqrt{\pi}\,\Gamma(p-1/2)}
+
+and :math:`\Gamma(x)` is the gamma function.
+
+The :meth:`guess_starting_values` function always gives a starting
+value for ``exponent`` of 0.5.
+
+:class:`StudentsTModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: StudentsTModel()
+
+A model based on a `Student's t distribution function
+<http://en.wikipedia.org/wiki/Student%27s_t-distribution>`_, with the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
+
+
+where :math:`\Gamma(x)` is the gamma function.
+
+
+:class:`BreitWignerModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: BreitWignerModel()
+
+A model based on a `Breit-Wigner-Fano function
+<http://en.wikipedia.org/wiki/Fano_resonance>`_.  It has the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), plus ``q`` (:math:`q`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
+
+
+:class:`LognormalModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LognormalModel()
+
+A model based on the `Log-normal distribution function
+<http://en.wikipedia.org/wiki/Lognormal>`_.
+It has the usual parameters
+``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
+(:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A e^{-(\ln(x) - \mu)^2/ 2\sigma^2}}{x}
+
+
+:class:`DampedOscillatorModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: DampedOscillatorModel()
+
+A model based on the `Damped Harmonic Oscillator Amplitude
+<http://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
+
+
+:class:`ExponentialGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ExponentialGaussianModel()
+
+A model of an `Exponentially modified Gaussian distribution
+<http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
+    \exp\bigl[\gamma({\mu - x  + \sigma^2/2})\bigr]
+    {\operatorname{erfc}}\bigl[\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\bigr]
+
+
+where :func:`erfc` is the complementary error function.
+
+
+:class:`DonaichModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: DonaichModel()
+
+A model of a `Doniach Sunjic asymmetric lineshape
+<http://www.casaxps.com/help_manual/line_shapes.htm>`_, used in
+photo-emission, with the usual parameters ``amplitude`` (:math:`A`),
+``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`), and also ``gamma``
+(:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = A\frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)
+    \arctan{(x - \mu)}/\sigma\bigr]} {\bigr[1 + (x-\mu)/\sigma\bigl]^{(1-\gamma)/2}}
+
+
+Linear and Polynomial Models
+------------------------------------
+
+These models correspond to polynomials of some degree.  Of course, lmfit is
+a very inefficient way to do linear regression (see :func:`numpy.polyfit`
+or :func:`scipy.stats.linregress`), but these models may be useful as one
+of many components of a composite model.
+
+:class:`ConstantModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ConstantModel()
+
+   a class that consists of a single value, ``c``.  This is constant in the
+   sense of having no dependence on the independent variable ``x``, not in
+   the sense of being non-varying.  To be clear, ``c`` will be a variable
+   Parameter.
+
+:class:`LinearModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LinearModel()
+
+   a class that gives a linear model:
+
+.. math::
+
+    f(x; m, b) = m x + b
+
+with parameters ``slope`` for :math:`m` and  ``intercept`` for :math:`b`.
+
+
+:class:`QuadraticModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: QuadraticModel()
+
+
+   a class that gives a quadratic model:
+
+.. math::
+
+    f(x; a, b, c) = a x^2 + b x + c
+
+with parameters ``a``, ``b``, and ``c``.
+
+
+:class:`ParabolicModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ParabolicModel()
+
+   same as :class:`QuadraticModel`.
+
+
+:class:`PolynomialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. class:: PolynomialModel(degree)
+
+   a class that gives a polynomial model up to ``degree`` (with maximum
+   value of 7).
+
+.. math::
+
+    f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0}^{7} c_i  x^i
+
+with parameters ``c0``, ``c1``, ..., ``c7``.  The supplied ``degree``
+will specify how many of these are actual variable parameters.
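+
+A minimal sketch, using the ``degree`` argument shown above::
+
+    >>> from lmfit.models import PolynomialModel
+    >>> mod = PolynomialModel(degree=3)   # variable parameters: c0, c1, c2, c3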
+
+
+
+Step-like models
+-----------------------------------------------
+
+
+:class:`StepModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: StepModel(form='linear')
+
+A model based on a Step function, with four choices for functional form.
+The step function starts with a value 0, and ends with a value of :math:`A`
+(``amplitude``), rising to :math:`A/2` at :math:`\mu` (``center``),
+with :math:`\sigma` (``sigma``) setting the characteristic width. The
+supported functional forms are ``linear`` (the default), ``atan`` or
+``arctan`` for an arc-tangent function,  ``erf`` for an error function, or
+``logistic`` for a `logistic function <http://en.wikipedia.org/wiki/Logistic_function>`_.
+The forms are
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})  & = A \min{[1, \max{(0,  \alpha)}]} \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})  & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})     & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 +  e^{\alpha}} ]
+   \end{eqnarray*}
+
+where :math:`\alpha  = (x - \mu)/{\sigma}`.
+
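+Selecting among these forms happens at construction time; a minimal
+sketch (the ``erf`` form is also the one used in Example 2 below)::
+
+    >>> from lmfit.models import StepModel
+    >>> step_mod = StepModel(form='erf')   # error-function step, not the 'linear' default
+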
+:class:`RectangleModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. class:: RectangleModel(form='linear')
+
+A model based on a Step-up and Step-down function of the same form.  The
+same choices for functional form as for :class:`StepModel` are supported,
+with ``linear`` as the default.  The function starts with a value 0, and
+ends with a value of :math:`A` (``amplitude``), rising to :math:`A/2` at
+:math:`\mu_1` (``center1``), with :math:`\sigma_1` (``sigma1``) setting the
+characteristic width.  It then drops back through :math:`A/2` at
+:math:`\mu_2` (``center2``), with characteristic width :math:`\sigma_2`
+(``sigma2``).
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})   &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \max{[-1, \min{(0,  \alpha_2)}]} \} \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})   &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})      &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 +  e^{\alpha_2}} ]
+   \end{eqnarray*}
+
+
+where :math:`\alpha_1  = (x - \mu_1)/{\sigma_1}` and :math:`\alpha_2  = -(x - \mu_2)/{\sigma_2}`.
+
+
+Exponential and Power law models
+-----------------------------------------------
+
+:class:`ExponentialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ExponentialModel()
+
+A model based on an `exponential decay function
+<http://en.wikipedia.org/wiki/Exponential_decay>`_. With parameters named
+``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), this has the form:
+
+.. math::
+
+   f(x; A, \tau) = A e^{-x/\tau}
+
+
+:class:`PowerLawModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: PowerLawModel()
+
+A model based on a `Power Law <http://en.wikipedia.org/wiki/Power_law>`_.
+With parameters
+named ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), this has the
+form:
+
+.. math::
+
+   f(x; A, k) = A x^k
+
+
+
+Example 1: Fit Peaked data to Gaussian or Voigt profiles
+------------------------------------------------------------------
+
+Here, we will fit data to two similar lineshapes, in order to decide which
+might be the better model.  We will start with a Gaussian profile, as in
+the previous chapter, but use the built-in :class:`GaussianModel` instead
+of one we write ourselves.  This version differs slightly from the one in
+the previous example in that the parameter names are different, and they
+have built-in default values.  So, we'll simply use::
+
+    from numpy import loadtxt
+    from lmfit.models import GaussianModel
+
+    data = loadtxt('test_peak.dat')
+    x = data[:, 0]
+    y = data[:, 1]
+
+    mod = GaussianModel()
+    mod.guess_starting_values(y, x=x)
+    out  = mod.fit(y, x=x)
+    print(mod.fit_report(min_correl=0.25))
+
+which prints out the results::
+
+    [[Fit Statistics]]
+        # function evals   = 25
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 29.994
+        reduced chi-square = 0.075
+    [[Variables]]
+        amplitude:     30.31352 +/- 0.1571252 (0.52%) initial =  21.54192
+        center:        9.242771 +/- 0.00737481 (0.08%) initial =  9.25
+        fwhm:          2.901562 +/- 0.01736635 (0.60%) == '2.354820*sigma'
+        sigma:         1.23218 +/- 0.00737481 (0.60%) initial =  1.35
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(amplitude, sigma)          =  0.577
+
+We see a few interesting differences from the results of the previous
+chapter.  First, the parameter names are longer.  Second, there is a
+``fwhm`` parameter, defined as :math:`\sim 2.355\sigma`.  And third, the
+automated initial guesses are pretty good.  A plot, however, shows that
+the fit is not so great:
+
+.. _figA1:
+
+  .. image::  _images/models_peak1.png
+     :target: _images/models_peak1.png
+     :width: 48 %
+  .. image::  _images/models_peak2.png
+     :target: _images/models_peak2.png
+     :width: 48 %
+
+  Fit to peak with Gaussian (left) and Lorentzian (right) models.
+
+suggesting that a different peak shape, with longer tails, should be used.
+Perhaps a Lorentzian would be better?  To try this, we simply replace
+``GaussianModel`` with :class:`LorentzianModel`::
+
+    from lmfit.models import LorentzianModel
+    mod = LorentzianModel()
+    mod.guess_starting_values(y, x=x)
+    out  = mod.fit(y, x=x)
+    print(mod.fit_report(min_correl=0.25))
+
+The results, of course, are worse::
+
+    [[Fit Statistics]]
+        # function evals   = 29
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 53.754
+        reduced chi-square = 0.135
+    [[Variables]]
+        amplitude:     38.97278 +/- 0.3138612 (0.81%) initial =  21.54192
+        center:        9.244389 +/- 0.009276152 (0.10%) initial =  9.25
+        fwhm:          2.30968 +/- 0.02631297 (1.14%) == '2.0000000*sigma'
+        sigma:         1.15484 +/- 0.01315648 (1.14%) initial =  1.35
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(amplitude, sigma)          =  0.709
+
+
+with the plot shown in the figure above.
+
+A Voigt model does a better job.  Using :class:`VoigtModel`, this is
+as simple as::
+
+    from lmfit.models import VoigtModel
+    mod = VoigtModel()
+    mod.guess_starting_values(y, x=x)
+    out  = mod.fit(y, x=x)
+    print(mod.fit_report(min_correl=0.25))
+
+which gives::
+
+    [[Fit Statistics]]
+        # function evals   = 30
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 14.545
+        reduced chi-square = 0.037
+    [[Variables]]
+        amplitude:     35.75536 +/- 0.1386167 (0.39%) initial =  21.54192
+        center:        9.244111 +/- 0.005055079 (0.05%) initial =  9.25
+        fwhm:          2.629512 +/- 0.01326999 (0.50%) == '3.6013100*sigma'
+        gamma:         0.7301542 +/- 0.003684769 (0.50%) == 'sigma'
+        sigma:         0.7301542 +/- 0.003684769 (0.50%) initial =  1.35
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(amplitude, sigma)          =  0.651
+
+with the much better value for :math:`\chi^2` and the obviously better
+match to the data as seen in the figure below (left).
+
+.. _figA2:
+
+  .. image::  _images/models_peak3.png
+     :target: _images/models_peak3.png
+     :width: 48 %
+  .. image::  _images/models_peak4.png
+     :target: _images/models_peak4.png
+     :width: 48 %
+
+  Fit to peak with Voigt model (left) and Voigt model with ``gamma``
+  varying independently of ``sigma`` (right).
+
+The Voigt function has a :math:`\gamma` parameter (``gamma``) that can be
+distinct from ``sigma``.  The default behavior used above constrains
+``gamma`` to have exactly the same value as ``sigma``.  If we allow these
+to vary separately, does the fit improve?  To do this, we have to change
+the ``gamma`` parameter from a constrained expression and give it a
+starting value::
+
+    mod = VoigtModel()
+    mod.guess_starting_values(y, x=x)
+    mod.params['gamma'].expr  = None
+    mod.params['gamma'].value = 0.7
+
+    out  = mod.fit(y, x=x)
+    print(mod.fit_report(min_correl=0.25))
+
+which gives::
+
+    [[Fit Statistics]]
+        # function evals   = 32
+        # data points      = 401
+        # variables        = 4
+        chi-square         = 10.930
+        reduced chi-square = 0.028
+    [[Variables]]
+        amplitude:     34.19147 +/- 0.1794683 (0.52%) initial =  21.54192
+        center:        9.243748 +/- 0.00441902 (0.05%) initial =  9.25
+        fwhm:          3.223856 +/- 0.05097446 (1.58%) == '3.6013100*sigma'
+        gamma:         0.5254013 +/- 0.01857953 (3.54%) initial =  0.7
+        sigma:         0.8951898 +/- 0.01415442 (1.58%) initial =  1.35
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(amplitude, gamma)          =  0.821
+
+and the fit shown above (on the right).
+
+Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
+is definitely better with a separately varying ``gamma`` parameter.  In
+addition, the two values for ``gamma`` and ``sigma`` differ significantly
+-- well outside the estimated uncertainties.  Even more compelling, reduced
+:math:`\chi^2` is improved even though a fourth variable has been added to
+the fit, justifying it as a significant variable in the model.
+
+
+This example shows how easy it can be to alter and compare fitting models
+for simple problems.
+
+
+Example 2: Fit data to a Composite Model with pre-defined models
+------------------------------------------------------------------
+
+
+Here, we repeat the point made at the end of the last chapter: instances
+of the :class:`Model` class can be added together to make a *composite
+model*.  Using the large number of built-in models available, this is
+very simple.  Here is an example of a fit to a noisy step function plus a
+constant:
+
+.. literalinclude:: ../examples/doc_stepmodel.py
+
+After constructing step-like data, we first create a :class:`StepModel`,
+telling it to use the ``erf`` form (described above), and a
+:class:`ConstantModel`.  We set initial values, in one case using the data
+and the :meth:`guess_starting_values` method, and in the other using an
+explicit :meth:`set_paramval` call for the initial constant value.  Making
+a composite model, we run :meth:`fit` and report the results, which give::
+
+    [[Fit Statistics]]
+        # function evals   = 52
+        # data points      = 201
+        # variables        = 4
+        chi-square         = 600.191
+        reduced chi-square = 3.047
+    [[Variables]]
+        amplitude:     111.1106 +/- 0.3122441 (0.28%) initial =  115.3431
+        c:             11.31151 +/- 0.2631688 (2.33%) initial =  9.278188
+        center:        3.122191 +/- 0.00506929 (0.16%) initial =  5
+        sigma:         0.6637199 +/- 0.009799607 (1.48%) initial =  1.428571
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(c, center)                 =  0.381
+        C(amplitude, sigma)          =  0.381
+
+with a plot of the fit shown below:
+
+.. image::  _images/models_stepfit.png
+   :target: _images/models_stepfit.png
+   :width: 50 %
+
+
+Example 3: Fitting Multiple Peaks -- and using Prefixes
+------------------------------------------------------------------
+
+.. _NIST StRD: http://itl.nist.gov/div898/strd/nls/nls_main.shtml
+
+As shown above, many of the models have similar parameter names.  In a
+composite model, parameters for different parts of the model could
+therefore end up with the same name.  To overcome this, each
+:class:`Model` can have a ``prefix`` attribute (normally set to a blank
+string) that will be put at the beginning of each parameter name.  To
+illustrate, we fit one of the classic datasets from the `NIST StRD`_
+suite, involving a decaying exponential and two Gaussians.
+
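+A minimal sketch of the idea (assuming, as in the full example below,
+that ``prefix`` can be given at construction)::
+
+    >>> from lmfit.models import GaussianModel, ExponentialModel
+    >>> exp_mod = ExponentialModel(prefix='exp_')
+    >>> gauss1  = GaussianModel(prefix='g1_')
+    >>> gauss2  = GaussianModel(prefix='g2_')
+    >>> mod = exp_mod + gauss1 + gauss2   # parameters: exp_..., g1_..., g2_...
+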
+.. literalinclude:: ../examples/doc_nistgauss.py
+
+
+where we give a separate prefix to each model (they all have an
+``amplitude`` parameter).  The ``prefix`` values are attached transparently
+to the models.  Note that the calls to :meth:`set_paramval` used the bare
+parameter names, without the prefix.  We could have included the prefixes,
+but because we set values through the individual models ``gauss1`` and
+``gauss2``, there was no need.  Had we used the composite model to set the
+initial parameter values, we would have needed to, as with::
+
+    ## WRONG
+    mod.set_paramval('amplitude', 500, min=10)
+
+    ## Raises KeyError: "'amplitude' not a parameter name"
+
+    ## Correct
+    mod.set_paramval('g1_amplitude', 500, min=10)
+
+
+The fit results printed out are::
+
+    [[Fit Statistics]]
+        # function evals   = 66
+        # data points      = 250
+        # variables        = 8
+        chi-square         = 1247.528
+        reduced chi-square = 5.155
+    [[Variables]]
+        exp_amplitude:     99.01833 +/- 0.5374884 (0.54%) initial =  162.2102
+        exp_decay:         90.95088 +/- 1.103105 (1.21%) initial =  93.24905
+        g1_amplitude:      4257.774 +/- 42.38366 (1.00%) initial =  500
+        g1_center:         107.031 +/- 0.1500691 (0.14%) initial =  105
+        g1_fwhm:           39.26092 +/- 0.3779083 (0.96%) == '2.354820*g1_sigma'
+        g1_sigma:          16.67258 +/- 0.1604829 (0.96%) initial =  12
+        g2_amplitude:      2493.417 +/- 36.16923 (1.45%) initial =  500
+        g2_center:         153.2701 +/- 0.194667 (0.13%) initial =  150
+        g2_fwhm:           32.51287 +/- 0.4398624 (1.35%) == '2.354820*g2_sigma'
+        g2_sigma:          13.80695 +/- 0.1867924 (1.35%) initial =  12
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(g1_amplitude, g1_sigma)    =  0.824
+        C(g2_amplitude, g2_sigma)    =  0.815
+        C(g1_sigma, g2_center)       =  0.684
+        C(g1_amplitude, g2_center)   =  0.648
+        C(g1_center, g2_center)      =  0.621
+        C(g1_center, g1_sigma)       =  0.507
+        C(g1_amplitude, g1_center)   =  0.418
+        C(exp_amplitude, g2_amplitude)  =  0.282
+        C(exp_amplitude, g2_sigma)   =  0.171
+        C(exp_amplitude, g1_amplitude)  =  0.148
+        C(exp_decay, g1_center)      =  0.105
+
+We get a very good fit to this challenging problem (described at the NIST
+site as of average difficulty, but the tests there are generally hard) by
+applying reasonable initial guesses and putting modest but explicit bounds
+on the parameter values.  This fit is shown on the left:
+
+.. _figA3:
+
+  .. image::  _images/models_nistgauss.png
+     :target: _images/models_nistgauss.png
+     :width: 48 %
+  .. image::  _images/models_nistgauss2.png
+     :target: _images/models_nistgauss2.png
+     :width: 48 %
+
+
+One final point on setting initial values.  From looking at the data
+itself, we can see the two Gaussian peaks are reasonably well centered.  We
+can use this to simplify setting the initial parameter values, defining an
+:func:`index_of` function to limit the data range.  That is, with::
+
+    def index_of(arrval, value):
+        "return index of array *at or below* value "
+        if value < min(arrval):  return 0
+        return max(np.where(arrval<=value)[0])
+
+    ix1 = index_of(x,  75)
+    ix2 = index_of(x, 135)
+    ix3 = index_of(x, 175)
+
+    exp_mod.guess_starting_values(y[:ix1], x=x[:ix1])
+    gauss1.guess_starting_values(y[ix1:ix2], x=x[ix1:ix2])
+    gauss2.guess_starting_values(y[ix2:ix3], x=x[ix2:ix3])
+
+we get better initial estimates, and the fit converges in fewer steps,
+without needing any bounds on the parameters::
+
+    [[Fit Statistics]]
+        # function evals   = 46
+        # data points      = 250
+        # variables        = 8
+        chi-square         = 1247.528
+        reduced chi-square = 5.155
+    [[Variables]]
+        exp_amplitude:     99.01833 +/- 0.5374875 (0.54%) initial =  94.53724
+        exp_decay:         90.95089 +/- 1.103105 (1.21%) initial =  111.1985
+        g1_amplitude:      4257.773 +/- 42.38338 (1.00%) initial =  2126.432
+        g1_center:         107.031 +/- 0.1500679 (0.14%) initial =  106.5
+        g1_fwhm:           39.26091 +/- 0.3779053 (0.96%) == '2.354820*g1_sigma'
+        g1_sigma:          16.67258 +/- 0.1604816 (0.96%) initial =  14.5
+        g2_amplitude:      2493.418 +/- 36.16948 (1.45%) initial =  1878.892
+        g2_center:         153.2701 +/- 0.1946675 (0.13%) initial =  150
+        g2_fwhm:           32.51288 +/- 0.4398666 (1.35%) == '2.354820*g2_sigma'
+        g2_sigma:          13.80695 +/- 0.1867942 (1.35%) initial =  15
+    [[Correlations]] (unreported correlations are <  0.500)
+        C(g1_amplitude, g1_sigma)    =  0.824
+        C(g2_amplitude, g2_sigma)    =  0.815
+        C(g1_sigma, g2_center)       =  0.684
+        C(g1_amplitude, g2_center)   =  0.648
+        C(g1_center, g2_center)      =  0.621
+        C(g1_center, g1_sigma)       =  0.507
+
+
+This example is in the file ``doc_nistgauss2.py`` in the examples folder,
+and the fit result is shown on the right above.
diff --git a/doc/conf.py b/doc/conf.py
index 27e19b2..451458e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -16,7 +16,8 @@ import sys, os
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'ext')))
+sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
+sys.path.append(os.path.abspath(os.path.join('.')))
 # -- General configuration -----------------------------------------------------
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
@@ -24,11 +25,23 @@ sys.path.append(os.path.abspath(os.path.join('.', 'ext')))
 extensions = ['sphinx.ext.autodoc',
               'sphinx.ext.todo',
               'sphinx.ext.coverage',
-              'sphinx.ext.pngmath',
-              'ipython_directive',
-              'ipython_console_highlighting',
+              'sphinx.ext.mathjax',
+              'sphinx.ext.intersphinx',
               'numpydoc']
 
+try:
+    import IPython.sphinxext.ipython_directive
+    extensions.extend(['IPython.sphinxext.ipython_directive',
+                       'IPython.sphinxext.ipython_console_highlighting'])
+except ImportError:
+    pass
+
+
+intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
+                       'numpy': ('http://scipy.org/docs/numpy/', None),
+                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
+
+intersphinx_cache_limit = 10
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -44,7 +57,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'lmfit'
-copyright = u'2013, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
+copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -98,18 +111,8 @@ pygments_style = 'sphinx'
 
 # -- Options for HTML output ---------------------------------------------------
 
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'default'
-
-html_theme = 'sphinxdoc'
-
-# html_theme = 'nature'
-#html_theme = 'agogo'
-# html_theme_options = {'pagewidth':'85em', 'documentwidth':'60em', 'sidebarwidth': '25em',
-#                       # 'headercolor1': '#000080',
-#                       # 'headercolor2': '#0000A0',
-#                       }
+html_theme_path = ['sphinx/theme']
+html_theme = 'lmfitdoc'
 
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
@@ -117,10 +120,10 @@ html_theme = 'sphinxdoc'
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
 #html_title = None
-html_title = 'Least-Squares Minimization with Constraints for Python'
+html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Least-Squares Minimization with Constraints for Python'
+html_short_title = 'Minimization and Curve-Fitting for Python'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -142,23 +145,13 @@ html_static_path = ['_static']
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-html_use_smartypants = False # True
+html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
 
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
 html_use_modindex = False
-
-# If false, no index is generated.
 #html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
 #html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
@@ -175,36 +168,13 @@ html_show_sourcelink = True
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'lmfitdoc'
 
-
 # -- Options for LaTeX output --------------------------------------------------
 
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
   ('index', 'lmfit.tex',
-   'Least-Squares Minimization with Constraints for Python',
-   'Matthew Newville', 'manual'),
+   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
+   'Matthew Newville, Till Stensitzki, and others', 'manual'),
 ]
 
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 17ef7d1..75bcae3 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -37,104 +37,134 @@ A log-likelihood method will be added soon.
 A basic example
 ---------------
 
-First we create a toy problem:
+First we create a toy problem::
 
-.. ipython:: python
 
-    import lmfit
-    import numpy as np
-    x = np.linspace(0.3,10,100)
-    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
-    p = lmfit.Parameters()
-    p.add_many(('a',0.1),('b',1))
-    def residual(p):
-        a = p['a'].value
-        b = p['b'].value
-        return 1/(a*x)+b-y
+    >>> import lmfit
+    >>> import numpy as np
+    >>> x = np.linspace(0.3,10,100)
+    >>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+    >>> p = lmfit.Parameters()
+    >>> p.add_many(('a',0.1),('b',1))
+    >>> def residual(p):
+    ...    a = p['a'].value
+    ...    b = p['b'].value
+    ...    return 1/(a*x)+b-y
 
-We have to fit it, before we can generate the confidence intervals.
 
-.. ipython:: python
+We have to fit it, before we can generate the confidence intervals::
 
-    mi = lmfit.minimize(residual, p)
-    mi.leastsq()
-    lmfit.printfuncs.report_fit(mi.params)
 
-Now it just a simple function call to start the calculation:
+    >>> mi = lmfit.minimize(residual, p)
+    >>> mi.leastsq()
+    >>> lmfit.printfuncs.report_fit(mi.params)
+    [[Variables]]
+         a:     0.09978076 +/- 0.0002112132 (0.21%) initial =  0.09978076
+         b:     1.992907 +/- 0.0132743 (0.67%) initial =  1.992907
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(a, b)                      =  0.601
 
-.. ipython:: python
 
-    ci = lmfit.conf_interval(mi)
-    lmfit.printfuncs.report_ci(ci)
+Now it is just a simple function call to start the calculation::
 
-As we can see, the estimated error is almost the same:
-it is not necessary to calculate ci's for this problem.
+    >>> ci = lmfit.conf_interval(mi)
+    >>> lmfit.printfuncs.report_ci(ci)
+         99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
+    a   0.09960   0.09981   0.10000   0.10019   0.10039   0.10058   0.10079
+    b   1.97035   1.98326   1.99544   2.00008   2.01936   2.03154   2.04445
+
+
+As we can see, the estimated errors are almost the same as those from the
+covariance matrix, and the uncertainties are well behaved: going from 1
+:math:`\sigma` (68% confidence) to 3 :math:`\sigma` (99.7% confidence)
+uncertainties is fairly linear.  For this problem, it is not necessary to
+calculate confidence intervals, and the estimates of the uncertainties
+from the covariance matrix are sufficient.
 
 An advanced example
 -------------------
-Now we look at a problem, where calculating the error from approximated
-covariance can lead to wrong results:
 
-.. ipython:: python
+Now we look at a problem where calculating the error from approximated
+covariance can lead to misleading results::
 
-    @suppress
-    np.random.seed(1)
-    y = 3*np.exp(-x/2.)-5*np.exp(-x/10.)+0.2*np.random.randn(x.size)
-    p = lmfit.Parameters()
-    p.add_many(('a1', 5), ('a2', -5), ('t1', 2), ('t2', 5))
-    def residual(p):
-        a1, a2, t1, t2 = [i.value for i in p.values()]
-        return a1*np.exp(-x/t1)+a2*np.exp(-x/t2)-y
+    >>> y = 3*np.exp(-x/2.)-5*np.exp(-x/10.)+0.2*np.random.randn(x.size)
+    >>> p = lmfit.Parameters()
+    >>> p.add_many(('a1', 5), ('a2', -5), ('t1', 2), ('t2', 5))
+    >>> def residual(p):
+    ...    a1, a2, t1, t2 = [i.value for i in p.values()]
+    ...    return a1*np.exp(-x/t1)+a2*np.exp(-x/t2)-y
 
-Now lets fit it:
+    >>> mi = lmfit.minimize(residual, p)
+    >>> mi.leastsq()
+    >>> lmfit.printfuncs.report_fit(mi.params, show_correl=False)
 
-.. ipython:: python
+    [[Variables]]
+         a1:     2.611013 +/- 0.3279648 (12.56%) initial =  2.611013
+         a2:    -4.512928 +/- 0.3991997 (8.85%) initial = -4.512928
+         t1:     1.569477 +/- 0.3345078 (21.31%) initial =  1.569477
+         t2:     10.96137 +/- 1.263874 (11.53%) initial =  10.96137
 
-    mi = lmfit.minimize(residual, p)
-    mi.leastsq()
-    lmfit.printfuncs.report_fit(mi.params, show_correl=False)
 
 Again we call :func:`conf_interval`, this time with tracing and only for 1-
-and 2-sigma:
+and 2 :math:`\sigma`::
+
+    >>> ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
+    >>> lmfit.printfuncs.report_ci(ci)
+          95.00%    68.00%     0.00%    68.00%    95.00%
+    a1   2.11679   2.33696   2.61101   3.06631   4.28694
+    a2  -6.39449  -5.05982  -4.20173  -4.19528  -3.97850
+    t2   8.00414   9.62688  12.17331  12.17886  13.34857
+    t1   1.07009   1.28482   1.37407   1.97509   2.64341
+
+Comparing these two different estimates, we see that the estimate for `a1`
+is reasonably well approximated from the covariance matrix, but the
+estimates for `a2`, `t1`, and `t2` are very asymmetric, and going from
+1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma` (95% confidence) is
+not very predictable.
 
-.. ipython:: python
+Now let's plot a confidence region::
 
-    ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
-    lmfit.printfuncs.report_ci(ci)
+    >>> import matplotlib.pylab as plt
+    >>> x, y, grid = lmfit.conf_interval2d(mi,'a1','t2',30,30)
+    >>> plt.contourf(x, y, grid, np.linspace(0,1,11))
+    >>> plt.xlabel('a1')
+    >>> plt.colorbar()
+    >>> plt.ylabel('t2')
+    >>> plt.show()
 
-If you compare the calculated error estimates, you will see that the
-regular estimate is too small. Now let's plot a confidence region:
+which shows the figure on the left below for ``a1`` and ``t2``, and for
+``a2`` and ``t2`` on the right:
 
-.. ipython:: python
+.. _figC1:
 
-    import matplotlib.pylab as plt
+  .. image:: _images/conf_interval1.png
+     :target: _images/conf_interval1.png
+     :width: 48%
+  .. image:: _images/conf_interval1a.png
+     :target: _images/conf_interval1a.png
+     :width: 48%
 
-    x, y, grid = lmfit.conf_interval2d(mi,'a1','t2',30,30)
-    plt.contourf(x, y, grid, np.linspace(0,1,11))
-    plt.xlabel('a1');
-    plt.colorbar();
-    @savefig conf_interval.png width=7in
-    plt.ylabel('t2');
+Neither of these plots is very much like an ellipse, which is implicitly
+assumed by the approach using the covariance matrix.
 
-Remember the trace? It shows the dependence between two parameters.
+Remember the trace?  It also shows the dependence between two parameters::
 
-.. ipython:: python
+    >>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
+    >>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
+    >>> plt.scatter(x, y, c=prob ,s=30)
+    >>> plt.scatter(x2, y2, c=prob2, s=30)
+    >>> plt.gca().set_xlim((1, 5))
+    >>> plt.gca().set_ylim((5, 15))
+    >>> plt.xlabel('a1')
+    >>> plt.ylabel('t2')
+    >>> plt.show()
 
-    @suppress
-    plt.contourf(x, y, grid, np.linspace(0,1,11))
-    @suppress
-    plt.xlabel('a1')
-    @suppress
-    plt.colorbar()
-    @suppress
-    plt.ylabel('t2')
 
+which shows the trace of values:
 
-    x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
-    x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
-    @savefig conf_interval2.png width=7in
-    plt.scatter(x, y, c=prob ,s=30)
-    plt.scatter(x2, y2, c=prob2, s=30)
+.. image:: _images/conf_interval2.png
+   :target: _images/conf_interval2.png
+   :width: 50%
 
 
 Documentation of methods
@@ -142,6 +172,3 @@ Documentation of methods
 
 .. autofunction:: lmfit.conf_interval
 .. autofunction:: lmfit.conf_interval2d
-
-
-
diff --git a/doc/constraints.rst b/doc/constraints.rst
index cc6e66e..ee5ca88 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -1,11 +1,11 @@
-.. _asteval: http://newville.github.io/asteval/
-
-.. _math-constraints-label:
+.. _constraints_chapter:
 
 =================================
 Using Mathematical Constraints
 =================================
 
+.. _asteval: http://newville.github.io/asteval/
+
 While being able to fix variables and place upper and lower bounds on their
 values are key parts of lmfit, the ability to place mathematical
 constraints on parameters is also highly desirable.  This section describes
diff --git a/doc/contents.rst b/doc/contents.rst
new file mode 100644
index 0000000..5879a61
--- /dev/null
+++ b/doc/contents.rst
@@ -0,0 +1,15 @@
+Contents
+=================
+
+.. toctree::
+   :maxdepth: 3
+
+   intro
+   installation
+   parameters
+   fitting
+   model
+   builtin_models
+   confidence
+   bounds
+   constraints
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 97afb7f..eb56637 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -1,8 +1,10 @@
+.. _minimize_chapter:
+
 =======================================
 Performing Fits, Analyzing Outputs
 =======================================
 
-As shown in the previous sections, a simple fit can be performed with
+As shown in the introduction, a simple fit can be performed with
 the :func:`minimize` function.    For more sophisticated modeling,
 the :class:`Minimizer` class can be used to gain a bit more control,
 especially when using complicated constraints.
@@ -15,7 +17,7 @@ The minimize function takes a function to minimize, a dictionary of
 :class:`Parameter` , and several optional arguments.    See
 :ref:`fit-func-label` for details on writing the function to minimize.
 
-.. function:: minimize(function, params, args=None, kws=None, method='leastsq', **leastsq_kws)
+.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **leastsq_kws]]]]]])
 
    find values for the params so that the sum-of-squares of the returned array
    from function is minimized.
@@ -30,16 +32,18 @@ The minimize function takes a function to minimize, a dictionary of
    :type  args:  tuple
    :param kws:   dictionary to pass to the residual function as keyword arguments.
    :type  kws:  dict
-   :param method:  name of fitting method to use. See  :ref:`fit-engines-label` for details
-   :type  method:  string
-   :param leastsq_kws:  dictionary to pass to scipy.optimize.leastsq
+   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
+   :type  method:  string (default ``leastsq``)
+   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``)
+   :param iter_cb:  function to be called at each fit iteration
+   :type  iter_cb:  callable or ``None``
+   :param leastsq_kws:  dictionary to pass to :func:`scipy.optimize.leastsq`.
    :type  leastsq_kws:  dict
+
    :return: Minimizer object, which can be used to inspect goodness-of-fit
             statistics, or to re-run fit.
 
-   For backward compatibility, the keyword `engine` is retained as a synonym for `method`,
-   but this should be considered deprecated.
-
    On output, the params will be updated with best-fit values and, where
    appropriate, estimated uncertainties and correlations.  See
    :ref:`fit-results-label` for further details.
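+
+   As a minimal sketch of using the ``iter_cb`` option (reusing the
+   ``residual`` function and ``params`` from the introduction, and assuming
+   the callback receives the parameters, the iteration count, and the
+   residual array), one might watch the fit progress with::
+
+       def per_iteration(params, nfev, resid, *args, **kws):
+           # called at each iteration with the current parameter values
+           print(nfev, (resid**2).sum())
+
+       out = minimize(residual, params, args=(x, data, eps_data),
+                      iter_cb=per_iteration)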
@@ -120,8 +124,7 @@ but might be wiser to put this directly in the function with::
         if abs(period) < 1.e-10:
             period = sign(period)*1.e-10
 
-
-..  _fit-engines-label:
+..  _fit-methods-label:
 
 Choosing Different Fitting Methods
 ===========================================
@@ -143,9 +146,9 @@ modification of the quasi-Newton method.
 To select which of these algorithms to use, use the ``method`` keyword to the
 :func:`minimize` function or use the corresponding method name from the
 :class:`Minimizer` class as listed in the
-:ref:`Table of Supported Fitting Methods <fit-engine-table>`.
+:ref:`Table of Supported Fitting Methods <fit-methods-table>`.
 
-.. _fit-engine-table:
+.. _fit-methods-table:
 
  Table of Supported Fitting Methods:
 
@@ -165,7 +168,7 @@ To select which of these algorithms to use, use the ``method`` keyword to the
  +-----------------------+--------------------+---------------------+-------------------------+
  | Conjugate Gradient    |  ``cg``            |                     | ``CG``                  |
  +-----------------------+--------------------+---------------------+-------------------------+
- | Newtown-CG            |  ``newton``        |                     | ``Newton-CG``           |
+ | Newton-CG             |  ``newton``        |                     | ``Newton-CG``           |
  +-----------------------+--------------------+---------------------+-------------------------+
  | COBYLA                |  ``cobyla``        |                     |  ``COBYLA``             |
  +-----------------------+--------------------+---------------------+-------------------------+
@@ -173,7 +176,6 @@ To select which of these algorithms to use, use the ``method`` keyword to the
  | Squares Programming   |                    |                     |                         |
  +-----------------------+--------------------+---------------------+-------------------------+
 
-
 .. note::
 
    Use of :meth:`scipy.optimize.minimize` requires scipy 0.11 or higher.
@@ -227,9 +229,9 @@ stored as attributes of the corresponding :class:`Parameter`.
 +----------------------+----------------------------------------------------------------------------+
 |    message           | message about fit success.                                                 |
 +----------------------+----------------------------------------------------------------------------+
-|    ier               | integer error value from scipy.optimize.leastsq                            |
+|    ier               | integer error value from :func:`scipy.optimize.leastsq`                    |
 +----------------------+----------------------------------------------------------------------------+
-|    lmdif_message     | message from scipy.optimize.leastsq                                        |
+|    lmdif_message     | message from :func:`scipy.optimize.leastsq`                                |
 +----------------------+----------------------------------------------------------------------------+
 |    nvarys            | number of variables in fit  :math:`N_{\rm varys}`                          |
 +----------------------+----------------------------------------------------------------------------+
@@ -243,6 +245,10 @@ stored as attributes of the corresponding :class:`Parameter`.
 +----------------------+----------------------------------------------------------------------------+
 |    redchi            | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` |
 +----------------------+----------------------------------------------------------------------------+
+|    var_map           | list of variable parameter names for rows/columns of covar                 |
++----------------------+----------------------------------------------------------------------------+
+|    covar             | covariance matrix (with rows/columns using var_map)                |
++----------------------+----------------------------------------------------------------------------+
 
 Note that the calculation of chi-square and reduced chi-square assume that the
 returned residual function is scaled properly to the uncertainties in the data.
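+
+For example, after a fit these statistics can be read directly as
+attributes of the returned object, using the names in the table above::
+
+    out = minimize(residual, params, args=(x, data, eps_data))
+    print(out.nvarys, out.chisqr, out.redchi)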
@@ -293,9 +299,9 @@ For full control of the fitting process, you'll want to create a
    :type  fcn_kws:  dict
    :param iter_cb:  function to be called at each fit iteration
    :type  iter_cb:  callable or ``None``
-   :param scale_covar:  flag for scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
-   :type  scale_cover:  boolean, default ``True``
-   :param kws:      dictionary to pass as keywords to the underlying scipy.optimize method.
+   :param scale_covar:  flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``)
+   :param kws:      dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
    :type  kws:      dict
    :return: Minimizer object, which can be used to inspect goodness-of-fit
             statistics, or to re-run fit.
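+
+   A short sketch of direct use, with the same ``residual`` function and
+   ``params`` as above (assuming ``fcn_args`` holds the positional
+   arguments, as ``fcn_kws`` holds the keyword arguments)::
+
+       from lmfit import Minimizer
+
+       mini = Minimizer(residual, params, fcn_args=(x, data, eps_data))
+       mini.leastsq()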
@@ -306,7 +312,7 @@ The Minimizer object has a few public methods:
 .. method:: leastsq(scale_covar=True, **kws)
 
    perform fit with Levenberg-Marquardt algorithm.  Keywords will be passed directly to
-   `scipy.optimize.leastsq <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.leastsq.html>`_.
+   :func:`scipy.optimize.leastsq`.
    By default, numerical derivatives are used, and the following arguments are set:
 
     +------------------+----------------+------------------------------------------------------------+
@@ -323,26 +329,11 @@ The Minimizer object has a few public methods:
     +------------------+----------------+------------------------------------------------------------+
 
 
-.. method:: anneal(**kws)
-
-   perform fit with Simulated Annealing.  Keywords will be passed directly to
-   `scipy.optimize.anneal <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.anneal.html>`_.
-
-    +------------------+----------------+------------------------------------------------------------+
-    | :meth:`anneal`   |  Default Value | Description                                                |
-    | arg              |                |                                                            |
-    +==================+================+============================================================+
-    |   schedule       | ``cauchy``     | annealing schedule                                         |
-    +------------------+----------------+------------------------------------------------------------+
-    |   maxiter        |  2000*(nvar+1) | maximum number of iterations                               |
-    +------------------+----------------+------------------------------------------------------------+
-
-    For me, this Simulated Annealing appears to never work.
 
 .. method:: lbfgsb(**kws)
 
    perform fit with L-BFGS-B algorithm.  Keywords will be passed directly to
-   `scipy.optimize.fmin_l_bfgs_b <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html>`_.
+   :func:`scipy.optimize.fmin_l_bfgs_b`.
 
 
     +------------------+----------------+------------------------------------------------------------+
@@ -359,7 +350,7 @@ The Minimizer object has a few public methods:
 .. method:: fmin(**kws)
 
    perform fit with Nelder-Mead downhill simplex algorithm.  Keywords will be passed directly to
-   `scipy.optimize.fmin <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html>`_.
+   :func:`scipy.optimize.fmin`.
 
     +------------------+----------------+------------------------------------------------------------+
     | :meth:`fmin`     |  Default Value | Description                                                |
@@ -376,7 +367,7 @@ The Minimizer object has a few public methods:
 .. method:: scalar_minimize(method='Nelder-Mead', hess=None, tol=None, **kws)
 
    perform fit with any of the scalar minimization algorithms supported by
-   `scipy.optimize.minimize <http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html>`_.
+   :func:`scipy.optimize.minimize`.
 
     +-------------------------+-----------------+-----------------------------------------------------+
     | :meth:`scalar_minimize` | Default Value   | Description                                         |
@@ -423,11 +414,13 @@ Getting and Printing Fit Reports
    generate and return text of report of best-fit values, uncertainties,
    and correlations from fit.
 
-   :param params:       Parameters from fit.
+   :param params:       Parameters from fit, or Minimizer object as returned by :func:`minimize`.
    :param modelpars:    Parameters with "Known Values" (optional, default None)
    :param show_correl:  whether to show list of sorted correlations [``True``]
    :param min_correl:   smallest correlation absolute value to show [0.1]
 
+   If the first argument is a Minimizer object, as returned from
+   :func:`minimize`, the report will include some goodness-of-fit statistics.
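+
+   For example::
+
+       out = minimize(residual, params)
+       print(fit_report(out, min_correl=0.5))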
 
 .. function:: report_fit(params, modelpars=None, show_correl=True, min_correl=0.1)
 
diff --git a/doc/index.rst b/doc/index.rst
index 4cb3027..0167344 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -1,73 +1,71 @@
 .. lmfit documentation master file,
 
-Non-Linear Least-Square Minimization for Python
-================================================
+Non-Linear Least-Squares Minimization and Curve-Fitting for Python
+===========================================================================
 
-.. _scipy.optimize.leastsq: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.leastsq.html
-.. _scipy.optimize.l_bfgs_b: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
-.. _scipy.optimize.anneal: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.anneal.html
-.. _scipy.optimize.fmin:   http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin.html
-.. _scipy.optimize.cobyla: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.cobyla.html
-.. _scipy.optimize: http://docs.scipy.org/doc/scipy/reference/optimize.html
+.. _Levenberg-Marquardt:     http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
+.. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
+.. _Nelder-Mead:             http://en.wikipedia.org/wiki/Nelder-Mead_method
 
-.. _Nelder-Mead: http://en.wikipedia.org/wiki/Nelder-Mead_method
-.. _Levenberg-Marquardt: http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
-.. _L-BFGS:  http://en.wikipedia.org/wiki/Limited-memory_BFGS
+The lmfit Python package provides a simple and flexible interface to
+non-linear optimization and curve fitting problems.  Lmfit extends the
+optimization capabilities of :mod:`scipy.optimize`.  Initially designed to
+extend the `Levenberg-Marquardt`_ algorithm in
+:func:`scipy.optimize.leastsq`, lmfit supports most of the
+optimization methods from :mod:`scipy.optimize`.  It also provides a simple
+way to apply this extension to *curve fitting* problems.
 
-.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
+The key concept in lmfit is that instead of using plain floating-point
+values for the variables to be optimized (as all the optimization routines
+in :mod:`scipy.optimize` do), optimizations are done using
+:class:`Parameter` objects.  A :class:`Parameter` can have its value fixed
+or varied, have upper and/or lower bounds placed on its value, or have
+values that are evaluated from algebraic expressions of other Parameter
+values.  This is all done outside the optimization routine, so that these
+bounds and constraints can be applied to **all** optimization routines from
+:mod:`scipy.optimize`, and with a more Pythonic interface than any of the
+routines that do provide bounds.
 
-The lmfit Python package provides a simple, flexible interface to
-non-linear optimization or curve fitting problems.  The package extends the
-optimization capabilities of `scipy.optimize`_ by replacing floating
-pointing values for the variables to be optimized with Parameter objects.
-These Parameters can be fixed or varied, have upper and/or lower bounds
-placed on its value, or written as an algebraic expression of other
-Parameters.
+By using :class:`Parameter` objects instead of plain variables, the
+objective function does not have to be rewritten to reflect every change
+of what is varied in the fit, or whether relationships or constraints are
+placed on the Parameters.  This simplifies the writing of models, and
+gives the user more flexibility in using and testing variations of that model.
 
-The principal advantage of using Parameters instead of simple variables is
-that the objective function does not have to be rewritten to reflect every
-change of what is varied in the fit, or what relationships or constraints
-are placed on the Parameters.  This means a scientific programmer can write
-a general model that encapsulates the phenomenon to be optimized, and then
-allow user of that model to change what is varied and fixed, what range of
-values is acceptable for Parameters, and what constraints are placed on the
-model.  The ease with which the model can be changed also allows one to
-easily test the significance of certain Parameters in a fitting model.
 
-The lmfit package allows a choice of several optimization methods available
-from `scipy.optimize`_.  The default, and by far best tested optimization
-method used is the `Levenberg-Marquardt`_ algorithm from
-from `MINPACK-1`_ as implemented in `scipy.optimize.leastsq`_.
-This method is by far the most tested and best support method in lmfit, and
-much of this document assumes this algorithm is used unless explicitly
-stated. An important point for many scientific analysis is that this is
-only method that automatically estimates uncertainties and correlations
-between fitted variables from the covariance matrix calculated during the fit.
+Lmfit supports several of the optimization methods from
+:mod:`scipy.optimize`.  The default, and by far the best tested,
+optimization method (and the origin of the name) is the
+`Levenberg-Marquardt`_ algorithm of :func:`scipy.optimize.leastsq` and
+:func:`scipy.optimize.curve_fit`.  Much of this document assumes this
+algorithm is used unless explicitly stated.  An important point for many
+scientific analyses is that this is the only method that automatically
+estimates uncertainties and correlations between fitted variables from the
+covariance matrix calculated during the fit.  Because the approach derived
+from `MINPACK-1`_ of using the covariance matrix to determine uncertainties
+is sometimes questioned (and sometimes rightly so), lmfit supports methods
+to do a brute force search of the confidence intervals and correlations for
+sets of parameters.
 
-A few other optimization routines are also supported, including
-`Nelder-Mead`_ simplex downhill, Powell's method, COBYLA, Sequential Least
-Squares methods as implemented in `scipy.optimize.fmin`_, and several
-others from `scipy.optimize`_.  In their native form, some of these methods
-setting allow upper or lower bounds on parameter variables, or adding
-constraints on fitted variables.  By using Parameter objects, lmfit allows
-bounds and constraints for *all* of these methods, and makes it easy to
-swap between methods without hanging the objective function or set of
-Parameters.
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
 
-Finally, because the approach derived from `MINPACK-1`_ usin the covariance
-matrix to determine uncertainties is sometimes questioned (and sometimes
-rightly so), lmfit supports methods to do a brute force search of the
-confidence intervals and correlations for sets of parameters.
+The lmfit package is an open-source project, and this document is a work
+in progress.  If you are interested in participating in this effort, please
+use the `lmfit github repository`_.
 
-lmfit and this document are a work in progress.
 
 .. toctree::
    :maxdepth: 2
 
+   intro
    installation
    parameters
    fitting
+   model
+   builtin_models
    confidence
    bounds
    constraints
-   models1d
+
+
+
diff --git a/doc/installation.rst b/doc/installation.rst
index ff1cb24..2c5ed93 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -5,54 +5,42 @@ Downloading and Installation
 Prerequisites
 ~~~~~~~~~~~~~~~
 
-The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.11 or
-higher is recommended, but extensive testing on version compatibility has
-not been done.  Initial tests do work with Python 3.2, but little testing
-with Python 3 has yet been done.  Scipy seems to not yet be available for
-Python 3.3.  No testing has been done with 64-bit architectures, but as
-this package is pure Python, no significant troubles are expected. Nose is
-a requirement for running the test suite.
-
-.. _uncertainties: http://packages.python.org/uncertainties/
-
-If installed, the `uncertainties`_ package will be used for propagation of
-uncertainties to constrained parameters.
+The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
+higher is recommended, but extensive testing on compatibility with various
+versions of scipy has not been done.  Lmfit does work with Python 2.7, 3.2,
+and 3.3.  No testing has been done with Python 3.4, but as the package is
+pure Python, relying only on scipy and numpy, no significant troubles are
+expected.  Nose is required for running the test suite, and IPython and
+matplotlib are recommended.  If Pandas is available, it will be used in
+portions of lmfit.
 
 
 Downloads
 ~~~~~~~~~~~~~
 
-The latest stable version is available from PyPI:
-
-.. _lmfit-0.7.2.tar.gz (PyPI): http://pypi.python.org/packages/source/l/lmfit/lmfit-0.7.2.tar.gz
-.. _lmfit-0.7.2.win32-py2.6.exe (PyPI): http://pypi.python.org/packages/2.6/l/lmfit/lmfit-0.7.2.win32-py2.6.exe
-.. _lmfit-0.7.2.win32-py2.7.exe (PyPI): http://pypi.python.org/packages/2.7/l/lmfit/lmfit-0.7.2.win32-py2.7.exe
-.. _lmfit-0.7.2.win32-py3.2.exe (PyPI): http://pypi.python.org/packages/3.2/l/lmfit/lmfit-0.7.2.win32-py3.2.exe
-
 .. _lmfit github repository:   http://github.com/lmfit/lmfit-py
-.. _lmfit at pypi:             http://pypi.python.org/pypi/lmfit/
 .. _Python Setup Tools:        http://pypi.python.org/pypi/setuptools
+.. _pip:  https://pip.pypa.io/
+
+The latest stable version of lmfit is available from `PyPI <http://pypi.python.org/pypi/lmfit/>`_.
+
+Installation
+~~~~~~~~~~~~~~~~~
+
+If you have `pip`_  installed, you can install lmfit with::
 
-+----------------------+------------------+--------------------------------------------+
-|  Download Option     | Python Versions  |  Location                                  |
-+======================+==================+============================================+
-|  Source Kit          | 2.6, 2.7, 3.2    | -  `lmfit-0.7.2.tar.gz (PyPI)`_            |
-+----------------------+------------------+--------------------------------------------+
-|  Win32 Installer     |   2.6            | -  `lmfit-0.7.2.win32-py2.6.exe (PyPI)`_   |
-+----------------------+------------------+--------------------------------------------+
-|  Win32 Installer     |   2.7            | -  `lmfit-0.7.2.win32-py2.7.exe (PyPI)`_   |
-+----------------------+------------------+--------------------------------------------+
-|  Win32 Installer     |   3.2            | -  `lmfit-0.7.2.win32-py3.2.exe (PyPI)`_   |
-+----------------------+------------------+--------------------------------------------+
-|  Development Version |   all            |  use `lmfit github repository`_            |
-+----------------------+------------------+--------------------------------------------+
-
-if you have `Python Setup Tools`_  installed, you can download and install
-the lmfit-py Package simply with::
+    pip install lmfit
+
+or, if you have `Python Setup Tools`_ installed, you can install lmfit with::
 
    easy_install -U lmfit
 
 
+or, you can download the source kit, unpack it, and install it with::
+
+   python setup.py install
+
+
 Development Version
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -61,13 +49,11 @@ To get the latest development version, use::
    git clone http://github.com/lmfit/lmfit-py.git
 
 
-Installation
-~~~~~~~~~~~~~~~~~
-
-Installation from source on any platform is::
+and install using::
 
    python setup.py install
 
+
 Acknowledgements
 ~~~~~~~~~~~~~~~~~~
 
@@ -75,9 +61,12 @@ LMFIT was originally written by Matthew Newville.  Substantial code and
 documentation improvements, especially for improved estimates of confidence
 intervals was provided by Till Stensitzki.  The implementation of parameter
 bounds as described in the MINUIT documentation is taken from Jonathan
-J. Helmus' leastsqbound code, with permission. Many valuable suggestions
-for improvements have come from Christoph Deil.  The code obviously depends
-on, and owes a very large debt to the code in scipy.optimize.  Several
+J. Helmus' leastsqbound code, with permission.  The code for propagation of
+uncertainties is taken from Eric O. Le Bigot's uncertainties package, with
+permission.  Much of the work on improved unit testing and high-level model
+functions was done by Daniel B. Allen.  Many valuable suggestions for
+improvements have come from Christoph Deil.  The code obviously depends on,
+and owes a very large debt to the code in scipy.optimize.  Several
 discussions on the scipy mailing lists have also led to improvements in
 this code.
 
diff --git a/doc/intro.rst b/doc/intro.rst
new file mode 100644
index 0000000..c8e4acd
--- /dev/null
+++ b/doc/intro.rst
@@ -0,0 +1,150 @@
+.. _intro_chapter:
+
+===========================================================
+Getting started with Non-Linear Least-Squares Fitting
+===========================================================
+
+The lmfit package is designed to provide simple tools to help you build
+complex fitting models for non-linear least-squares problems and apply
+these models to real data.  This section gives an overview of the concepts
+and describes how to set up and perform simple fits.  Some basic knowledge
+of Python, numpy, and modeling data is assumed.
+
+To do a non-linear least-squares fit of a model to data or for a variety of other
+optimization problems, the main task is to write an *objective function*
+that takes the values of the fitting variables and calculates either a
+scalar value to be minimized or an array of values that is to be minimized
+in the least-squares sense.   For many data fitting processes, the
+least-squares approach is used, and the objective function should
+return an array of (data-model), perhaps scaled by some weighting factor
+such as the inverse of the uncertainty in the data.  For such a problem,
+the chi-square (:math:`\chi^2`) statistic is often defined as:
+
+.. math::
+
+ \chi^2 =  \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2}
+
+where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm
+model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
+variables in the model to be optimized in the fit, and :math:`\epsilon_i`
+is the estimated uncertainty in the data.
+
+In a traditional non-linear fit, one writes an objective function that takes the
+variable values and calculates the residual :math:`y^{\rm meas}_i -
+y_i^{\rm model}({\bf{v}})`, or the residual scaled by the data
+uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
+model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.  As a
+simple example, one might write an objective function like this::
+
+    from numpy import exp, sin
+
+    def residual(vars, x, data, eps_data):
+        amp = vars[0]
+        phaseshift = vars[1]
+        freq = vars[2]
+        decay = vars[3]
+
+        model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay)
+
+        return (data-model)/eps_data
+
+To perform the minimization with :mod:`scipy.optimize`, one would do::
+
+    from scipy.optimize import leastsq
+    vars = [10.0, 0.2, 3.0, 0.007]
+    out = leastsq(residual, vars, args=(x, data, eps_data))
+
+Though it is wonderful to be able to use python for such optimization
+problems, and the scipy library is robust and easy to use, the approach
+here is not terribly different from how one would do the same fit in C or
+Fortran.  There are several practical challenges to using this approach,
+including:
+
+  a) The user has to keep track of the order of the variables, and their
+     meaning -- vars[0] is the amplitude, vars[2] is the frequency, and so
+     on, although there is no intrinsic meaning to this order.
+
+  b) If the user wants to fix a particular variable (*not* vary it in the
+     fit), the residual function has to be altered to have fewer variables,
+     and have the corresponding constant value passed in some other way.
+     While reasonable for simple cases, this quickly becomes significant
+     work for more complex models, and greatly complicates modeling for
+     people not intimately familiar with the details of the fitting code.
+
+  c) There is no simple, robust way to put bounds on values for the
+     variables, or enforce mathematical relationships between the
+     variables.  In fact, those optimization methods that do provide
+     bounds require bounds to be set for all variables, with separate
+     arrays that are in the same arbitrary order as the variable values.
+     Again, this is acceptable for small or one-off cases, but becomes
+     painful if the fitting model needs to change.
+
+These shortcomings are due solely to the use of traditional arrays of
+variables, which closely matches the implementation of the underlying
+Fortran code.  The lmfit module overcomes these shortcomings by using a
+core reason for using Python -- objects.  The key concept for lmfit is to
+use :class:`Parameter` objects instead of plain floating point numbers as
+the variables for the fit.  By using :class:`Parameter` objects (or the
+closely related :class:`Parameters` -- a dictionary of :class:`Parameter`
+objects), one can
+
+   a) not care about the order of variables, but refer to Parameters
+      by meaningful names.
+   b) place bounds on Parameters as attributes, without worrying about order.
+   c) fix Parameters, without having to rewrite the objective function.
+   d) place algebraic constraints on Parameters.
+
+To illustrate the value of this approach, we can rewrite the above example
+as::
+
+    from numpy import exp, sin
+    from lmfit import minimize, Parameters
+
+    def residual(params, x, data, eps_data):
+        amp = params['amp'].value
+        pshift = params['phase'].value
+        freq = params['frequency'].value
+        decay = params['decay'].value
+
+        model = amp * sin(x * freq + pshift) * exp(-x*x*decay)
+
+        return (data-model)/eps_data
+
+    params = Parameters()
+    params.add('amp', value=10)
+    params.add('decay', value=0.007)
+    params.add('phase', value=0.2)
+    params.add('frequency', value=3.0)
+
+    out = minimize(residual, params, args=(x, data, eps_data))
+
+
+At first look, we simply replaced a list of values with a dictionary,
+accessed by name -- not a huge improvement.  But each of the named
+:class:`Parameter` objects in the :class:`Parameters` object holds additional
+attributes that modify the value during the fit.  For example, Parameters can
+be fixed or bounded.  This can be done when they are defined::
+
+    params = Parameters()
+    params.add('amp', value=10, vary=False)
+    params.add('decay', value=0.007, min=0.0)
+    params.add('phase', value=0.2)
+    params.add('frequency', value=3.0, max=10)
+
+(where ``vary=False`` will prevent the value from changing in the fit, and
+``min=0.0`` will set a lower bound on that parameter's value) or after
+being defined, by setting the corresponding attributes::
+
+    params['amp'].vary = False
+    params['decay'].min = 0.10
+
+Importantly, our function to be minimized remains unchanged.
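+
+Algebraic constraints (item d above) work the same way.  As a purely
+illustrative sketch, using the constraint expressions described in the
+constraints chapter, one parameter can be defined in terms of another,
+again without touching the objective function::
+
+    params.add('period', value=2.0, min=0.5)
+    params.add('frequency', expr='6.2832 / period')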
+
+The `params` object can be copied and modified to make many user-level
+changes to the model and fitting process.  Of course, most of the
+information about how your data is modeled goes into the objective
+function, but the approach here allows some external control, that is,
+control by the **user** of the objective function instead of only by its
+author.
+
+Finally, in addition to the :class:`Parameters` approach to fitting data,
+lmfit allows you to easily switch optimization methods without rewriting
+your objective function, and provides tools for writing fitting reports and
+for better determining the confidence levels for Parameters.
diff --git a/doc/model.rst b/doc/model.rst
new file mode 100644
index 0000000..c741fb9
--- /dev/null
+++ b/doc/model.rst
@@ -0,0 +1,572 @@
+.. _model_chapter:
+
+=================================================
+Modeling Data and Curve Fitting
+=================================================
+
+A very common application of least-squares minimization is *curve fitting*,
+where one has a parametrized model function meant to explain some
+phenomena, and wants to adjust the numerical values for the model to
+most closely match some particular data.  Within the :mod:`scipy` world,
+such curve fitting problems are commonly solved with
+:func:`scipy.optimize.curve_fit`, which simply calls
+:func:`scipy.optimize.leastsq`.  As lmfit is a high-level wrapper around
+:func:`scipy.optimize.leastsq`, it can be used for curve-fitting problems,
+but here we discuss an even easier way to do it, one that is closer in
+spirit to :func:`scipy.optimize.curve_fit`, but better.
+
+The :class:`Model` class makes it easy to turn a model function that
+calculates a model for your data into a fitting model.  In an effort to
+make simple things truly simple, lmfit also provides canonical definitions
+for many known lineshapes such as Gaussian or Lorentzian peaks and
+Exponential decays that are widely used in many scientific domains.  These
+are available in the :mod:`models` module that will be discussed in more
+detail in the next chapter (:ref:`builtin_models_chapter`).  We mention it
+here as you may want to consult that list before writing your own model.
+For now, we focus on turning python function into high-level fitting models
+with the :class:`Model` class, and using these to fit data.
+
+
+Example: Fit data to Gaussian profile
+================================================
+
+Let's start with a simple and common example of fitting data to a Gaussian
+peak.  As we will see, there is a built-in :class:`GaussianModel` class that
+provides a model function for a Gaussian profile, but here we'll build our
+own.  We start with a definition of the model function that we might want to
+use to fit to some data::
+
+    >>> from numpy import sqrt, pi, exp, linspace
+    >>>
+    >>> def gaussian(x, amp, cen, wid):
+    ...    "1-d gaussian: gaussian(x, amp, cen, wid)"
+    ...    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+    ...
+
+To fit this function to some data :math:`y(x)`, represented by the arrays
+``y`` and ``x``, with :func:`scipy.optimize.curve_fit` we would do
+something like::
+
+    >>> from scipy.optimize import curve_fit
+    >>>
+    >>> x, y = read_data_from_somewhere(....)
+    >>>
+    >>> init_vals = [5, 5, 1]     # for [amp, cen, wid]
+    >>> best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
+    >>> print best_vals
+
+
+That is, we read in data from somewhere, make an initial guess of the model
+values, and run ``curve_fit`` with the model function, data arrays, and
+initial guesses.  The results returned are the optimal values for the
+parameters and the covariance matrix.   It's pretty simple to do, but
+misses many of the key benefits of lmfit.
+
+
+To solve this with lmfit, we could write a residual function, but such a
+residual function would be fairly simple (essentially, ``data - model``,
+possibly with some weighting), and we would need to define and use
+appropriately named parameters.  Though convenient, it also becomes
+somewhat of a burden to keep all the parameter names straight.  After doing
+this a few times it appears as a recurring pattern, and we can imagine
+automating this process.  That's where the :class:`Model` class comes in.
+We can pass this class the ``gaussian`` function, and it will automatically
+generate the appropriate residual function and the corresponding parameters
+from the function signature itself::
+
+    >>> from lmfit import Model
+    >>> gmod = Model(gaussian)
+    >>> for name, par in gmod.params.items():
+    ...     print(name, par)
+    ...
+    'amp', <Parameter 'amp', None, bounds=[None:None]>
+    'wid', <Parameter 'wid', None, bounds=[None:None]>
+    'cen', <Parameter 'cen', None, bounds=[None:None]>
+    >>> print("Independent Variables: ", gmod.independent_vars)
+    'Independent Variables: ', ['x']
+
+The Model ``gmod`` is constructed to have a ``params`` member that holds the
+:class:`Parameters` for the model, and an ``independent_vars`` member that
+holds the names of the independent variables.  By default, the first
+argument of the function is taken as the independent variable, and the
+rest of the arguments are used for variable Parameters.  Thus, for the ``gaussian``
+function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
+``x`` is the independent variable -- taken directly from the signature of
+the model function.
+
+On creation of the model, the parameters are not initialized (the values
+are all ``None``), and will need to be given initial values before the
+model can be used.  This can be done in one of two ways, or a mixture of
+the two.  First, the initial values for the model's parameters can be set
+explicitly, as with::
+
+    >>> gmod.params['amp'].value = 10.0
+
+and so on.  This is also useful for setting parameter bounds and so forth.
+Alternatively, one can use the :meth:`eval` method (to evaluate the model)
+or the :meth:`fit` method (to fit data to this model) with explicit keyword
+arguments for the parameter values.  For example, one could use
+:meth:`eval` to calculate the predicted function::
+
+    >>> x = linspace(0, 10, 201)
+    >>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
+
+So far, this is a slightly long-winded way to calculate a Gaussian
+function.   But now that the model is set up, we can also use its
+:meth:`fit` method to fit this model to data, as with::
+
+    result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+Putting everything together, the script to do such a fit (included in the
+``examples`` folder with the source code) is:
+
+.. literalinclude:: ../examples/doc_model1.py
+
+which is pretty compact and to the point.  Of course, the parameters in the
+returned ``result`` have been updated, with the covariance matrix pulled
+apart into uncertainties and correlations, so that the results printed out
+are::
+
+    [[Variables]]
+         amp:     8.880218 +/- 0.1135949 (1.28%) initial =  5
+         cen:     5.658661 +/- 0.01030495 (0.18%) initial =  5
+         wid:     0.6976547 +/- 0.01030495 (1.48%) initial =  1
+    [[Correlations]] (unreported correlations are <  0.250)
+         C(amp, wid)                  =  0.577
+
+
+and the plot generated gives:
+
+
+.. image:: _images/model_fit1.png
+   :target: _images/model_fit1.png
+   :width: 50%
+
+which shows the data in blue dots, the best fit as a solid red line, and
+the initial fit as a black dashed line.
+
+We emphasize here that the fit to this model function was really performed
+with 2 lines of code.  These lines clearly express that we want to turn the
+``gaussian`` function into a fitting model, and then fit the :math:`y(x)`
+data to this model, starting with values of 5 for ``amp``, 5 for ``cen``
+and 1 for ``wid``::
+
+    gmod = Model(gaussian)
+    result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+which compares well to :func:`scipy.optimize.curve_fit`::
+
+    best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
+
+except that all the other features of lmfit are included.
+
+Some model functions may be more complicated than the Gaussian function
+here.  We'll discuss these below, but for now we've shown that at least the
+wrapping of a simple model function for curve fitting is easy.
+
+
+The :class:`Model` class
+=======================================
+
+.. module:: model
+
+The :class:`Model` class provides a general way to wrap a pre-defined
+function as a fitting model.
+
+.. class:: Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, components=None]]]]])
+
+    Create a model based on the user-supplied function.  This uses
+    introspection to automatically convert argument names of the
+    function to Parameter names.
+
+    :param func: function to be wrapped
+    :type func: callable
+    :param independent_vars: list of argument names to ``func`` that are independent variables.
+    :type independent_vars: ``None`` (default) or list of strings.
+    :param param_names: list of argument names to ``func`` that should be made into Parameters.
+    :type param_names: ``None`` (default) or list of strings
+    :param missing: how to handle missing values.
+    :type missing: one of ``None`` (default), 'drop', or 'raise'
+    :param prefix: prefix to add to all parameter names to distinguish components.
+    :type prefix: string
+    :param components: list of model components for a composite fit (usually handled internally).
+    :type components: ``None`` (default) or list
+
+
+Methods and Attributes of the :class:`Model` class
+----------------------------------------------------
+
+.. method:: guess_starting_values(data, **kws)
+
+   by default this is left to raise a ``NotImplementedError``, but may be
+   overridden by subclasses.  Generally, this method should take some
+   values for ``data`` and use them to construct reasonable starting values for
+   the parameters.
+
+.. method:: set_paramval(parname, value[, min=None[, max=None[, vary=True]]])
+
+   set the value for a named parameter.  This is convenient for setting
+   initial values.  The ``parname`` can include the model's ``prefix`` or
+   not.
+
+   :param parname: parameter name.
+   :type parname: string
+   :param value: value for parameter
+   :type value: float
+   :param min:  lower bound for parameter value
+   :type min: ``None`` or float
+   :param max:  upper bound for parameter value
+   :type max: ``None`` or float
+   :param vary:  whether to vary parameter in fit.
+   :type vary: boolean
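+
+   For example::
+
+       gmod.set_paramval('amp', 10.0, min=0.0)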
+
+
+.. method:: eval(params=None[, **kws])
+
+   evaluate the model function for a set of parameters and inputs.
+
+   :param params: parameters to use for the evaluation.
+   :type params: ``None`` (default) or Parameters
+
+   :return:       ndarray for model given the parameters and other arguments.
+
+   If ``params`` is ``None``, the internal ``params`` will be used.
+
+   Note that all other arguments for the model function (including all the
+   independent variables!) will need to be passed in using keyword
+   arguments.
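+
+   For example, for the Gaussian model above (assuming its parameters have
+   been initialized)::
+
+       y1 = gmod.eval(x=x)              # use the internal params
+       y2 = gmod.eval(x=x, cen=6.5)     # override a single parameter value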
+
+
+.. method:: fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
+
+   perform a fit of the model to the ``data`` array.
+
+   :param data: array of data to be fitted.
+   :type data: ndarray-like
+   :param params: parameters to use for fit.
+   :type params: ``None`` (default) or Parameters
+   :param weights: weights to use for the fit.
+   :type weights: ``None`` (default) or ndarray-like.
+   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
+   :type  method:  string (default ``leastsq``)
+   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``)
+   :param iter_cb:  function to be called at each fit iteration
+   :type  iter_cb:  callable or ``None``
+
+   :return:       fit result object.
+
+   If ``params`` is ``None``, the internal ``params`` will be used. If it
+   is supplied, these will replace the internal ones.  If supplied,
+   ``weights`` must be an ndarray-like object of the same size and shape as
+   ``data``.
+
+   Note that other arguments for the model function (including all the
+   independent variables!) will need to be passed in using keyword
+   arguments.
+
+   The result returned from :meth:`fit` will contain all of the items
+   returned from :func:`minimize` (see :ref:`Table of Fit Results
+   <goodfit-table>`) plus those listed in the :ref:`Table of Model Fit Results <modelfit-table>`.
+
+.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+
+   return result of :func:`fit_report` after completing :meth:`fit`.
+
+
+.. _modelfit-table:
+
+Table of Model Fit Results: These values are included in the return value
+from :meth:`Model.fit`, in addition to the standard Goodness-of-Fit
+statistics and fit results given in :ref:`Table of Fit Results
+<goodfit-table>`.
+
+   +----------------------------+------------------------------------------------------+
+   | result attribute           |  Description / Formula                               |
+   +============================+======================================================+
+   | ``init_params``            | initial set of parameters                            |
+   +----------------------------+------------------------------------------------------+
+   | ``init_fit``               | initial estimate of fit to data                      |
+   +----------------------------+------------------------------------------------------+
+   | ``best_fit``               | final estimate of fit to data                        |
+   +----------------------------+------------------------------------------------------+
+
+
+.. attribute:: independent_vars
+
+   list of strings for independent variables.
+
+.. attribute:: param_names
+
+   list of strings of parameter names.
+
+.. attribute:: params
+
+   :class:`Parameters` object for the model
+
+.. attribute:: prefix
+
+   prefix used for name-mangling of parameter names.  The default is ''.
+   If a particular :class:`Model` has arguments ``amplitude``,
+   ``center``, and ``sigma``, these would become the parameter names.
+   Using a prefix of ``g1_`` would convert these parameter names to
+   ``g1_amplitude``, ``g1_center``, and ``g1_sigma``.   This can be
+   essential to avoid name collision in composite models.
+
+.. attribute:: missing
+
+   what to do for missing values.  The choices are
+
+    * ``None``: Do not check for null or missing values (default)
+    * ``'none'``: Do not check for null or missing values.
+    * ``'drop'``: Drop null or missing observations in data.  If pandas is
+                installed, ``pandas.isnull`` is used, otherwise :func:`numpy.isnan` is used.
+    * ``'raise'``: Raise a (more helpful) exception when data contains null
+                  or missing values.
+
+.. attribute:: components
+
+   a list of instances of :class:`Model` that make up a composite model.
+   Normally, you will not need to use this, but it is used by :class:`Model`
+   itself when constructing a composite model (that is, adding models together).
+
+
+Determining parameter names and independent variables for a function
+-----------------------------------------------------------------------
+
+The :class:`Model` created from the supplied function ``func`` will create
+a :class:`Parameters` object with names inferred from the function
+arguments, and a residual function is automatically constructed.
+
+
+By default, the independent variable is taken as the first argument to the
+function.  You can explicitly set this, of course, and will need to if the
+independent variable is not first in the list, or if there is more than
+one independent variable.
+
+If not specified, Parameters are constructed from all positional arguments
+and all keyword arguments that have a default value that is numerical, except
+the independent variable, of course.   Importantly, the Parameters can be
+modified after creation.  In fact, you'll have to do this because none of the
+parameters have valid initial values.  You can place bounds and constraints
+on Parameters, or fix their values.
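+
+As a sketch of these rules (with a hypothetical function, and the output
+one would expect from the introspection described above)::
+
+    >>> def sine_decay(t, amp, freq, offset=0.0, label=None):
+    ...    return offset + amp*np.sin(t*freq)*np.exp(-t/10.0)
+    ...
+    >>> mod = Model(sine_decay)
+    >>> print mod.independent_vars
+    ['t']
+    >>> print sorted(mod.param_names)
+    ['amp', 'freq', 'offset']
+
+Here ``t`` is the independent variable, and ``label`` is skipped because
+its default value is not numerical.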
+
+
+
+More Details on building models from functions
+============================================================
+
+
+Here we explore some of the variations of building a :class:`Model` from a
+user-defined function that didn't get mentioned in the example above for
+the Gaussian model.
+
+
+Explicitly specifying ``independent_vars``
+-------------------------------------------------
+
+As in the example above of the Gaussian model, creating a :class:`Model`
+from a function is fairly easy::
+
+    >>> def decay(t, tau, N):
+    ...    return N*np.exp(-t/tau)
+    ...
+    >>> decay_model = Model(decay)
+    >>> print decay_model.independent_vars
+    ['t']
+    >>> for pname, par in decay_model.params.items():
+    ...     print pname, par
+    ...
+    tau <Parameter 'tau', None, bounds=[None:None]>
+    N <Parameter 'N', None, bounds=[None:None]>
+
+Here, ``t`` is assumed to be the independent variable because it comes
+first, and the other function arguments are used to create the
+remaining Parameters.
+
+If you wanted ``tau`` to be the independent variable in the above example,
+you would just do this::
+
+    >>> decay_model = Model(decay, independent_vars=['tau'])
+    >>> print decay_model.independent_vars
+    ['tau']
+    >>> for pname, par in decay_model.params.items():
+    ...     print pname, par
+    ...
+    t <Parameter 't', None, bounds=[None:None]>
+    N <Parameter 'N', None, bounds=[None:None]>
+
+
+Functions with keyword arguments
+-----------------------------------------
+
+If the model function has keyword arguments, these are turned into
+Parameters if the supplied default value is a valid number (but not
+``None``)::
+
+    >>> def decay2(t, tau, N=10, check_positive=False):
+    ...    if check_positive:
+    ...        arg = abs(t)/max(1.e-9, abs(tau))
+    ...    else:
+    ...        arg = t/tau
+    ...    return N*np.exp(-arg)
+    ...
+    >>> mod = Model(decay2)
+    >>> for pname, par in mod.params.items():
+    ...     print pname, par
+    ...
+    tau <Parameter 'tau', None, bounds=[None:None]>
+    N <Parameter 'N', 10, bounds=[None:None]>
+
+Here, even though ``N`` is a keyword argument to the function, it is turned
+into a parameter, with the default numerical value as its initial value.
+By default, it is permitted to be varied in the fit -- the 10 is taken as
+an initial value, not a fixed value.  On the other hand, the
+``check_positive`` keyword argument was not converted to a parameter
+because it has a boolean default value.
+
+Defining a ``prefix`` for the Parameters
+--------------------------------------------
+
+As we will see in the next chapter when combining models, it is sometimes
+necessary to decorate the parameter names in the model, while still having
+them map correctly to the arguments of the underlying model function.
+This would be necessary, for example, if two parameters in a composite
+model (see :ref:`composite_models_section` or examples in the next
+chapter) would otherwise have the same name.  To avoid this, we can add a
+``prefix`` to the :class:`Model`, which will automatically do this
+mapping for us::
+
+    >>> def myfunc(x, amplitude=1, center=0, sigma=1):
+    ...
+
+    >>> mod = Model(myfunc, prefix='f1_')
+    >>> for pname, par in mod.params.items():
+    ...     print pname, par
+    ...
+    f1_amplitude <Parameter 'f1_amplitude', None, bounds=[None:None]>
+    f1_center <Parameter 'f1_center', None, bounds=[None:None]>
+    f1_sigma <Parameter 'f1_sigma', None, bounds=[None:None]>
+
+You would refer to these parameters as ``f1_amplitude`` and so forth, and
+the model will know to map these to the ``amplitude`` argument of ``myfunc``.
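+
+For example, continuing with the prefixed model above, setting::
+
+    >>> mod.params['f1_amplitude'].value = 10.0
+
+gives the ``amplitude`` argument of ``myfunc`` an initial value of 10.0.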
+
+
+More on initializing model parameters
+-----------------------------------------
+
+As mentioned above, the parameters created by :class:`Model` generally
+have invalid initial values of ``None``.  These values must be
+initialized in order for the model to be evaluated or used in a fit.  There
+are three ways to do this initialization that can be used in any
+combination:
+
+  1. You can supply initial values in the definition of the model function.
+  2. You can initialize the parameters after the model has been created.
+  3. You can supply initial values for the parameters to the :meth:`eval`
+     or :meth:`fit` methods.
+
+For option 1, consider doing::
+
+    >>> def myfunc(x, a=1, b=0):
+    ...     ...
+
+instead of::
+
+    >>> def myfunc(x, a, b):
+    ...     ...
+
+For option 2, you can do::
+
+    >>> mod = Model(myfunc)
+    >>> mod.params['a'].value = 1.0
+    >>> mod.params['b'].value = 0.1
+
+An advantage of this approach is that you can set other parameter
+attributes such as bounds and constraints.
+
+For option 3, give explicit initial values for the parameters::
+
+    >>> y1 = mod.eval(x=x, a=1, b=3)
+
+Again, these methods can be combined.  For example, you can set parameter
+values and bounds as with option 2, but then change the initial value with
+option 3.
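+
+A minimal sketch of that combination, assuming ``myfunc`` and an array
+``x`` as above::
+
+    >>> mod = Model(myfunc)
+    >>> mod.params['a'].value = 1.0    # initial value, as in option 2
+    >>> mod.params['a'].min = 0.0      # lower bound, kept for any fit
+    >>> y1 = mod.eval(x=x, a=2.0)      # a=2.0 overrides the initial value (option 3)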
+
+
+.. _composite_models_section:
+
+Creating composite models
+=============================
+
+
+One of the most interesting features of the :class:`Model` class is that
+models can be added together to give a composite model, with parameters
+from all the component models available to influence the total sum of the
+separate component models.  This will become even more useful in the next
+chapter, when pre-built subclasses of :class:`Model` are discussed.
+
+For now, we'll consider a simple example that builds a model of a
+Gaussian plus a line.  Obviously, we could write a single model function
+that includes both components::
+
+    def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
+        "line + 1-d gaussian"
+
+        gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+        line = slope * x + intercept
+        return gauss + line
+
+and use that with::
+
+    mod = Model(gaussian_plus_line)
+
+but, of course, we already have a function for a Gaussian, and we might
+later discover that a linear background isn't sufficient, which would
+mean altering the model yet again.  As an alternative, we could just
+define a linear function::
+
+    def line(x, slope, intercept):
+        "a line"
+        return slope * x + intercept
+
+and build a composite model with::
+
+    mod = Model(gaussian) + Model(line)
+
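+The parameters of the composite model are simply the union of those of
+its components (a sketch -- the exact ordering of the names may differ)::
+
+    >>> mod = Model(gaussian) + Model(line)
+    >>> sorted(mod.params.keys())
+    ['amp', 'cen', 'intercept', 'slope', 'wid']
+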
+This composite model can be used as:
+
+.. literalinclude:: ../examples/doc_model2.py
+
+which prints out the results::
+
+
+    [[Fit Statistics]]
+        # function evals   = 44
+        # data points      = 101
+        # variables        = 5
+        chi-square         = 2.579
+        reduced chi-square = 0.027
+    [[Variables]]
+         amp:           8.459311 +/- 0.1241451 (1.47%) initial =  5
+         cen:           5.655479 +/- 0.009176784 (0.16%) initial =  5
+         intercept:    -0.968602 +/- 0.03352202 (3.46%) initial =  1
+         slope:         0.264844 +/- 0.005748921 (2.17%) initial =  0
+         wid:           0.6754552 +/- 0.009916862 (1.47%) initial =  1
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amp, wid)                  =  0.666
+        C(cen, intercept)            =  0.129
+
+and shows the plot:
+
+.. image:: _images/model_fit2.png
+   :target: _images/model_fit2.png
+   :width: 50%
+
+
+which shows the data as blue dots, the best fit as a solid red line, and
+the initial fit as a black dashed line.
+
+In this example, the argument names for the model functions do not overlap.
+If they had, the ``prefix`` argument to :class:`Model` would have allowed
+us to identify which parameter went with which component model.  As we will
+see in the next chapter, using composite models with the built-in models
+provides a simple way to build up complex models.
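+
+If, hypothetically, both component functions had used an argument named
+``amplitude``, a sketch of that disambiguation could look like::
+
+    >>> mod = Model(gaussian, prefix='peak_') + Model(line, prefix='bkg_')
+
+giving distinct parameters ``peak_amplitude`` and ``bkg_amplitude`` that
+are mapped back to the ``amplitude`` argument of the right component.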
+
diff --git a/doc/models1d.rst b/doc/models1d.rst
deleted file mode 100644
index 0bc95b0..0000000
--- a/doc/models1d.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-.. _models1d-label:
-
-=================================
-Simple Builtin Fitting Models
-=================================
-
-It is common to want to fit some 1-dimensional data set to a simple
-peak or line shape, such as Gaussians, Lorentzian, and Voigt peaks,
-Exponential decays, and so on.  These are used in a wide range of
-spectroscopic techniques as well as in basic mathematical analysis.
-In an effort to make make simple things truly simple, the lmfit
-provides a few simple wrappers for doing such fits in its `models1d`
-module.
-
-
-Example
-===========
-
-Let's start with a very simple example.  We'll read data from a simple
-datafile, and fit it to a Gaussian peak.  A script to do this could be:
-
-.. literalinclude:: ../tests/model1d_doc1.py
-
-First, we read in the data for 'x' and 'y', then build a Gaussian model.
-This 'model' contains all the Parameters for a Gaussian line shape.  We
-then expliticly tell the model to make initial guesses for the Parameters
-based on the data arrays, and save the model predicted with these initial
-Parameter values.  We then perform the actual fit, and print out and
-display the results.  The printed output will be (approximately)::
-
-  [[Variables]]
-       amplitude:     8.880222 +/- 0.113597 (1.28%) initial =  8.182302
-       center:        5.65866 +/- 0.01030533 (0.18%) initial =  5.5
-       fwhm:          1.642853 +/- 0.02426699 (1.48%) == '2.35482*sigma'
-       sigma:         0.6976553 +/- 0.01030524 (1.48%) initial =  0.6794575
-  [[Correlations]] (unreported correlations are <  0.250)
-      C(amplitude, sigma)          =  0.577
-
-and the resulting plot will look like:
-
-.. image:: models1d_doc1.png
-   :width: 85%
-
-which shows a good fit (the data were simulated).
-
-You can see here that the model created Parameters named 'amplitude',
-'center', 'fwhm', and 'sigma' for the Gaussian model.
-
-You can also see from the results that the starting guess were a pretty
-good estimate for this simple data set.  In fact, it's generally possible
-to not bother running :meth:`guess_starting_values` explicitly.  If this
-method has not been run already, :meth:`fit` will run it for you. Good
-reasons to run this method yourself are if want to save the initial
-estimate of the data, or to alter the starting values by hand.
-
-classes in the :mod:`models1d` module
-=======================================
-
-Several fitting models are available
-
-.. class:: GaussianModel()
-
-.. class:: LorentzianModel()
-
-.. class:: VoigtModel()
-
-.. class:: PeakModel()
-
-
-.. class:: ExponentialModel()
-
-
-.. class:: ExponentialModel()
-
-.. class:: StepModel()
-
-
-.. class:: RectangleModel()
-
-
diff --git a/doc/models1d_doc1.png b/doc/models1d_doc1.png
deleted file mode 100644
index 9fc5a93..0000000
Binary files a/doc/models1d_doc1.png and /dev/null differ
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 91fae30..57c3296 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -1,152 +1,20 @@
-===========================================================
-Getting started with Non-Linear Least-Squares Fitting
-===========================================================
+.. _parameters_chapter:
 
-The lmfit package is designed to provide a simple way to build complex
-fitting models and apply them to real data.  This chapter describes how to
-set up and perform simple fits.  Some basic knowledge of Python, Numpy, and
-modeling data are assumed.
-
-To do a least-squares fit of a model to data, or for a host of other
-optimization problems, the main task is to write an *objective function*
-that takes the values of the fitting variables and calculates either a
-scalar value to be minimized or an array of values that is to be minimized
-in the least-squares sense.  For many data fitting processes, the
-least-squares approach is used, and the objective function should
-return an array of (data-model), perhaps scaled by some weighting factor
-such as the inverse of the uncertainty in the data.  For such a problem,
-the chi-square (:math:`\chi^2`) statistic is often defined as:
-
-
-.. math::
-
- \chi^2 =  \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2}
-
-where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm
-model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
-variables in the model to be optimized in the fit, and :math:`\epsilon_i`
-is the estimated uncertainty in the data.
-
-In a traditional non-linear fit, one writes an objective function that takes the
-variable values and calculates the residual :math:`y^{\rm meas}_i -
-y_i^{\rm model}({\bf{v}})`, or the residual scaled by the data
-uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
-model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.  As a
-simple example, one might write an objective function like this::
-
-    def residual(vars, x, data, eps_data):
-        amp = vars[0]
-        phaseshift = vars[1]
-	freq = vars[2]
-        decay = vars[3]
-
-	model = amp * sin(x * freq  + phaseshift) * exp(-x*x*decay)
-
-        return (data-model)/eps_data
-
-To perform the minimization with scipy, one would do::
-
-    from scipy.optimize import leastsq
-    vars = [10.0, 0.2, 3.0, 0.007]
-    out = leastsq(residual, vars, args=(x, data, eps_data))
-
-Though it is wonderful to be able to use python for such optimization
-problems, and the scipy library is robust and easy to use, the approach
-here is not terribly different from how one would do the same fit in C or
-Fortran.
-
-
-.. _parameters-label:
-
-Using :class:`Parameters` instead of Variables
-=============================================================
-
-As described above, there are several practical challenges in doing
-least-squares fits and other optimizations with the traditional
-implementation (Fortran, scipy.optimize.leastsq, and most other) in which a
-list of fitting variables to the function to be minimized.  These
-challenges include:
-
-  a) The user has to keep track of the order of the variables, and their
-     meaning -- vars[2] is the frequency, and so on.
-
-  b) If the user wants to fix a particular variable (*not* vary it in the fit),
-     the residual function has to be altered.  While reasonable for simple
-     cases, this quickly becomes significant work for more complex models,
-     and greatly complicates modeling for people not intimately familiar
-     with the code.
-
-  c) There is no simple, robust way to put bounds on values for the
-     variables, or enforce mathematical relationships between the
-     variables.
-
-The lmfit module is designed to void these shortcomings.
-
-The main idea of lmfit is to expand a numerical variable with a
-:class:`Parameter`, which have more attributes than simply their value.
-Instead of a pass a list of numbers to the function to minimize, you create
-a :class:`Parameters` object, add parameters to this object, and pass along
-this object to your function to be minimized.  With this transformation,
-the above example would be translated to look like::
-
-    from lmfit import minimize, Parameters
-
-    def residual(params, x, data, eps_data):
-        amp = params['amp'].value
-        pshift = params['phase'].value
-	freq = params['frequency'].value
-        decay = params['decay'].value
-
-	model = amp * sin(x * freq  + pshift) * exp(-x*x*decay)
-
-        return (data-model)/eps_data
-
-    params = Parameters()
-    params.add('amp', value=10)
-    params.add('decay', value=0.007)
-    params.add('phase', value=0.2)
-    params.add('frequency', value=3.0)
-
-    out = minimize(residual, params, args=(x, data, eps_data))
-
-
-So far, this simply looks like it replaced a list of values with a
-dictionary, accessed by name.  But each of the named :class:`Parameter` in
-the :class:`Parameters` object hold additional attributes to modify the
-value during the fit.  For example, Parameters can be fixed or bounded, and
-this can be done when being defined::
-
-    params = Parameters()
-    params.add('amp', value=10, vary=False)
-    params.add('decay', value=0.007, min=0.0)
-    params.add('phase', value=0.2)
-    params.add('frequency', value=3.0, max=10)
-
-or after being defined by setting the corresponding attributes::
-
-    params['amp'].vary = False
-    params['decay'].min = 0.10
-
-In either case, the fit will *not* vary the amplitude parameter.  In
-addition, a lower bound will be placed on the decay factor, and upper
-bounds placed on two parameters. Importantly, our function to be minimized
-remains unchanged.
-
-An important point here is that the `params` object can be copied and
-modified to make many user-level changes to the model and fitting process.
-Of course, most of the information about how your data is modeled goes into
-the fitting function, but the approach here allows some external control as
-well.
+================================================
+:class:`Parameter`  and :class:`Parameters`
+================================================
 
+This chapter describes :class:`Parameter` objects, which are
+fundamental to the lmfit approach to optimization.   Most real use cases
+will use the :class:`Parameters` class, which provides an (ordered)
+dictionary of :class:`Parameter` objects.
 
 The :class:`Parameter` class
 ========================================
 
 .. class:: Parameter(name=None[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
 
-   create a Parameter object.  These are the fundamental extension of a fit
-   variable within lmfit, but you will probably create most of these with
-   the :class:`Parameters` class.
+   create a Parameter object.
 
    :param name: parameter name
    :type name: ``None`` or string -- will be overwritten during fit if ``None``.
@@ -159,9 +27,7 @@ The :class:`Parameter` class
    :type expr: ``None`` or string
 
 
-Each of these inputs is turned into an attribute of the same name.   As
-above, one hands a dictionary of Parameters to the fitting routines.   The
-name for the Parameter will be set to be consistent
+Each of these inputs is turned into an attribute of the same name.
 
 After a fit, a Parameter for a fitted variable (ie with vary = ``True``)
 will have the :attr:`value` attribute holding the best-fit value, and may
@@ -179,11 +45,11 @@ will have the :attr:`value` attribute holding the best-fit value, and may
    {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
 
 For details of the use of the bounds :attr:`min` and :attr:`max`,
-see :ref:`parameter-bounds-label`.
+see :ref:`bounds_chapter`.
 
 The :attr:`expr` attribute can contain a mathematical expression that will
 be used to compute the value for the Parameter at each step in the fit.
-See :ref:`math-constraints-label` for more details and examples of this
+See :ref:`constraints_chapter` for more details and examples of this
 feature.
 
 
@@ -240,6 +106,6 @@ Simple Example
 Putting it all together, a simple example of using a dictionary of
 :class:`Parameter` objects and :func:`minimize` might look like this:
 
-.. literalinclude:: ../tests/simple.py
+.. literalinclude:: ../examples/doc_basic.py
 
 
diff --git a/doc/conf.py b/doc/sphinx/mathjax/conf.py
similarity index 73%
copy from doc/conf.py
copy to doc/sphinx/mathjax/conf.py
index 27e19b2..451458e 100644
--- a/doc/conf.py
+++ b/doc/sphinx/mathjax/conf.py
@@ -16,7 +16,8 @@ import sys, os
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'ext')))
+sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
+sys.path.append(os.path.abspath(os.path.join('.')))
 # -- General configuration -----------------------------------------------------
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
@@ -24,11 +25,23 @@ sys.path.append(os.path.abspath(os.path.join('.', 'ext')))
 extensions = ['sphinx.ext.autodoc',
               'sphinx.ext.todo',
               'sphinx.ext.coverage',
-              'sphinx.ext.pngmath',
-              'ipython_directive',
-              'ipython_console_highlighting',
+              'sphinx.ext.mathjax',
+              'sphinx.ext.intersphinx',
               'numpydoc']
 
+try:
+    import IPython.sphinxext.ipython_directive
+    extensions.extend(['IPython.sphinxext.ipython_directive',
+                       'IPython.sphinxext.ipython_console_highlighting'])
+except ImportError:
+    pass
+
+
+intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
+                       'numpy': ('http://scipy.org/docs/numpy/', None),
+                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
+
+intersphinx_cache_limit = 10
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -44,7 +57,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'lmfit'
-copyright = u'2013, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
+copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -98,18 +111,8 @@ pygments_style = 'sphinx'
 
 # -- Options for HTML output ---------------------------------------------------
 
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'default'
-
-html_theme = 'sphinxdoc'
-
-# html_theme = 'nature'
-#html_theme = 'agogo'
-# html_theme_options = {'pagewidth':'85em', 'documentwidth':'60em', 'sidebarwidth': '25em',
-#                       # 'headercolor1': '#000080',
-#                       # 'headercolor2': '#0000A0',
-#                       }
+html_theme_path = ['sphinx/theme']
+html_theme = 'lmfitdoc'
 
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
@@ -117,10 +120,10 @@ html_theme = 'sphinxdoc'
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
 #html_title = None
-html_title = 'Least-Squares Minimization with Constraints for Python'
+html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Least-Squares Minimization with Constraints for Python'
+html_short_title = 'Minimization and Curve-Fitting for Python'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -142,23 +145,13 @@ html_static_path = ['_static']
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-html_use_smartypants = False # True
+html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
 
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
 html_use_modindex = False
-
-# If false, no index is generated.
 #html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
 #html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
@@ -175,36 +168,13 @@ html_show_sourcelink = True
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'lmfitdoc'
 
-
 # -- Options for LaTeX output --------------------------------------------------
 
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
   ('index', 'lmfit.tex',
-   'Least-Squares Minimization with Constraints for Python',
-   'Matthew Newville', 'manual'),
+   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
+   'Matthew Newville, Till Stensitzki, and others', 'manual'),
 ]
 
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff --git a/doc/conf.py b/doc/sphinx/pngmath/conf.py
similarity index 74%
copy from doc/conf.py
copy to doc/sphinx/pngmath/conf.py
index 27e19b2..2fb8e92 100644
--- a/doc/conf.py
+++ b/doc/sphinx/pngmath/conf.py
@@ -16,7 +16,8 @@ import sys, os
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'ext')))
+sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
+sys.path.append(os.path.abspath(os.path.join('.')))
 # -- General configuration -----------------------------------------------------
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
@@ -25,10 +26,22 @@ extensions = ['sphinx.ext.autodoc',
               'sphinx.ext.todo',
               'sphinx.ext.coverage',
               'sphinx.ext.pngmath',
-              'ipython_directive',
-              'ipython_console_highlighting',
+              'sphinx.ext.intersphinx',
               'numpydoc']
 
+try:
+    import IPython.sphinxext.ipython_directive
+    extensions.extend(['IPython.sphinxext.ipython_directive',
+                       'IPython.sphinxext.ipython_console_highlighting'])
+except ImportError:
+    pass
+
+
+intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
+                       'numpy': ('http://scipy.org/docs/numpy/', None),
+                       'scipy': ('http://scipy.org/docs/scipy/reference/', None)}
+
+intersphinx_cache_limit = 10
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -44,7 +57,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'lmfit'
-copyright = u'2013, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
+copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -98,18 +111,8 @@ pygments_style = 'sphinx'
 
 # -- Options for HTML output ---------------------------------------------------
 
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'default'
-
-html_theme = 'sphinxdoc'
-
-# html_theme = 'nature'
-#html_theme = 'agogo'
-# html_theme_options = {'pagewidth':'85em', 'documentwidth':'60em', 'sidebarwidth': '25em',
-#                       # 'headercolor1': '#000080',
-#                       # 'headercolor2': '#0000A0',
-#                       }
+html_theme_path = ['sphinx/theme']
+html_theme = 'lmfitdoc'
 
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
@@ -117,10 +120,10 @@ html_theme = 'sphinxdoc'
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
 #html_title = None
-html_title = 'Least-Squares Minimization with Constraints for Python'
+html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Least-Squares Minimization with Constraints for Python'
+html_short_title = 'Minimization and Curve-Fitting for Python'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -142,23 +145,13 @@ html_static_path = ['_static']
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-html_use_smartypants = False # True
+html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
 html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
 
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
 html_use_modindex = False
-
-# If false, no index is generated.
 #html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
 #html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
@@ -175,36 +168,13 @@ html_show_sourcelink = True
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'lmfitdoc'
 
-
 # -- Options for LaTeX output --------------------------------------------------
 
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
   ('index', 'lmfit.tex',
-   'Least-Squares Minimization with Constraints for Python',
-   'Matthew Newville', 'manual'),
+   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
+   'Matthew Newville, Till Stensitzki, and others', 'manual'),
 ]
 
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
new file mode 100644
index 0000000..5bf78eb
--- /dev/null
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -0,0 +1,14 @@
+{#
+    sphinxdoc/layout.html
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Sphinx layout template for the sphinxdoc theme.
+
+    :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
diff --git a/doc/sphinx/theme/lmfitdoc/static/contents.png b/doc/sphinx/theme/lmfitdoc/static/contents.png
new file mode 100644
index 0000000..7fb8215
Binary files /dev/null and b/doc/sphinx/theme/lmfitdoc/static/contents.png differ
diff --git a/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t b/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
new file mode 100644
index 0000000..92b6913
--- /dev/null
+++ b/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
@@ -0,0 +1,348 @@
+/*
+ * lmfitdoc.css_t
+ * minor riff on sphinxdoc.css_t
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- sphinxdoc theme.  Originally created by
+ * Armin Ronacher for Werkzeug.
+ *
+ * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+    font-size: 14px;
+    letter-spacing: -0.01em;
+    line-height: 150%;
+    text-align: center;
+    background-color: #D6DAC4;
+    color: black;
+    padding: 0;
+    border: 0px solid #D0D0C0;
+    margin: 15px 15px 15px 15px;
+    min-width: 740px;
+}
+
+div.document {
+    background-color: white;
+    text-align: left;
+    background-image: url(contents.png);
+    background-repeat: repeat-x;
+}
+
+div.bodywrapper {
+    margin: 0 {{ theme_sidebarwidth|toint + 10 }}px 0 0;
+    border-right: 1px solid #ccc;
+}
+
+div.body {
+    margin: 0;
+    padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+    font-size: 1em;
+    background-color: #0D0;
+}
+
+div.related ul {
+    height: 2em;
+    border-top: 1px solid #ddd;
+    border-bottom: 1px solid #ddd;
+    background-color: #F0EFE4;
+    color: #157;
+}
+
+div.related ul li {
+    margin: 0;
+    padding: 0;
+    height: 2em;
+    float: left;
+    background-color: #D0000;
+}
+
+div.related ul li.right {
+    float: right;
+    margin-right: 5px;
+}
+
+div.related ul li a {
+    margin: 0;
+    padding: 0 5px 0 5px;
+    line-height: 1.75em;
+    color: #EE9816;
+    color: #157;
+}
+
+div.related ul li a:hover {
+    color: #822;
+}
+
+div.sphinxsidebarwrapper {
+    padding: 0;
+}
+
+div.sphinxsidebar {
+    margin: 0;
+    padding: 0.5em 15px 15px 0;
+    width: {{ theme_sidebarwidth|toint - 20 }}px;
+    float: right;
+    font-size: 1em;
+    text-align: left;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+    margin: 1em 0 0.5em 0;
+    font-size: 1em;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: #157;
+    border: 1px solid #A0A090;
+    background-color: #D0D0C4;
+}
+
+div.sphinxsidebar h3 a {
+    color: #157;
+    background-color: #D0D0C4;
+}
+
+div.sphinxsidebar ul {
+    padding-left: 1.5em;
+    margin-top: 7px;
+    padding: 0;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+    margin-left: 20px;
+}
+
+div.footer {
+    background-color: #E0E8D4;
+    color: #86989B;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+div.footer a {
+    color: #86989B;
+    text-decoration: underline;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+    margin: 0.8em 0 0.5em 0;
+}
+
+a {
+    color: #CA7900;
+    text-decoration: none;
+}
+
+a:hover {
+    color: #2491CF;
+}
+
+div.body a {
+    text-decoration: underline;
+}
+
+h1 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 0.7em 0 0.3em 0;
+    font-size: 1.5em;
+    color: #157;
+    background-color: #F0EFE4;
+}
+
+h2 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 1.3em 0 0.2em 0;
+    font-size: 1.35em;
+    padding: 0;
+    background-color: #FAFAF0;
+}
+
+h3 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 1em 0 -0.3em 0;
+    font-size: 1.2em;
+    background-color: #FBFBF3;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+    color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+    display: none;
+    margin: 0 0 0 0.3em;
+    padding: 0 0.2em 0 0.2em;
+    color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+    display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+    color: #777;
+    background-color: #eee;
+}
+
+a.headerlink {
+    color: #c60f0f!important;
+    font-size: 1em;
+    margin-left: 6px;
+    padding: 0 4px 0 4px;
+    text-decoration: none!important;
+}
+
+a.headerlink:hover {
+    background-color: #ccc;
+    color: white!important;
+}
+
+cite, code, tt {
+    font-family: 'Consolas', 'Deja Vu Sans Mono',
+                 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.01em;
+}
+
+tt {
+    background-color: #f2f2f2;
+    border-bottom: 1px solid #ddd;
+    color: #333;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+    border: 0;
+}
+
+hr {
+    border: 1px solid #abc;
+    margin: 2em;
+}
+
+a tt {
+    border: 0;
+    color: #CA7900;
+}
+
+a tt:hover {
+    color: #2491CF;
+}
+
+pre {
+    font-family: 'Consolas', 'Deja Vu Sans Mono',
+                 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.015em;
+    line-height: 120%;
+    padding: 0.5em;
+    border: 1px solid #ccc;
+    background-color: #f8f8f8;
+}
+
+pre a {
+    color: inherit;
+    text-decoration: underline;
+}
+
+td.linenos pre {
+    padding: 0.5em 0;
+}
+
+div.quotebar {
+    background-color: #f8f8f8;
+    max-width: 250px;
+    float: right;
+    padding: 2px 7px;
+    border: 1px solid #ccc;
+}
+
+div.topic {
+    background-color: #f8f8f8;
+}
+
+table {
+    border-collapse: collapse;
+    margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+    padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+    font-size: 0.9em;
+    margin: 1em 0 1em 0;
+    border: 1px solid #86989B;
+    background-color: #f7f7f7;
+    padding: 0;
+}
+
+div.admonition p, div.warning p {
+    margin: 0.5em 1em 0.5em 1em;
+    padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+    margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+    margin: 0;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border-bottom: 1px solid #86989B;
+    font-weight: bold;
+    background-color: #AFC1C4;
+}
+
+div.warning {
+    border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+    background-color: #CF0000;
+    border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+    margin: 0.1em 0.5em 0.5em 3em;
+    padding: 0;
+}
+
+div.versioninfo {
+    margin: 1em 0 0 0;
+    border: 1px solid #ccc;
+    background-color: #DDEAF0;
+    padding: 8px;
+    line-height: 1.3em;
+    font-size: 0.9em;
+}
+
+.viewcode-back {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+                 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+    background-color: #f4debf;
+    border-top: 1px solid #ac9;
+    border-bottom: 1px solid #ac9;
+}
diff --git a/doc/sphinx/theme/lmfitdoc/static/navigation.png b/doc/sphinx/theme/lmfitdoc/static/navigation.png
new file mode 100644
index 0000000..1081dc1
Binary files /dev/null and b/doc/sphinx/theme/lmfitdoc/static/navigation.png differ
diff --git a/doc/sphinx/theme/lmfitdoc/theme.conf b/doc/sphinx/theme/lmfitdoc/theme.conf
new file mode 100644
index 0000000..d3bfaad
--- /dev/null
+++ b/doc/sphinx/theme/lmfitdoc/theme.conf
@@ -0,0 +1,4 @@
+[theme]
+inherit = basic
+stylesheet = lmfitdoc.css
+pygments_style = friendly
diff --git a/examples/NIST_STRD/Gauss2.dat b/examples/NIST_Gauss2.dat
similarity index 76%
copy from examples/NIST_STRD/Gauss2.dat
copy to examples/NIST_Gauss2.dat
index ff185d1..cd177bb 100644
--- a/examples/NIST_STRD/Gauss2.dat
+++ b/examples/NIST_Gauss2.dat
@@ -1,63 +1,63 @@
-NIST/ITL StRD
-Dataset Name:  Gauss2            (Gauss2.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  48)
-               Certified Values  (lines 41 to  53)
-               Data              (lines 61 to 310)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   The data are two slightly-blended Gaussians on a 
-               decaying exponential baseline plus normally 
-               distributed zero-mean noise with variance = 6.25. 
-
-Reference:     Rust, B., NIST (1996). 
-
-
-
-
-
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               250 Observations
-               Lower Level of Difficulty
-               Generated Data
-
-Model:         Exponential Class
-               8 Parameters (b1 to b8)
-
-               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) 
-                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    96.0        98.0         9.9018328406E+01  5.3748766879E-01
-  b2 =     0.009       0.0105      1.0994945399E-02  1.3335306766E-04
-  b3 =   103.0       103.0         1.0188022528E+02  5.9217315772E-01
-  b4 =   106.0       105.0         1.0703095519E+02  1.5006798316E-01
-  b5 =    18.0        20.0         2.3578584029E+01  2.2695595067E-01
-  b6 =    72.0        73.0         7.2045589471E+01  6.1721965884E-01
-  b7 =   151.0       150.0         1.5327010194E+02  1.9466674341E-01
-  b8 =    18.0        20.0         1.9525972636E+01  2.6416549393E-01
-
-Residual Sum of Squares:                    1.2475282092E+03
-Residual Standard Deviation:                2.2704790782E+00
-Degrees of Freedom:                               242
-Number of Observations:                           250
-
-
-
-
-
- 
-Data:   y          x
+# NIST/ITL StRD
+# Dataset Name:  Gauss2            (Gauss2.dat)
+#
+# File Format:   ASCII
+#                Starting Values   (lines 41 to  48)
+#                Certified Values  (lines 41 to  53)
+#                Data              (lines 61 to 310)
+#
+# Procedure:     Nonlinear Least Squares Regression
+#
+# Description:   The data are two slightly-blended Gaussians on a
+#                decaying exponential baseline plus normally
+#                distributed zero-mean noise with variance = 6.25.
+#
+# Reference:     Rust, B., NIST (1996).
+#
+#
+#
+#
+#
+#
+#
+#
+#
+# Data:          1 Response  (y)
+#                1 Predictor (x)
+#                250 Observations
+#                Lower Level of Difficulty
+#                Generated Data
+#
+# Model:         Exponential Class
+#                8 Parameters (b1 to b8)
+#
+#                y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+#                                    + b6*exp( -(x-b7)**2 / b8**2 ) + e
+#
+#
+#           Starting values                  Certified Values
+#
+#         Start 1     Start 2           Parameter     Standard Deviation
+#   b1 =    96.0        98.0         9.9018328406E+01  5.3748766879E-01
+#   b2 =     0.009       0.0105      1.0994945399E-02  1.3335306766E-04
+#   b3 =   103.0       103.0         1.0188022528E+02  5.9217315772E-01
+#   b4 =   106.0       105.0         1.0703095519E+02  1.5006798316E-01
+#   b5 =    18.0        20.0         2.3578584029E+01  2.2695595067E-01
+#   b6 =    72.0        73.0         7.2045589471E+01  6.1721965884E-01
+#   b7 =   151.0       150.0         1.5327010194E+02  1.9466674341E-01
+#   b8 =    18.0        20.0         1.9525972636E+01  2.6416549393E-01
+#
+# Residual Sum of Squares:                    1.2475282092E+03
+# Residual Standard Deviation:                2.2704790782E+00
+# Degrees of Freedom:                               242
+# Number of Observations:                           250
+#
+#
+#
+#
+#
+#
+# Data:   y          x
     97.58776    1.000000
     97.76344    2.000000
     96.56705    3.000000
diff --git a/examples/simple.py b/examples/doc_basic.py
similarity index 93%
rename from examples/simple.py
rename to examples/doc_basic.py
index af0495e..af92a55 100644
--- a/examples/simple.py
+++ b/examples/doc_basic.py
@@ -1,4 +1,5 @@
-
+#!/usr/bin/env python
+#<examples/doc_basic.py>
 from lmfit import minimize, Parameters, Parameter, report_fit
 import numpy as np
 
@@ -44,3 +45,4 @@ try:
 except:
     pass
 
+#<end of examples/doc_basic.py>
diff --git a/examples/doc_model1.py b/examples/doc_model1.py
new file mode 100644
index 0000000..6085766
--- /dev/null
+++ b/examples/doc_model1.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#<examples/doc_model1.py>
+from numpy import sqrt, pi, exp, linspace, loadtxt
+from lmfit import  Model
+
+import matplotlib.pyplot as plt
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+def gaussian(x, amp, cen, wid):
+    "1-d gaussian: gaussian(x, amp, cen, wid)"
+    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+
+gmod = Model(gaussian)
+result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+print(gmod.fit_report())
+
+plt.plot(x, y,         'bo')
+plt.plot(x, result.init_fit, 'k--')
+plt.plot(x, result.best_fit, 'r-')
+plt.show()
+#<end examples/doc_model1.py>
diff --git a/examples/doc_model2.py b/examples/doc_model2.py
new file mode 100644
index 0000000..79635af
--- /dev/null
+++ b/examples/doc_model2.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+#<examples/model_doc2.py>
+from numpy import sqrt, pi, exp, linspace, loadtxt
+from lmfit import Model
+
+import matplotlib.pyplot as plt
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1] + 0.25*x - 1.0
+
+def gaussian(x, amp, cen, wid):
+    "1-d gaussian: gaussian(x, amp, cen, wid)"
+    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+
+def line(x, slope, intercept):
+    "line"
+    return slope * x + intercept
+
+mod = Model(gaussian) + Model(line)
+result = mod.fit(y, x=x, amp=5, cen=5, wid=1, slope=0, intercept=1)
+
+print(mod.fit_report())
+
+plt.plot(x, y,         'bo')
+plt.plot(x, result.init_fit, 'k--')
+plt.plot(x, result.best_fit, 'r-')
+plt.show()
+#<end examples/model_doc2.py>
diff --git a/examples/doc_nistgauss.py b/examples/doc_nistgauss.py
new file mode 100644
index 0000000..ce9fa49
--- /dev/null
+++ b/examples/doc_nistgauss.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+#<examples/doc_nistgauss.py>
+import numpy as np
+from lmfit.models import GaussianModel, ExponentialModel
+
+import matplotlib.pyplot as plt
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+exp_mod = ExponentialModel(prefix='exp_')
+exp_mod.guess_starting_values(y, x=x)
+
+gauss1  = GaussianModel(prefix='g1_')
+gauss2  = GaussianModel(prefix='g2_')
+
+gauss1.set_paramval('center',    105, min=75, max=125)
+gauss1.set_paramval('sigma',      15, min=3)
+gauss1.set_paramval('amplitude', 2000, min=10)
+
+gauss2.set_paramval('center',    155, min=125, max=175)
+gauss2.set_paramval('sigma',      15, min=3)
+gauss2.set_paramval('amplitude', 2000, min=10)
+
+mod = gauss1 + gauss2 + exp_mod
+
+out = mod.fit(y, x=x)
+
+print(mod.fit_report(min_correl=0.5))
+
+plt.plot(x, y)
+plt.plot(x, out.init_fit, 'k--')
+plt.plot(x, out.best_fit, 'r-')
+plt.show()
+#<end examples/doc_nistgauss.py>
diff --git a/examples/doc_nistgauss2.py b/examples/doc_nistgauss2.py
new file mode 100644
index 0000000..852224f
--- /dev/null
+++ b/examples/doc_nistgauss2.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+#<examples/doc_nistgauss2.py>
+import numpy as np
+from lmfit.models import GaussianModel, ExponentialModel
+
+import matplotlib.pyplot as plt
+
+dat = np.loadtxt('NIST_Gauss2.dat')
+x = dat[:, 1]
+y = dat[:, 0]
+
+exp_mod = ExponentialModel(prefix='exp_')
+gauss1  = GaussianModel(prefix='g1_')
+gauss2  = GaussianModel(prefix='g2_')
+
+
+def index_of(arrval, value):
+    "return index of array *at or below* value "
+    if value < min(arrval):  return 0
+    return max(np.where(arrval<=value)[0])
+
+ix1 = index_of(x,  75)
+ix2 = index_of(x, 135)
+ix3 = index_of(x, 175)
+
+exp_mod.guess_starting_values(y[:ix1], x=x[:ix1])
+gauss1.guess_starting_values(y[ix1:ix2], x=x[ix1:ix2])
+gauss2.guess_starting_values(y[ix2:ix3], x=x[ix2:ix3])
+
+mod = gauss1 + gauss2 + exp_mod
+
+out = mod.fit(y, x=x)
+
+print(mod.fit_report(min_correl=0.5))
+
+plt.plot(x, y)
+plt.plot(x, out.init_fit, 'k--')
+plt.plot(x, out.best_fit, 'r-')
+plt.show()
+#<end examples/doc_nistgauss2.py>
diff --git a/examples/doc_stepmodel.py b/examples/doc_stepmodel.py
new file mode 100644
index 0000000..d166b08
--- /dev/null
+++ b/examples/doc_stepmodel.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+#<examples/doc_stepmodel.py>
+import numpy as np
+from lmfit.models import StepModel, ConstantModel
+
+import matplotlib.pyplot as plt
+
+x = np.linspace(0, 10, 201)
+y = np.ones_like(x)
+y[:48] = 0.0
+y[48:77] = np.arange(77-48)/(77.0-48)
+y = 110.2 * (y + 9e-3*np.random.randn(len(x))) + 12.0
+
+step_mod = StepModel(form='erf')
+offset = ConstantModel()
+step_mod.guess_starting_values(y, x)
+offset.set_paramval('c', y.min())
+
+mod = step_mod + offset
+out = mod.fit(y, x=x)
+
+print(mod.fit_report())
+
+plt.plot(x, y)
+plt.plot(x, out.init_fit, 'k--')
+plt.plot(x, out.best_fit, 'r-')
+plt.show()
+#<end examples/doc_stepmodel.py>
diff --git a/examples/example_anneal.py b/examples/example_anneal.py
deleted file mode 100644
index e59fa09..0000000
--- a/examples/example_anneal.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-
-from lmfit import Parameters, minimize
-try:
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-p_true = Parameters()
-p_true.add('amp', value=14.0)
-p_true.add('period', value=5.33)
-p_true.add('shift', value=0.123)
-p_true.add('decay', value=0.010)
-
-def residual(pars, x, data=None):
-    amp = pars['amp'].value
-    per = pars['period'].value
-    shift = pars['shift'].value
-    decay = pars['decay'].value
-
-    if abs(shift) > pi/2:
-        shift = shift - sign(shift)*pi
-    model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
-    if data is None:
-        return model
-    return (model - data)
-
-n = 2500
-xmin = 0.
-xmax = 250.0
-noise = random.normal(scale=0.7215, size=n)
-x     = linspace(xmin, xmax, n)
-data  = residual(p_true, x) + noise
-
-fit_params = Parameters()
-fit_params.add('amp', value=13.0, min=-5, max=40)
-fit_params.add('period', value=2, min=0, max=7)
-fit_params.add('shift', value=0.0, min=-1.5, max=1.5)
-fit_params.add('decay', value=0.02, min=0, max=1.0)
-#p_true.add('amp', value=14.0)
-#p_true.add('period', value=5.33)
-#p_true.add('shift', value=0.123)
-#p_true.add('decay', value=0.010)
-
-out = minimize(residual, fit_params, method='anneal',
-               Tf= 1000,
-               args=(x,), kws={'data':data})
-
-print out.sa_out
-for key, par in fit_params.items():
-    print key, par, p_true[key].value
-
-
-if HASPYLAB:
-    pylab.plot(x, data, 'ro')
-    pylab.plot(x, fit, 'b')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/example_covar.py b/examples/example_covar.py
index 1152b05..4eea1ea 100644
--- a/examples/example_covar.py
+++ b/examples/example_covar.py
@@ -3,7 +3,7 @@ from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 from scipy.optimize import leastsq
 
 from lmfit import Parameters, Minimizer, report_fit
-from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.lineshapes import gaussian
 
 try:
     import pylab
@@ -14,7 +14,7 @@ except ImportError:
 HASPYLAB = False
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
+    yg = gaussian(x, pars['amp_g'].value,
                   pars['cen_g'].value, pars['wid_g'].value)
 
     slope = pars['line_slope'].value
@@ -40,8 +40,8 @@ p_true.add('wid_g', value=1.6)
 p_true.add('line_off', value=-1.023)
 p_true.add('line_slope', value=0.62)
 
-data = (gauss(x, p_true['amp_g'].value, p_true['cen_g'].value,
-              p_true['wid_g'].value) +
+data = (gaussian(x, p_true['amp_g'].value, p_true['cen_g'].value,
+                 p_true['wid_g'].value) +
         random.normal(scale=0.23,  size=n) +
         x*p_true['line_slope'].value + p_true['line_off'].value )
 
diff --git a/examples/example_peakmodel.py b/examples/example_peakmodel.py
index ce0bfea..686f6a3 100644
--- a/examples/example_peakmodel.py
+++ b/examples/example_peakmodel.py
@@ -2,7 +2,7 @@
 Example using the built-in Peak-like models
 """
 import numpy as np
-from lmfit.models1d import GaussianModel, LorentzianModel, VoigtModel
+from lmfit.old_models1d import GaussianModel, LorentzianModel, VoigtModel
 import matplotlib.pyplot as plt
 
 x  = np.linspace(0, 10, 101)
diff --git a/examples/example_stepmodel.py b/examples/example_stepmodel.py
deleted file mode 100644
index 9bfaad7..0000000
--- a/examples/example_stepmodel.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import numpy as np
-from lmfit.models1d import StepModel
-
-import matplotlib.pyplot as plt
-
-x  = np.linspace(0, 10, 201)
-dat = np.ones_like(x)
-dat[:48] = 0.0
-dat[48:77] = np.arange(77-48)/(77.0-48)
-dat = dat +  5e-2*np.random.randn(len(x))
-dat = 110.2 * dat + 12.0
-
-mod = StepModel(background='constant', form='erf') # linear') # 'atan')
-
-mod.guess_starting_values(dat, x)
-
-init = mod.model(x=x)+mod.calc_background(x)
-mod.fit(dat, x=x)
-
-print mod.fit_report()
-
-fit = mod.model(x=x)+mod.calc_background(x)
-
-plt.plot(x, dat)
-plt.plot(x, init, 'r+')
-plt.plot(x, fit)
-plt.show()
-
diff --git a/examples/fit_pvoigt.py b/examples/fit_pvoigt.py
index 8f828f7..0542236 100644
--- a/examples/fit_pvoigt.py
+++ b/examples/fit_pvoigt.py
@@ -1,7 +1,7 @@
 import sys
 
 from lmfit import Parameters, Parameter, Minimizer, report_fit
-from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 
@@ -20,10 +20,10 @@ def per_iteration(pars, i, resid, x, *args, **kws):
             print( p.name , p.value)
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
-               pars['cen_g'].value, pars['wid_g'].value)
-    yl = loren(x, pars['amp_l'].value,
-               pars['cen_l'].value, pars['wid_l'].value)
+    yg = gaussian(x, pars['amp_g'].value,
+                  pars['cen_g'].value, pars['wid_g'].value)
+    yl = lorentzian(x, pars['amp_l'].value,
+                    pars['cen_l'].value, pars['wid_l'].value)
 
     frac = pars['frac'].value
     slope = pars['line_slope'].value
diff --git a/examples/fit_pvoigt2.py b/examples/fit_pvoigt2.py
index 13df297..23320aa 100644
--- a/examples/fit_pvoigt2.py
+++ b/examples/fit_pvoigt2.py
@@ -3,7 +3,7 @@ import sys
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 
 from lmfit import Parameters, Parameter, Minimizer
-from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 from lmfit.printfuncs import report_fit
 
 try:
@@ -16,10 +16,10 @@ except ImportError:
 
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
-               pars['cen_g'].value, pars['wid_g'].value)
-    yl = loren(x, pars['amp_l'].value,
-               pars['cen_l'].value, pars['wid_l'].value)
+    yg = gaussian(x, pars['amp_g'].value,
+                  pars['cen_g'].value, pars['wid_g'].value)
+    yl = lorentzian(x, pars['amp_l'].value,
+                    pars['cen_l'].value, pars['wid_l'].value)
 
     slope = pars['line_slope'].value
     offset = pars['line_off'].value
@@ -36,8 +36,8 @@ xmin = 0.
 xmax = 20.0
 x = linspace(xmin, xmax, n)
 
-data = (gauss(x, 21, 8.1, 1.2) + 
-        loren(x, 10, 9.6, 2.4) +
+data = (gaussian(x, 21, 8.1, 1.2) +
+        lorentzian(x, 10, 9.6, 2.4) +
         random.normal(scale=0.23,  size=n) +
         x*0.5)
 
@@ -53,13 +53,13 @@ pfit = [Parameter(name='amp_g',  value=10),
         Parameter(name='amp_l',  expr='amp_tot - amp_g'),
         Parameter(name='cen_l',  expr='1.5+cen_g'),
         Parameter(name='wid_l',  expr='2*wid_g'),
-        
+
         Parameter(name='line_slope', value=0.0),
         Parameter(name='line_off', value=0.0)]
 
 sigma = 0.021  # estimate of data error (for all data points)
 
-myfit = Minimizer(residual, pfit, 
+myfit = Minimizer(residual, pfit,
                   fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                   scale_covar=True)
 
diff --git a/examples/fit_pvoigt_NelderMead.py b/examples/fit_pvoigt_NelderMead.py
index 790ae9a..279de10 100644
--- a/examples/fit_pvoigt_NelderMead.py
+++ b/examples/fit_pvoigt_NelderMead.py
@@ -1,8 +1,7 @@
 import sys
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 from lmfit import Parameters, Parameter, Minimizer, report_fit
-from lmfit.utilfuncs import gauss, loren, pvoigt
-
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 
 try:
     import matplotlib
@@ -19,10 +18,10 @@ def per_iteration(pars, i, resid, x, *args, **kws):
             print( p.name , p.value)
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
-               pars['cen_g'].value, pars['wid_g'].value)
-    yl = loren(x, pars['amp_l'].value,
-               pars['cen_l'].value, pars['wid_l'].value)
+    yg = gaussian(x, pars['amp_g'].value,
+                  pars['cen_g'].value, pars['wid_g'].value)
+    yl = lorentzian(x, pars['amp_l'].value,
+                    pars['cen_l'].value, pars['wid_l'].value)
 
     frac = pars['frac'].value
     slope = pars['line_slope'].value
diff --git a/examples/fit_pvoigt_NelderMead2.py b/examples/fit_pvoigt_NelderMead2.py
index 5bac47e..111afda 100644
--- a/examples/fit_pvoigt_NelderMead2.py
+++ b/examples/fit_pvoigt_NelderMead2.py
@@ -2,7 +2,7 @@ import sys
 from numpy import linspace, exp, random
 
 from lmfit import Parameters, minimize
-from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 
 try:
     import matplotlib
@@ -18,10 +18,10 @@ def per_iteration(pars, i, resid, x, *args, **kws):
             print( p.name , p.value)
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
-               pars['cen_g'].value, pars['wid_g'].value)
-    yl = loren(x, pars['amp_l'].value,
-               pars['cen_l'].value, pars['wid_l'].value)
+    yg = gaussian(x, pars['amp_g'].value,
+                  pars['cen_g'].value, pars['wid_g'].value)
+    yl = lorentzian(x, pars['amp_l'].value,
+                    pars['cen_l'].value, pars['wid_l'].value)
 
     frac = pars['frac'].value
     slope = pars['line_slope'].value
diff --git a/examples/fit_with_algebraic_constraint.py b/examples/fit_with_algebraic_constraint.py
index fe634d5..5e6447f 100644
--- a/examples/fit_with_algebraic_constraint.py
+++ b/examples/fit_with_algebraic_constraint.py
@@ -3,7 +3,7 @@ import sys
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 
 from lmfit import Parameters, Parameter, Minimizer
-from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian
 from lmfit.printfuncs import report_fit
 
 try:
@@ -14,9 +14,9 @@ except ImportError:
 
 
 def residual(pars, x, sigma=None, data=None):
-    yg = gauss(x, pars['amp_g'].value,
-               pars['cen_g'].value, pars['wid_g'].value)
-    yl = loren(x, pars['amp_l'].value,
+    yg = gaussian(x, pars['amp_g'].value,
+                  pars['cen_g'].value, pars['wid_g'].value)
+    yl = lorentzian(x, pars['amp_l'].value,
                pars['cen_l'].value, pars['wid_l'].value)
 
     slope = pars['line_slope'].value
@@ -34,8 +34,8 @@ xmin = 0.
 xmax = 20.0
 x = linspace(xmin, xmax, n)
 
-data = (gauss(x, 21, 8.1, 1.2) + 
-        loren(x, 10, 9.6, 2.4) +
+data = (gaussian(x, 21, 8.1, 1.2) +
+        lorentzian(x, 10, 9.6, 2.4) +
         random.normal(scale=0.23,  size=n) +
         x*0.5)
 
@@ -51,13 +51,13 @@ pfit = [Parameter(name='amp_g',  value=10),
         Parameter(name='amp_l',  expr='amp_tot - amp_g'),
         Parameter(name='cen_l',  expr='1.5+cen_g'),
         Parameter(name='wid_l',  expr='2*wid_g'),
-        
+
         Parameter(name='line_slope', value=0.0),
         Parameter(name='line_off', value=0.0)]
 
 sigma = 0.021  # estimate of data error (for all data points)
 
-myfit = Minimizer(residual, pfit, 
+myfit = Minimizer(residual, pfit,
                   fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
                   scale_covar=True)
 
diff --git a/examples/m1.py b/examples/m1.py
index b42af60..352ce22 100644
--- a/examples/m1.py
+++ b/examples/m1.py
@@ -1,6 +1,6 @@
 
 import numpy as np
-from lmfit.models1d import  GaussianModel
+from lmfit.old_models1d import  GaussianModel
 import matplotlib.pyplot as plt
 
 data = np.loadtxt('model1d_gauss.dat')
diff --git a/examples/model1d_doc1.py b/examples/model1d_doc1.py
index ed6b136..57e74b3 100644
--- a/examples/model1d_doc1.py
+++ b/examples/model1d_doc1.py
@@ -1,5 +1,5 @@
 import numpy as np
-from lmfit.models1d import  GaussianModel
+from lmfit.old_models1d import  GaussianModel
 import matplotlib.pyplot as plt
 
 data = np.loadtxt('model1d_gauss.dat')
diff --git a/examples/model1d_doc2.py b/examples/model1d_doc2.py
index a8ae418..bf3b1f7 100644
--- a/examples/model1d_doc2.py
+++ b/examples/model1d_doc2.py
@@ -1,5 +1,5 @@
 import numpy as np
-from lmfit.models1d import  GaussianModel, VoigtModel
+from lmfit.old_models1d import  GaussianModel, VoigtModel
 import matplotlib.pyplot as plt
 
 data = np.loadtxt('model1d_gauss.dat')
diff --git a/examples/models_doc1.py b/examples/models_doc1.py
new file mode 100644
index 0000000..29c0641
--- /dev/null
+++ b/examples/models_doc1.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+#<examples/models_doc1.py>
+from numpy import loadtxt
+from lmfit import fit_report
+from lmfit.models import GaussianModel, VoigtModel
+import matplotlib.pyplot as plt
+
+
+data = loadtxt('test_peak.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+gmodel = GaussianModel()
+gmodel.guess_starting_values(y, x=x)
+gresult = gmodel.fit(y, x=x)
+
+print('With Gaussian: ')
+print(fit_report(gresult.params, min_correl=0.25))
+print('Chi-square = %.3f, Reduced Chi-square = %.3f' % (gresult.chisqr, gresult.redchi))
+plt.plot(x, y,         'k')
+plt.plot(x, 10*(y-gresult.best_fit), 'r-')
+
+
+vmodel = VoigtModel()
+vmodel.guess_starting_values(y, x=x)
+vresult = vmodel.fit(y, x=x)
+
+print('With Voigt: ')
+print(fit_report(vresult.params, min_correl=0.25))
+print('Chi-square = %.3f, Reduced Chi-square = %.3f' % (vresult.chisqr, vresult.redchi))
+
+plt.plot(x, 10*(y-vresult.best_fit), 'b-')
+
+
+vmodel.params['gamma'].vary = True
+vmodel.params['gamma'].expr = None
+
+vresult2 = vmodel.fit(y, x=x)
+
+print('With Voigt, varying gamma: ')
+print(fit_report(vresult2.params, min_correl=0.25))
+print('Chi-square = %.3f, Reduced Chi-square = %.3f' % (vresult2.chisqr, vresult2.redchi))
+plt.plot(x, 10*(y-vresult2.best_fit), 'g-')
+
+plt.show()
+
+#<end examples/models_doc1.py>
diff --git a/examples/models_doc2.py b/examples/models_doc2.py
new file mode 100644
index 0000000..a5ae679
--- /dev/null
+++ b/examples/models_doc2.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#<examples/models_doc2.py>
+from numpy import loadtxt
+from lmfit import fit_report
+from lmfit.models import GaussianModel, LinearModel
+import matplotlib.pyplot as plt
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1] + x * 0.1 - 3.0
+
+gauss = GaussianModel()
+gauss.guess_starting_values(y, x=x)
+
+line = LinearModel()
+line.params['slope'].value  = 0
+line.params['intercept'].value  = -1.0
+
+total = gauss + line
+
+result = total.fit(y, x=x)
+
+print(fit_report(result.params, min_correl=0.25))
+
+plt.plot(x, y,         'bo')
+plt.plot(x, result.init_fit, 'k--')
+plt.plot(x, result.best_fit, 'r-')
+plt.show()
+
+#<end examples/models_doc2.py>
diff --git a/examples/peakfit_1.py b/examples/peakfit_1.py
index 5f1b734..7c72306 100644
--- a/examples/peakfit_1.py
+++ b/examples/peakfit_1.py
@@ -8,11 +8,11 @@ except ImportError:
 
 
 from lmfit import Parameters, Minimizer, report_fit
-from lmfit.utilfuncs import gauss, loren
+from lmfit.lineshapes import gaussian
 
 def residual(pars, x, data=None):
-    g1 = gauss(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
-    g2 = gauss(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
+    g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
+    g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
     model = g1 + g2
     if data is None:
         return model
diff --git a/examples/fit_NIST.py b/examples/test_NIST_Strd.py
similarity index 90%
rename from examples/fit_NIST.py
rename to examples/test_NIST_Strd.py
index bf1ceaf..d053bbf 100644
--- a/examples/fit_NIST.py
+++ b/examples/test_NIST_Strd.py
@@ -4,22 +4,27 @@ import math
 
 from optparse import OptionParser
 
-
 try:
     import matplotlib
     matplotlib.use('WXAgg')
     import pylab
     HASPYLAB = True
+
 except ImportError:
     HASPYLAB = False
 
+import sys
+if 'nose' in sys.modules:
+    HASPYLAB = False
+
 from lmfit import Parameters, minimize
 
 from NISTModels import Models, ReadNistData
 
 
 def ndig(a, b):
-    return int(0.5-math.log10(abs(abs(a)-abs(b))/abs(b)))
+    "precision for NIST values"
+    return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
+
 
 def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
     print(' ======================================')
@@ -41,7 +46,7 @@ def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
 
         thiserr = par.stderr
         certerr = NISTdata['cert_stderr'][i]
-        if thiserr is not None:
+        if thiserr is not None and myfit.errorbars:
             edig   = ndig(thiserr, certerr)
             ename = (parname + ' stderr' + ' '*14)[:14]
             print(' | %s | % -.7e | % -.7e   | %2i                |' % (ename, thiserr, certerr, edig))
@@ -56,6 +61,9 @@ def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
     except:
         pass
     print(' |----------------+----------------+------------------+-------------------|')
+    if not myfit.errorbars:
+        print(' |          * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * *              |')
+        err_dig_min = 0
     if err_dig_min < 199:
         print(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min))
     else:
@@ -79,8 +87,6 @@ def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
 
 
     myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
-
-
     digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)
 
     if plot and HASPYLAB:
@@ -109,7 +115,7 @@ usage:
 ------
-    python fit_NIST.py [options] Model Start
+    python test_NIST_Strd.py [options] Model Start
 
-where Start is either 'start1' or 'start2', for different
+where Start is one of 'start1', 'start2' or 'cert', for different
 starting values, and Model is one of
 
     %s
@@ -132,7 +138,7 @@ parser.add_option("-m", "--method", dest="method", metavar='METH',
 
 (opts, args) = parser.parse_args()
 dset = ''
-start = 'start1'
+start = 'start2'
 if len(args) > 0:
     dset = args[0]
 if len(args) > 1:
@@ -144,7 +150,7 @@ if dset.lower() == 'all':
     failures = []
     dsets = sorted(Models.keys())
     for dset in dsets:
-        for start in ('start1', 'start2'):
+        for start in ('start1', 'start2', 'cert'):
             if NIST_Test(dset, method=opts.method, start=start, plot=False):
                 tpass += 1
             else:
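
    For reference, the revised ndig() above counts agreeing digits as the
    rounded base-10 log of the relative disagreement (the 1.e-15 guard avoids
    log10(0) for exact matches). A small self-contained check with made-up
    numbers:

        import math

        def ndig(a, b):
            "precision for NIST values"
            return round(-math.log10((abs(abs(a)-abs(b)) + 1.e-15) / abs(b)))

        # fitted 2.0000e-3 vs certified 2.0002e-3: relative difference ~1e-4,
        # so the two values agree to about 4 digits
        print(ndig(2.0000e-3, 2.0002e-3))   # -> 4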
diff --git a/examples/test_peak.dat b/examples/test_peak.dat
new file mode 100644
index 0000000..972ca7a
--- /dev/null
+++ b/examples/test_peak.dat
@@ -0,0 +1,404 @@
+# test peak data
+#---------------------------------
+#  t         y 
+  0.000000    0.021654
+  0.050000    0.019221
+  0.100000   -0.146881
+  0.150000    0.109422
+  0.200000    0.385367
+  0.250000    0.426230
+  0.300000    0.019241
+  0.350000    0.075568
+  0.400000    0.193304
+  0.450000    0.237610
+  0.500000   -0.107071
+  0.550000    0.207026
+  0.600000    0.103481
+  0.650000    0.175033
+  0.700000    0.022074
+  0.750000    0.070510
+  0.800000    0.404041
+  0.850000    0.126622
+  0.900000   -0.138651
+  0.950000    0.149783
+  1.000000    0.212585
+  1.050000    0.133744
+  1.100000    0.190065
+  1.150000   -0.254227
+  1.200000    0.253212
+  1.250000    0.059663
+  1.300000    0.187533
+  1.350000    0.253744
+  1.400000   -0.037306
+  1.450000    0.080513
+  1.500000    0.012607
+  1.550000    0.224475
+  1.600000    0.271415
+  1.650000    0.118073
+  1.700000   -0.077723
+  1.750000    0.164330
+  1.800000    0.025614
+  1.850000   -0.034864
+  1.900000    0.068968
+  1.950000   -0.103238
+  2.000000    0.066419
+  2.050000    0.271850
+  2.100000    0.139049
+  2.150000    0.162034
+  2.200000   -0.034347
+  2.250000    0.135812
+  2.300000    0.067858
+  2.350000   -0.161792
+  2.400000    0.153702
+  2.450000    0.071054
+  2.500000   -0.049010
+  2.550000    0.203306
+  2.600000    0.161341
+  2.650000    0.199279
+  2.700000    0.252416
+  2.750000    0.355513
+  2.800000   -0.097676
+  2.850000    0.254533
+  2.900000    0.217187
+  2.950000    0.154375
+  3.000000   -0.061880
+  3.050000    0.128343
+  3.100000    0.205941
+  3.150000    0.349665
+  3.200000    0.085341
+  3.250000    0.125593
+  3.300000    0.254381
+  3.350000    0.006456
+  3.400000    0.083674
+  3.450000    0.126626
+  3.500000    0.132028
+  3.550000    0.367231
+  3.600000    0.190944
+  3.650000   -0.004054
+  3.700000    0.072112
+  3.750000    0.383266
+  3.800000    0.222168
+  3.850000    0.098595
+  3.900000    0.324558
+  3.950000    0.125419
+  4.000000    0.214417
+  4.050000    0.287499
+  4.100000    0.230579
+  4.150000    0.141035
+  4.200000    0.341221
+  4.250000    0.162993
+  4.300000    0.174737
+  4.350000    0.483097
+  4.400000    0.634501
+  4.450000    0.152268
+  4.500000    0.440815
+  4.550000    0.125279
+  4.600000    0.302566
+  4.650000    0.612674
+  4.700000   -0.023226
+  4.750000    0.481199
+  4.800000    0.101096
+  4.850000    0.572197
+  4.900000    0.394625
+  4.950000    0.461077
+  5.000000   -0.106441
+  5.050000    0.635505
+  5.100000    0.440675
+  5.150000    0.335979
+  5.200000    0.567396
+  5.250000    0.588661
+  5.300000    0.101309
+  5.350000    0.370770
+  5.400000    0.531899
+  5.450000    0.347064
+  5.500000    0.387862
+  5.550000    0.415243
+  5.600000    0.459800
+  5.650000    0.559310
+  5.700000    0.527272
+  5.750000    0.659222
+  5.800000    0.646655
+  5.850000    0.872127
+  5.900000    0.506336
+  5.950000    0.832841
+  6.000000    0.662228
+  6.050000    0.666240
+  6.100000    0.745486
+  6.150000    0.773303
+  6.200000    0.820844
+  6.250000    0.949833
+  6.300000    0.999748
+  6.350000    1.194918
+  6.400000    0.947696
+  6.450000    1.034669
+  6.500000    1.004666
+  6.550000    1.155702
+  6.600000    1.541353
+  6.650000    1.342422
+  6.700000    1.477986
+  6.750000    1.375675
+  6.800000    1.763981
+  6.850000    1.638405
+  6.900000    1.652637
+  6.950000    2.125423
+  7.000000    1.846081
+  7.050000    2.008594
+  7.100000    1.967327
+  7.150000    2.420829
+  7.200000    2.986333
+  7.250000    2.816069
+  7.300000    2.779284
+  7.350000    2.452606
+  7.400000    3.182907
+  7.450000    3.345209
+  7.500000    3.210506
+  7.550000    3.630722
+  7.600000    3.786487
+  7.650000    4.288308
+  7.700000    4.107791
+  7.750000    4.223391
+  7.800000    4.822287
+  7.850000    4.852727
+  7.900000    5.153562
+  7.950000    5.540655
+  8.000000    5.739122
+  8.050000    5.965430
+  8.100000    5.893505
+  8.150000    6.520379
+  8.200000    6.744448
+  8.250000    6.982811
+  8.300000    6.871811
+  8.350000    7.381590
+  8.400000    7.295213
+  8.450000    7.770220
+  8.500000    7.855105
+  8.550000    8.178695
+  8.600000    8.737766
+  8.650000    8.659328
+  8.700000    8.761986
+  8.750000    9.325407
+  8.800000    9.693782
+  8.850000    9.493158
+  8.900000    9.840173
+  8.950000    9.591383
+  9.000000    9.894218
+  9.050000    9.781619
+  9.100000    9.787061
+  9.150000    9.944484
+  9.200000    10.193956
+  9.250000    10.452393
+  9.300000    10.198352
+  9.350000    10.220196
+  9.400000    10.091519
+  9.450000    9.803956
+  9.500000    9.976457
+  9.550000    9.644976
+  9.600000    9.652392
+  9.650000    9.364996
+  9.700000    9.141562
+  9.750000    9.123553
+  9.800000    8.670938
+  9.850000    8.830762
+  9.900000    8.612662
+  9.950000    8.200565
+  10.000000    8.004205
+  10.050000    7.786050
+  10.100000    7.729310
+  10.150000    7.287126
+  10.200000    6.773599
+  10.250000    6.820778
+  10.300000    6.790992
+  10.350000    6.324548
+  10.400000    6.076502
+  10.450000    5.768973
+  10.500000    5.787036
+  10.550000    5.553690
+  10.600000    5.127315
+  10.650000    4.902255
+  10.700000    4.929891
+  10.750000    4.171166
+  10.800000    4.303762
+  10.850000    3.767545
+  10.900000    3.791083
+  10.950000    3.814857
+  11.000000    3.426006
+  11.050000    3.078426
+  11.100000    2.789747
+  11.150000    2.620130
+  11.200000    2.416431
+  11.250000    2.430768
+  11.300000    2.268585
+  11.350000    2.235498
+  11.400000    2.311363
+  11.450000    2.005221
+  11.500000    1.970229
+  11.550000    1.907982
+  11.600000    1.748020
+  11.650000    1.481710
+  11.700000    1.519127
+  11.750000    1.777618
+  11.800000    1.135594
+  11.850000    1.345861
+  11.900000    1.046777
+  11.950000    1.040376
+  12.000000    0.888514
+  12.050000    0.994942
+  12.100000    1.002009
+  12.150000    1.235839
+  12.200000    1.030794
+  12.250000    0.894109
+  12.300000    0.839384
+  12.350000    0.564763
+  12.400000    0.543024
+  12.450000    1.067728
+  12.500000    0.569039
+  12.550000    0.546196
+  12.600000    0.767751
+  12.650000    0.372794
+  12.700000    0.506039
+  12.750000    0.094006
+  12.800000    0.657551
+  12.850000    0.689847
+  12.900000    0.235074
+  12.950000    0.511880
+  13.000000    0.495730
+  13.050000    0.720208
+  13.100000    0.458972
+  13.150000    0.515104
+  13.200000    0.447520
+  13.250000    0.309378
+  13.300000    0.336000
+  13.350000    0.403743
+  13.400000    0.173839
+  13.450000    0.542466
+  13.500000    0.435708
+  13.550000    0.502801
+  13.600000    0.256758
+  13.650000    0.269744
+  13.700000    0.204110
+  13.750000    0.219654
+  13.800000    0.596106
+  13.850000    0.272604
+  13.900000    0.228125
+  13.950000    0.308160
+  14.000000    0.065328
+  14.050000    0.491292
+  14.100000    0.494818
+  14.150000    0.321783
+  14.200000    0.197267
+  14.250000    0.602161
+  14.300000    0.155016
+  14.350000    0.333368
+  14.400000    0.260038
+  14.450000    0.149090
+  14.500000    0.164818
+  14.550000    0.032011
+  14.600000    0.460880
+  14.650000    0.275423
+  14.700000    0.343308
+  14.750000    0.348898
+  14.800000    0.335248
+  14.850000    0.223771
+  14.900000    0.056021
+  14.950000    0.146267
+  15.000000    0.295977
+  15.050000    0.029256
+  15.100000    0.188720
+  15.150000    0.185713
+  15.200000   -0.010228
+  15.250000   -0.075438
+  15.300000   -0.049977
+  15.350000    0.156545
+  15.400000    0.138670
+  15.450000    0.430603
+  15.500000    0.107233
+  15.550000    0.268609
+  15.600000    0.192113
+  15.650000   -0.089082
+  15.700000    0.076649
+  15.750000    0.494606
+  15.800000    0.304371
+  15.850000    0.311904
+  15.900000    0.146849
+  15.950000   -0.035298
+  16.000000    0.442517
+  16.050000    0.129210
+  16.100000    0.202598
+  16.150000   -0.038198
+  16.200000    0.164944
+  16.250000    0.089727
+  16.300000   -0.029338
+  16.350000    0.321681
+  16.400000    0.001907
+  16.450000    0.357234
+  16.500000    0.706248
+  16.550000    0.189379
+  16.600000    0.207504
+  16.650000    0.252780
+  16.700000    0.337652
+  16.750000    0.164710
+  16.800000    0.012640
+  16.850000   -0.200321
+  16.900000    0.063620
+  16.950000    0.014513
+  17.000000    0.090878
+  17.050000    0.261647
+  17.100000    0.140731
+  17.150000    0.351465
+  17.200000   -0.222967
+  17.250000    0.192524
+  17.300000   -0.083316
+  17.350000    0.139459
+  17.400000    0.391717
+  17.450000   -0.091359
+  17.500000   -0.118886
+  17.550000   -0.054844
+  17.600000    0.180295
+  17.650000    0.065399
+  17.700000    0.319015
+  17.750000    0.166328
+  17.800000    0.206875
+  17.850000    0.108605
+  17.900000    0.085493
+  17.950000    0.270683
+  18.000000    0.240595
+  18.050000    0.299822
+  18.100000   -0.040008
+  18.150000    0.306279
+  18.200000   -0.037437
+  18.250000    0.006128
+  18.300000    0.224231
+  18.350000    0.054691
+  18.400000    0.139918
+  18.450000   -0.079608
+  18.500000   -0.215388
+  18.550000   -0.063221
+  18.600000    0.012560
+  18.650000   -0.138384
+  18.700000    0.326622
+  18.750000    0.130812
+  18.800000   -0.053009
+  18.850000   -0.028960
+  18.900000    0.053191
+  18.950000    0.239460
+  19.000000    0.226069
+  19.050000   -0.016509
+  19.100000    0.155364
+  19.150000    0.186324
+  19.200000    0.076879
+  19.250000    0.184640
+  19.300000    0.194979
+  19.350000    0.153825
+  19.400000    0.078599
+  19.450000    0.082126
+  19.500000    0.069517
+  19.550000    0.169040
+  19.600000    0.016125
+  19.650000   -0.145533
+  19.700000   -0.314756
+  19.750000    0.409688
+  19.800000   -0.071217
+  19.850000   -0.318566
+  19.900000    0.159099
+  19.950000   -0.014190
+  20.000000   -0.091474
diff --git a/examples/use_models1d.py b/examples/use_models1d.py
index 239f12c..d5d9f4b 100644
--- a/examples/use_models1d.py
+++ b/examples/use_models1d.py
@@ -1,6 +1,6 @@
 import numpy as np
-from lmfit.models1d import LinearModel, QuadraticModel, ExponentialModel
-from lmfit.models1d import  LorenztianModel, GaussianModel, VoigtModel
+from lmfit.old_models1d import LinearModel, QuadraticModel, ExponentialModel
+from lmfit.old_models1d import  LorenztianModel, GaussianModel, VoigtModel
 import matplotlib.pyplot as plt
 
 
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index 2c3f84f..fdcf8ce 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -7,25 +7,24 @@
    installed, parameters can be constrained as a simple
    mathematical expression of other Parameters.
 
-   version: 0.7.4
-   last update: 2014-Jan-22
+   version: 0.8.0-rc1
+   last update: 2014-Aug-13
    License: BSD
    Author:  Matthew Newville <newville at cars.uchicago.edu>
             Center for Advanced Radiation Sources,
             The University of Chicago
 """
-__version__ = '0.7.4'
+__version__ = '0.8.0-rc1'
 from .minimizer import minimize, Minimizer, MinimizerException
 from .parameter import Parameter, Parameters
 from .confidence import conf_interval, conf_interval2d
 from .printfuncs import (fit_report, ci_report,
                          report_fit, report_ci, report_errors)
 
-from .wrap      import wrap_function, make_paras_and_func
+from .wrap import wrap_function, make_paras_and_func
 from .model import Model
-from .specified_models import *
+from . import models
 
-from . import models1d
 from . import uncertainties
 from .uncertainties import ufloat, correlated_values
 
diff --git a/lmfit/asteval.py b/lmfit/asteval.py
index 9dabf82..cd6ca7f 100644
--- a/lmfit/asteval.py
+++ b/lmfit/asteval.py
@@ -16,8 +16,9 @@ import ast
 import math
 
 from .astutils import (FROM_PY, FROM_MATH, FROM_NUMPY, UNSAFE_ATTRS,
-                       NUMPY_RENAMES, op2func, ExceptionHolder,
-                       ReturnedNone, valid_symbol_name)
+                       LOCALFUNCS, NUMPY_RENAMES, op2func,
+                       ExceptionHolder, ReturnedNone, valid_symbol_name)
+
 HAS_NUMPY = False
 try:
     import numpy
@@ -25,8 +26,6 @@ try:
 except ImportError:
     print("Warning: numpy not available... functionality will be limited.")
 
-__version__ = '0.3'
-
 
 class Interpreter:
     """mathematical expression compiler and interpreter.
@@ -35,23 +34,30 @@ class Interpreter:
   using python's ast module, and then executes the AST representation
   using a dictionary of named object (variable, functions).
 
-  This then gives a restricted version of Python, being a procedural
-  language (though working on Python objects) with a simplified, flat
-  namespace (this is overcome in related implementaions). The program
-  syntax here is expected to be valid Python.
-
-  The following Python syntax elements are not supported:
-      Import, Exec, Lambda, Class, Global, Generators,
-      Yield, Decorators, Finally for Try-Except
+  The result is a restricted, simplified version of Python meant for
+  numerical calculations that is somewhat safer than 'eval' because some
+  operations (such as 'import' and 'eval') are simply not allowed.  The
+  resulting language uses a flat namespace that works on Python objects,
+  but does not allow new classes to be defined.
 
   Many parts of Python syntax are supported, including:
+     for loops, while loops, if-then-elif-else conditionals
+     try-except (including 'finally')
+     function definitions with def
      advanced slicing:    a[::-1], array[-3:, :, ::2]
      if-expressions:      out = one_thing if TEST else other
-     list comprehension
-     for-loops, while-loops
-     if-then-elif-else conditionals
-     try-except (but not the 'finally' variant...)
-     function definitions with def
+     list comprehension   out = [sqrt(i) for i in values]
+
+  The following Python syntax elements are not supported:
+      Import, Exec, Lambda, Class, Global, Generators,
+      Yield, Decorators
+
+  In addition, while many builtin functions are supported, several
+  builtin functions are missing ('eval', 'exec', and 'getattr' for
+  example) that can be considered unsafe.
+
+  If numpy is installed, many numpy functions are also imported.
+
   """
 
     supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
@@ -61,8 +67,7 @@ class Interpreter:
                        'functiondef', 'if', 'ifexp', 'index', 'interrupt',
                        'list', 'listcomp', 'module', 'name', 'num', 'pass',
                        'print', 'raise', 'repr', 'return', 'slice', 'str',
-                       'subscript', 'tryexcept', 'tuple', 'unaryop',
-                       'while')
+                       'subscript', 'try', 'tuple', 'unaryop', 'while')
 
     def __init__(self, symtable=None, writer=None, use_numpy=True):
         self.writer = writer or stdout
@@ -72,22 +77,25 @@ class Interpreter:
         self.symtable = symtable
         self._interrupt = None
         self.error = []
+        self.error_msg = None
         self.expr = None
         self.retval = None
         self.lineno = 0
-        global HAS_NUMPY
-        if not use_numpy:
-            HAS_NUMPY = False
+        self.use_numpy = HAS_NUMPY and use_numpy
 
         symtable['print'] = self._printer
         for sym in FROM_PY:
             if sym in __builtins__:
                 symtable[sym] = __builtins__[sym]
+
+        for symname, obj in LOCALFUNCS.items():
+            symtable[symname] = obj
+
         for sym in FROM_MATH:
             if hasattr(math, sym):
                 symtable[sym] = getattr(math, sym)
 
-        if HAS_NUMPY:
+        if self.use_numpy:
             for sym in FROM_NUMPY:
                 if hasattr(numpy, sym):
                     symtable[sym] = getattr(numpy, sym)
@@ -98,6 +106,11 @@ class Interpreter:
         self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
                                    for node in self.supported_nodes))
 
+        # to rationalize try/except try/finally for Python2.6 through Python3.3
+        self.node_handlers['tryexcept'] = self.node_handlers['try']
+        self.node_handlers['tryfinally'] = self.node_handlers['try']
+
+
     def unimplemented(self, node):
         "unimplemented nodes"
         self.raise_exception(node, exc=NotImplementedError,
@@ -116,7 +129,17 @@ class Interpreter:
         err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno)
         self._interrupt = ast.Break()
         self.error.append(err)
-        raise RuntimeError(err.msg)
+        if self.error_msg is None:
+            self.error_msg = "%s in expr='%s'" % (msg, self.expr)
+        elif len(msg) > 0:
+            self.error_msg = "%s\n %s" % (self.error_msg, msg)
+        if exc is None:
+            try:
+                exc = self.error[0].exc
+            except:
+                exc = RuntimeError
+        raise exc(self.error_msg)
+
 
     # main entry point for Ast node evaluation
     #  parse:  text of statements -> ast
@@ -127,9 +150,10 @@ class Interpreter:
         self.expr = text
         try:
             return ast.parse(text)
+        except SyntaxError:
+            self.raise_exception(None, msg='Syntax Error', expr=text)
         except:
-            self.raise_exception(None, exc=SyntaxError,
-                                 msg='Syntax Error', expr=text)
+            self.raise_exception(None, msg='Runtime Error', expr=text)
 
     def run(self, node, expr=None, lineno=None, with_raise=True):
         """executes parsed Ast representation for an expression"""
@@ -173,22 +197,30 @@ class Interpreter:
         self.error = []
         try:
             node = self.parse(expr)
-        except RuntimeError:
+        except:
             errmsg = exc_info()[1]
             if len(self.error) > 0:
                 errmsg = "\n".join(self.error[0].get_error())
             if not show_errors:
-                raise RuntimeError(errmsg)
+                try:
+                    exc = self.error[0].exc
+                except:
+                    exc = RuntimeError
+                raise exc(errmsg)
             print(errmsg, file=self.writer)
             return
         try:
             return self.run(node, expr=expr, lineno=lineno)
-        except RuntimeError:
+        except:
             errmsg = exc_info()[1]
             if len(self.error) > 0:
                 errmsg = "\n".join(self.error[0].get_error())
             if not show_errors:
-                raise RuntimeError(errmsg)
+                try:
+                    exc = self.error[0].exc
+                except:
+                    exc = RuntimeError
+                raise exc(errmsg)
             print(errmsg, file=self.writer)
             return
 
@@ -319,22 +351,28 @@ class Interpreter:
     def on_attribute(self, node):    # ('value', 'attr', 'ctx')
         "extract attribute"
         ctx = node.ctx.__class__
-        if ctx == ast.Load:
-            sym = self.run(node.value)
-            if hasattr(sym, node.attr) and node.attr not in UNSAFE_ATTRS:
-                return getattr(sym, node.attr)
-            else:
-                obj = self.run(node.value)
-                fmt = "%s does not have attribute '%s'"
-                msg = fmt % (obj, node.attr)
-                self.raise_exception(node, exc=AttributeError, msg=msg)
-
-        elif ctx == ast.Del:
-            return delattr(sym, node.attr)
-        elif ctx == ast.Store:
+        if ctx == ast.Store:
             msg = "attribute for storage: shouldn't be here!"
             self.raise_exception(node, exc=RuntimeError, msg=msg)
 
+        sym = self.run(node.value)
+        if ctx == ast.Del:
+            return delattr(sym, node.attr)
+
+        # ctx is ast.Load
+        fmt = "cannnot access attribute '%s' for %s"
+        if node.attr not in UNSAFE_ATTRS:
+            fmt = "no attribute '%s' for %s"
+            try:
+                return getattr(sym, node.attr)
+            except AttributeError:
+                pass
+
+        # AttributeError or accessed unsafe attribute
+        obj = self.run(node.value)
+        msg = fmt % (node.attr, obj)
+        self.raise_exception(node, exc=AttributeError, msg=msg)
+
     def on_assign(self, node):    # ('targets', 'value')
         "simple assignment"
         val = self.run(node.value)
@@ -419,7 +457,7 @@ class Interpreter:
             rval = self.run(rnode)
             out = op2func(op)(lval, rval)
             lval = rval
-            if HAS_NUMPY and isinstance(out, numpy.ndarray) and out.any():
+            if self.use_numpy and isinstance(out, numpy.ndarray) and out.any():
                 break
             elif not out:
                 break
@@ -511,8 +549,8 @@ class Interpreter:
         "exception handler..."
         return (self.run(node.type), node.name, node.body)
 
-    def on_tryexcept(self, node):    # ('body', 'handlers', 'orelse')
-        "try/except blocks"
+    def on_try(self, node):    # ('body', 'handlers', 'orelse', 'finalbody')
+        "try/except/else/finally blocks"
         no_errors = True
         for tnode in node.body:
             self.run(tnode, with_raise=False)
@@ -530,10 +568,14 @@ class Interpreter:
                         for tline in hnd.body:
                             self.run(tline)
                         break
-        if no_errors:
+        if no_errors and hasattr(node, 'orelse'):
             for tnode in node.orelse:
                 self.run(tnode)
 
+        if hasattr(node, 'finalbody'):
+            for tnode in node.finalbody:
+                self.run(tnode)
+
     def on_raise(self, node):    # ('type', 'inst', 'tback')
         "raise statement: note difference for python 2 and 3"
         if version_info[0] == 3:
@@ -574,8 +616,7 @@ class Interpreter:
         try:
             return func(*args, **keywords)
         except:
-            self.raise_exception(node, exc=RuntimeError,
-                                 msg="Error running %s" % (func))
+            self.raise_exception(node, msg="Error running %s" % (func))
 
     def on_arg(self, node):    # ('test', 'msg')
         "arg for function definitions"
@@ -623,8 +664,8 @@ class Procedure(object):
                  body=None, args=None, kwargs=None,
                  vararg=None, varkws=None):
         self.name = name
-        self.interpreter = interp
-        self.raise_exc = self.interpreter.raise_exception
+        self.__asteval__ = interp
+        self.raise_exc = self.__asteval__.raise_exception
         self.__doc__ = doc
         self.body = body
         self.argnames = args
@@ -709,22 +750,22 @@ class Procedure(object):
             msg = 'incorrect arguments for Procedure %s' % self.name
             self.raise_exc(None, msg=msg, lineno=self.lineno)
 
-        save_symtable = self.interpreter.symtable.copy()
-        self.interpreter.symtable.update(symlocals)
-        self.interpreter.retval = None
+        save_symtable = self.__asteval__.symtable.copy()
+        self.__asteval__.symtable.update(symlocals)
+        self.__asteval__.retval = None
         retval = None
 
         # evaluate script of function
         for node in self.body:
-            self.interpreter.run(node, expr='<>', lineno=self.lineno)
-            if len(self.interpreter.error) > 0:
+            self.__asteval__.run(node, expr='<>', lineno=self.lineno)
+            if len(self.__asteval__.error) > 0:
                 break
-            if self.interpreter.retval is not None:
-                retval = self.interpreter.retval
+            if self.__asteval__.retval is not None:
+                retval = self.__asteval__.retval
                 if retval is ReturnedNone:
                     retval = None
                 break
 
-        self.interpreter.symtable = save_symtable
+        self.__asteval__.symtable = save_symtable
         symlocals = None
         return retval
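
    A minimal sketch of the restricted language described in the docstring
    above (assuming only the parse/eval entry points shown in these hunks,
    with math and numpy names preloaded into the flat symbol table):

        from lmfit.asteval import Interpreter

        aeval = Interpreter()
        aeval.symtable['x'] = 3.0
        print(aeval.eval('sqrt(x**2 + 16)'))   # -> 5.0
        aeval.eval('import os')                # 'import' is unsupported and is
                                               # reported as an error, not run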
diff --git a/lmfit/astutils.py b/lmfit/astutils.py
index 8bc9d3a..7c75dc0 100644
--- a/lmfit/astutils.py
+++ b/lmfit/astutils.py
@@ -19,11 +19,13 @@ RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue',
 
 NAME_MATCH = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$").match
 
-UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__',
-                '__code__', '__closure__', '__func__', '__self__',
-                '__module__', '__dict__', '__class__', 'func_globals',
-                'func_code', 'func_closure', 'im_class', 'im_func',
-                'im_self', 'gi_code', 'gi_frame')
+UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__', '__code__',
+                '__closure__', '__func__', '__self__', '__module__',
+                '__dict__', '__class__', '__call__', '__get__',
+                '__getattribute__', '__subclasshook__', '__new__',
+                '__init__', 'func_globals', 'func_code', 'func_closure',
+                'im_class', 'im_func', 'im_self', 'gi_code', 'gi_frame',
+                '__asteval__')
 
 # inherit these from python's __builtins__
 FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
@@ -40,13 +42,12 @@ FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
            'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
            'UnicodeTranslateError', 'UnicodeWarning', 'ValueError',
            'Warning', 'ZeroDivisionError', 'abs', 'all', 'any', 'bin',
-           'bool', 'bytearray', 'bytes', 'chr', 'complex', 'delattr',
-           'dict', 'dir', 'divmod', 'enumerate', 'filter', 'float',
-           'format', 'frozenset', 'getattr', 'hasattr', 'hash', 'hex',
-           'id', 'int', 'isinstance', 'len', 'list', 'map', 'max', 'min',
-           'oct', 'open', 'ord', 'pow', 'property', 'range', 'repr',
-           'reversed', 'round', 'set', 'setattr', 'slice', 'sorted', 'str',
-           'sum', 'tuple', 'type', 'zip')
+           'bool', 'bytearray', 'bytes', 'chr', 'complex', 'dict', 'dir',
+           'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset',
+           'hash', 'hex', 'id', 'int', 'isinstance', 'len', 'list', 'map',
+           'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr',
+           'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum',
+           'tuple', 'type', 'zip')
 
 # inherit these from python's math
 FROM_MATH = ('acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
@@ -130,61 +131,73 @@ FROM_NUMPY = ('Inf', 'NAN', 'abs', 'absolute', 'add', 'alen', 'all',
               'record', 'remainder', 'repeat', 'require', 'reshape',
               'resize', 'restoredot', 'right_shift', 'rint', 'roll',
               'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack',
-              's_', 'safe_eval', 'save', 'savetxt', 'savez', 'sctype2char',
-              'sctypeDict', 'sctypeNA', 'sctypes', 'searchsorted',
-              'select', 'setbufsize', 'setdiff1d', 'seterr', 'setxor1d',
-              'shape', 'short', 'sign', 'signbit', 'signedinteger', 'sin',
-              'sinc', 'single', 'singlecomplex', 'sinh', 'size',
-              'sometrue', 'sort', 'sort_complex', 'source', 'spacing',
-              'split', 'sqrt', 'square', 'squeeze', 'std', 'str', 'str_',
-              'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh',
-              'tensordot', 'test', 'testing', 'tile', 'trace', 'transpose',
-              'trapz', 'tri', 'tril', 'tril_indices', 'tril_indices_from',
-              'trim_zeros', 'triu', 'triu_indices', 'triu_indices_from',
-              'true_divide', 'trunc', 'typeDict', 'typeNA', 'typecodes',
-              'typename', 'ubyte', 'ufunc', 'uint', 'uint0', 'uint16',
-              'uint32', 'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong',
-              'union1d', 'unique', 'unravel_index', 'unsignedinteger',
-              'unwrap', 'ushort', 'vander', 'var', 'vdot', 'vectorize',
-              'version', 'void', 'void0', 'vsplit', 'vstack', 'where',
-              'who', 'zeros', 'zeros_like')
+              's_', 'sctype2char', 'sctypeDict', 'sctypeNA', 'sctypes',
+              'searchsorted', 'select', 'setbufsize', 'setdiff1d',
+              'seterr', 'setxor1d', 'shape', 'short', 'sign', 'signbit',
+              'signedinteger', 'sin', 'sinc', 'single', 'singlecomplex',
+              'sinh', 'size', 'sometrue', 'sort', 'sort_complex', 'source',
+              'spacing', 'split', 'sqrt', 'square', 'squeeze', 'std',
+              'str', 'str_', 'subtract', 'sum', 'swapaxes', 'take', 'tan',
+              'tanh', 'tensordot', 'test', 'testing', 'tile', 'trace',
+              'transpose', 'trapz', 'tri', 'tril', 'tril_indices',
+              'tril_indices_from', 'trim_zeros', 'triu', 'triu_indices',
+              'triu_indices_from', 'true_divide', 'trunc', 'typeDict',
+              'typeNA', 'typecodes', 'typename', 'ubyte', 'ufunc', 'uint',
+              'uint0', 'uint16', 'uint32', 'uint64', 'uint8', 'uintc',
+              'uintp', 'ulonglong', 'union1d', 'unique', 'unravel_index',
+              'unsignedinteger', 'unwrap', 'ushort', 'vander', 'var',
+              'vdot', 'vectorize', 'version', 'void', 'void0', 'vsplit',
+              'vstack', 'where', 'who', 'zeros', 'zeros_like')
 
 NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos',
                  'atan': 'arctan', 'atan2': 'arctan2', 'atanh':
                  'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'}
 
-OPERATORS = {ast.Is:     lambda a, b: a is b,
-             ast.IsNot:  lambda a, b: a is not b,
-             ast.In:     lambda a, b: a in b,
-             ast.NotIn:  lambda a, b: a not in b,
-             ast.Add:    lambda a, b: a + b,
+def _open(filename, mode='r', buffering=0):
+    """read only version of open()"""
+    umode = 'r'
+    if mode == 'rb':
+        umode = 'rb'
+    return open(filename, umode, buffering)
+
+LOCALFUNCS = {'open': _open}
+
+OPERATORS = {ast.Is: lambda a, b: a is b,
+             ast.IsNot: lambda a, b: a is not b,
+             ast.In: lambda a, b: a in b,
+             ast.NotIn: lambda a, b: a not in b,
+             ast.Add: lambda a, b: a + b,
              ast.BitAnd: lambda a, b: a & b,
-             ast.BitOr:  lambda a, b: a | b,
+             ast.BitOr: lambda a, b: a | b,
              ast.BitXor: lambda a, b: a ^ b,
-             ast.Div:    lambda a, b: a / b,
+             ast.Div: lambda a, b: a / b,
              ast.FloorDiv: lambda a, b: a // b,
              ast.LShift: lambda a, b: a << b,
              ast.RShift: lambda a, b: a >> b,
-             ast.Mult:   lambda a, b: a * b,
-             ast.Pow:    lambda a, b: a ** b,
-             ast.Sub:    lambda a, b: a - b,
-             ast.Mod:    lambda a, b: a % b,
-             ast.And:    lambda a, b: a and b,
-             ast.Or:     lambda a, b: a or b,
-             ast.Eq:     lambda a, b: a == b,
-             ast.Gt:     lambda a, b: a > b,
-             ast.GtE:    lambda a, b: a >= b,
-             ast.Lt:     lambda a, b: a < b,
-             ast.LtE:    lambda a, b: a <= b,
-             ast.NotEq:  lambda a, b: a != b,
+             ast.Mult: lambda a, b: a * b,
+             ast.Pow: lambda a, b: a ** b,
+             ast.Sub: lambda a, b: a - b,
+             ast.Mod: lambda a, b: a % b,
+             ast.And: lambda a, b: a and b,
+             ast.Or: lambda a, b: a or b,
+             ast.Eq: lambda a, b: a == b,
+             ast.Gt: lambda a, b: a > b,
+             ast.GtE: lambda a, b: a >= b,
+             ast.Lt: lambda a, b: a < b,
+             ast.LtE: lambda a, b: a <= b,
+             ast.NotEq: lambda a, b: a != b,
              ast.Invert: lambda a: ~a,
-             ast.Not:    lambda a: not a,
-             ast.UAdd:   lambda a: +a,
-             ast.USub:   lambda a: -a}
+             ast.Not: lambda a: not a,
+             ast.UAdd: lambda a: +a,
+             ast.USub: lambda a: -a}
 
 
 def valid_symbol_name(name):
-    "input is a valid name"
+    """determines whether the input symbol name is a valid name
+
+    This checks for reserved words, and that the name matches the
+    regular expression ``[a-zA-Z_][a-zA-Z0-9_]``
+    """
     if name in RESERVED_WORDS:
         return False
     return NAME_MATCH(name) is not None
@@ -196,6 +209,10 @@ def op2func(op):
 
 
 class Empty:
+    """empty class"""
+    def __init__(self):
+        pass
+
     def __nonzero__(self):
         return False
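
    The symbol-name check documented above is easy to exercise directly; a
    small sketch using only what is defined in this file:

        from lmfit.astutils import valid_symbol_name

        valid_symbol_name('amp_g')    # True: matches [a-zA-Z_][a-zA-Z0-9_]*
        valid_symbol_name('2theta')   # False: may not start with a digit
        valid_symbol_name('class')    # False: reserved word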
 
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index aa4ec18..9631232 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -10,6 +10,9 @@ from scipy.stats import f
 from scipy.optimize import brentq
 from .minimizer import MinimizerException
 
+CONF_ERR_GEN    = 'Cannot determine Confidence Intervals'
+CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
+CONF_ERR_NVARS  = '%s with < 2 variables' % CONF_ERR_GEN
 
 def f_compare(ndata, nparas, new_chi, best_chi, nfix=1.):
     """
@@ -154,13 +157,18 @@ class ConfidenceInterval(object):
         self.fit_params = [minimizer.params[p] for p in self.p_names]
 
         # check that there are at least 2 true variables!
+        # check that all stderrs are sensible (including not None or NaN)
         nvars = 0
-        for p in self.p_names:
-            if minimizer.params[p].vary:
+        for par in self.fit_params:
+            if par.vary:
                 nvars += 1
+            try:
+                if not (par.vary and par.stderr > 0.0):
+                    raise MinimizerException(CONF_ERR_STDERR)
+            except TypeError:
+                raise MinimizerException(CONF_ERR_STDERR)
         if nvars < 2:
-            raise MinimizerException(
-                'Cannot determine Confidence Intervals with < 2 variables!')
+            raise MinimizerException(CONF_ERR_NVARS)
 
         if prob_func is None or not hasattr(prob_func, '__call__'):
             self.prob_func = f_compare
@@ -217,7 +225,8 @@ class ConfidenceInterval(object):
         start_val = para.value.copy()
         a_limit = start_val.copy()
         ret = []
-
+        orig_warn_settings = np.geterr()
+        np.seterr(all='ignore')
         for prob in self.sigmas:
             if prob > max_prob:
                 ret.append((prob, direction*np.inf))
@@ -229,14 +238,18 @@ class ConfidenceInterval(object):
 
             except ValueError:
                 self.reset_vals()
-                val = brentq(calc_prob, start_val,
-                             limit, rtol=.5e-4, args=prob)
+                try:
+                    val = brentq(calc_prob, start_val,
+                                 limit, rtol=.5e-4, args=prob)
+                except ValueError:
+                    val = np.nan
 
             a_limit = val
             ret.append((prob, val))
 
         para.vary = True
         self.reset_vals()
+        np.seterr(**orig_warn_settings)
         return ret
 
     def reset_vals(self):
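
    With these checks, conf_interval() now fails fast with a
    MinimizerException (using the CONF_ERR_STDERR / CONF_ERR_NVARS messages
    above) instead of failing obscurely mid-calculation. A hedged guard,
    assuming a fitted Minimizer instance named myfit as in the examples:

        from lmfit import conf_interval, MinimizerException

        try:
            ci = conf_interval(myfit)
        except MinimizerException as exc:
            # uncertainties missing/zero, or fewer than 2 varying parameters
            print('no confidence intervals: %s' % exc)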
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
new file mode 100644
index 0000000..06eb7ae
--- /dev/null
+++ b/lmfit/lineshapes.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+"""
+basic model line shapes and distribution functions
+"""
+from numpy import (pi, log, exp, sqrt, arctan, cos, where)
+from numpy.testing import assert_allclose
+
+from scipy.special import gamma as gamfcn
+from scipy.special import gammaln, erf, erfc, wofz
+
+log2 = log(2)
+s2pi = sqrt(2*pi)
+spi  = sqrt(pi)
+s2   = sqrt(2.0)
+
+def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """1 dimensional gaussian:
+    gaussian(x, amplitude, center, sigma)
+    """
+    return (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 /(2*sigma**2))
+
+def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """1 dimensional lorentzian
+    lorentzian(x, amplitude, center, sigma)
+    """
+    return (amplitude/(1 + ((1.0*x-center)/sigma)**2) ) / (pi*sigma)
+
+def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
+    """1 dimensional voigt function.
+    see http://en.wikipedia.org/wiki/Voigt_profile
+    """
+    if gamma is None:
+        gamma = sigma
+    z = (x-center + 1j*gamma)/ (sigma*s2)
+    return amplitude*wofz(z).real / (sigma*s2pi)
+
+def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
+    """1 dimensional pseudo-voigt:
+    pvoigt(x, amplitude, center, sigma, fraction)
+       = amplitude*(1-fraction)*gaussian(x, center, sigma) +
+         amplitude*fraction*lorentzian(x, center, sigma)
+    """
+    return ((1-fraction)*gaussian(x, amplitude, center, sigma) +
+               fraction*lorentzian(x, amplitude, center, sigma))
+
+def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=0.5):
+    """pearson7 lineshape, according to NIST StRD
+    though it seems Wikipedia gives a different formula...
+    pearson7(x, center, sigma, expon)
+    """
+    scale = amplitude * gamfcn(expon) * (sqrt((2**(1/expon) -1)) /
+                                         (gamfcn(expon-0.5)) / (sigma*spi))
+    return scale/(1 + (((1.0*x-center)/sigma)**2) * (2**(1/expon) -1))**expon
+
+def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
+    """Breit-Wigner-Fano lineshape:
+       = amplitude*(q*sigma/2 + x - center)**2 / ( (sigma/2)**2 + (x - center)**2 )
+    """
+    gam = sigma/2.0
+    return  amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
+
+def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
+    """amplitude for a damped harmonic oscillator
+    amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
+    """
+    center = max(1.e-9, abs(center))
+    return (amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+
+def logistic(x, amplitude=1., center=0., sigma=1.):
+    """Logistic lineshape (yet another sigmoidal curve)
+        = amplitude*(1.  - 1. / (1 + exp((x-center)/sigma)))
+    """
+    return amplitude*(1. - 1./(1. + exp((x-center)/sigma)))
+
+def lognormal(x, amplitude=1.0, center=0., sigma=1):
+    """log-normal function
+    lognormal(x, amplitude, center, sigma)
+        = (amplitude/(x*sigma*sqrt(2*pi))) * exp(-(ln(x) - center)**2 / (2*sigma**2))
+    """
+    return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
+
+def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """Student's t distribution:
+        gamma((sigma+1)/2)   (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
+     =  -------------------------
+        sqrt(sigma*pi)gamma(sigma/2)
+
+    """
+    s1  = (sigma+1)/2.0
+    denom = (sqrt(sigma*pi)*gamfcn(sigma/2))
+    return amplitude*(1 + (x-center)**2/sigma)**(-s1) * gamfcn(s1) / denom
+
+
+def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
+    """exponentially modified Gaussian
+
+    = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
+                erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
+
+    http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+    """
+    gss = gamma*sigma*sigma
+    arg1 = gamma*(center +gss/2.0 - x)
+    arg2 = (center + gss - x)/(s2*sigma)
+    return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
+
+def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
+    """Doniach Sunjic asymmetric lineshape, used for photo-emission
+
+    = amplitude * cos(pi*gamma/2 + (1-gamma)*arctan((x-center)/sigma)) /
+                  (sigma**2 + (x-center)**2)**((1-gamma)/2)
+
+    see http://www.casaxps.com/help_manual/line_shapes.htm
+    """
+    arg = (x-center)/sigma
+    gm1 = (1.0 - gamma)
+    scale = amplitude/(sigma**gm1)
+    return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
+
+def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
+    """Skewed Voigt lineshape, skewed with error function
+    useful for ad-hoc Compton scatter profile
+
+    with beta = skew/(sigma*sqrt(2))
+    = voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center)))
+
+    skew < 0:  tail to low value of centroid
+    skew > 0:  tail to high value of centroid
+
+    see http://en.wikipedia.org/wiki/Skew_normal_distribution
+    """
+    beta = skew/(s2*sigma)
+    asym = 1 + erf(beta*(x-center))
+    return asym * voigt(x, amplitude, center, sigma, gamma=gamma)
+
+def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
+    """step function:
+    starts at 0.0, ends at amplitude, with half-max at center, and
+    rising with form:
+      'linear' (default) = amplitude * min(1, max(0, arg))
+      'atan', 'arctan'   = amplitude * (0.5 + atan(arg)/pi)
+      'erf'              = amplitude * (1 + erf(arg))/2.0
+      'logistic'         = amplitude * [1 - 1/(1 + exp(arg))]
+
+    where arg = (x - center)/sigma
+    """
+    if abs(sigma) <  1.e-13:
+        sigma = 1.e-13
+
+    out = (x - center)/sigma
+    if form == 'erf':
+        out = 0.5*(1 + erf(out))
+    elif form.startswith('logi'):
+        out = (1. - 1./(1. + exp(out)))
+    elif form in ('atan', 'arctan'):
+        out = 0.5 + arctan(out)/pi
+    else:
+        out[where(out < 0)] = 0.0
+        out[where(out > 1)] = 1.0
+    return amplitude*out
+
+def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
+              center2=1.0, sigma2=1.0, form='linear'):
+    """rectangle function: step up, step down  (see step function)
+    starts at 0.0, rises to amplitude (at center1 with width sigma1)
+    then drops to 0.0 (at center2 with width sigma2) with form:
+      'linear' (default) = ramp_up + ramp_down
+      'atan', 'arctan'   = amplitude*(atan(arg1) + atan(arg2))/pi
+      'erf'              = amplitude*(erf(arg1) + erf(arg2))/2.
+      'logistic'         = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]
+
+    where arg1 =  (x - center1)/sigma1
+    and   arg2 = -(x - center2)/sigma2
+    """
+    if abs(sigma1) <  1.e-13:
+        sigma1 = 1.e-13
+    if abs(sigma2) <  1.e-13:
+        sigma2 = 1.e-13
+
+    arg1 = (x - center1)/sigma1
+    arg2 = (center2 - x)/sigma2
+    if form == 'erf':
+        out = 0.5*(erf(arg1) + erf(arg2))
+    elif form.startswith('logi'):
+        out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
+    elif form in ('atan', 'arctan'):
+        out = (arctan(arg1) + arctan(arg2))/pi
+    else:
+        arg1[where(arg1 <  0)]  = 0.0
+        arg1[where(arg1 >  1)]  = 1.0
+        arg2[where(arg2 >  0)]  = 0.0
+        arg2[where(arg2 < -1)] = -1.0
+        out = arg1 + arg2
+    return amplitude*out
+
+def _erf(x):
+    """error function.  = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])"""
+    return erf(x)
+
+def _erfc(x):
+    """complented error function.  = 1 - erf(x)"""
+    return erfc(x)
+
+def _wofz(x):
+    """fadeeva function for complex argument. = exp(-x**2)*erfc(-i*x)"""
+    return wofz(x)
+
+def _gamma(x):
+    """gamma function"""
+    return gamfcn(x)
+
+def _gammaln(x):
+    """log of absolute value of gamma function"""
+    return gammaln(x)
+
+
+def exponential(x, amplitude=1, decay=1):
+    "x -> amplitude * exp(-x/decay)"
+    return amplitude * exp(-x/decay)
+
+
+def powerlaw(x, amplitude=1, exponent=1.0):
+    "x -> amplitude * x**exponent"
+    return amplitude * x**exponent
+
+
+def linear(x, slope, intercept):
+    "x -> slope * x + intercept"
+    return slope * x + intercept
+
+
+def parabolic(x, a, b, c):
+    "x -> a * x**2 + b * x + c"
+    return a * x**2 + b * x + c
+
+
+def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
+                         err_msg='', verbose=True):
+    """returns whether all parameter values in actual are close to
+    those in desired"""
+    for param_name, value in desired.items():
+        assert_allclose(actual[param_name], value, rtol,
+                        atol, err_msg, verbose)
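
    A short sketch exercising a few of the new lineshapes (all names and
    default arguments exactly as defined in the file above):

        from numpy import linspace
        from lmfit.lineshapes import gaussian, voigt, pvoigt

        x = linspace(-5, 5, 201)
        g = gaussian(x, amplitude=1.0, center=0.0, sigma=1.0)
        v = voigt(x, amplitude=1.0, center=0.0, sigma=0.7)  # gamma defaults to sigma
        p = pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.3)
        print(g.max(), v.max(), p.max())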
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 1167a10..f427783 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -20,7 +20,6 @@ from numpy.linalg import LinAlgError
 
 from scipy.optimize import leastsq as scipy_leastsq
 from scipy.optimize import fmin as scipy_fmin
-from scipy.optimize import anneal as scipy_anneal
 from scipy.optimize.lbfgsb import fmin_l_bfgs_b as scipy_lbfgsb
 
 # check for scipy.optimize.minimize
@@ -124,8 +123,21 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.scale_covar = scale_covar
         self.nfev = 0
         self.nfree = 0
+        self.ndata = 0
+        self.nvarys = 0
+        self.ier = 0
+        self.success = None
         self.message = None
+        self.lmdif_message = None
+        self.chisqr = None
+        self.redchi = None
+        self.covar = None
+        self.errorbars = None
+        self.residual = None
         self.var_map = []
+        self.vars = []
+        self.params = []
+        self.updated = []
         self.jacfcn = None
         self.asteval = Interpreter()
         self.namefinder = NameFinder()
@@ -245,7 +257,6 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.nfev = 0
         self.var_map = []
         self.vars = []
-        self.vmin, self.vmax = [], []
         for name, par in self.params.items():
             if par.expr is not None:
                 par.ast = self.asteval.parse(par.expr)
@@ -261,9 +272,6 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             elif par.vary:
                 self.var_map.append(name)
                 self.vars.append(par.setup_bounds())
-                # self.vars.append(par.set_internal_value())
-                #self.vmin.append(par.min)
-                #self.vmax.append(par.max)
 
             self.asteval.symtable[name] = par.value
             par.init_value = par.value
@@ -291,24 +299,8 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
                 delattr(par, 'ast')
 
     def anneal(self, schedule='cauchy', **kws):
-        """
-        use simulated annealing
-        """
-        sched = 'fast'
-        if schedule in ('cauchy', 'boltzmann'):
-            sched = schedule
-
-        self.prepare_fit()
-        sakws = dict(full_output=1, schedule=sched,
-                     maxiter=2000 * (self.nvarys + 1))
-
-        sakws.update(self.kws)
-        sakws.update(kws)
-        print("WARNING:  scipy anneal appears unusable!")
-        saout = scipy_anneal(self.penalty, self.vars, **sakws)
-        self.sa_out = saout
-        self.unprepare_fit()
-        return
+        """scipy simulated annealing is broken"""
+        raise NotImplementedError("scipy simulated annealing is broken")
 
     def lbfgsb(self, **kws):
         """
@@ -317,7 +309,6 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.prepare_fit()
         lb_kws = dict(factr=1000.0, approx_grad=True, m=20,
                       maxfun=2000 * (self.nvarys + 1),
-                      # bounds = zip(self.vmin, self.vmax),
                       )
         lb_kws.update(self.kws)
         lb_kws.update(kws)
@@ -347,7 +338,7 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
 
         fmin_kws.update(kws)
         ret = scipy_fmin(self.penalty, self.vars, **fmin_kws)
-        xout, fout, iter, funccalls, warnflag, allvecs = ret
+        xout, fout, niter, funccalls, warnflag, allvecs = ret
         self.nfev = funccalls
         self.chisqr = self.residual = self.__residual(xout)
         self.ndata = 1
@@ -368,7 +359,6 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
           CG  (conjugate gradient)
           BFGS
           Newton-CG
-          Anneal
           L-BFGS-B
           TNC
           COBYLA
@@ -512,6 +502,9 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             par.stderr, par.correl = 0, None
             has_expr = has_expr or par.expr is not None
 
+        # self.errorbars = error bars were successfully estimated
+        self.errorbars = self.covar is not None
+
         if self.covar is not None:
             if self.scale_covar:
                 self.covar = self.covar * sum_sqr / self.nfree
@@ -520,10 +513,14 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
                 par = self.params[varname]
                 par.stderr = sqrt(self.covar[ivar, ivar])
                 par.correl = {}
-                for jvar, varn2 in enumerate(self.var_map):
-                    if jvar != ivar:
-                        par.correl[varn2] = (self.covar[ivar, jvar] /
-                             (par.stderr * sqrt(self.covar[jvar, jvar])))
+                try:
+                    self.errorbars = self.errorbars and (par.stderr > 0.0)
+                    for jvar, varn2 in enumerate(self.var_map):
+                        if jvar != ivar:
+                            par.correl[varn2] = (self.covar[ivar, jvar] /
+                                 (par.stderr * sqrt(self.covar[jvar, jvar])))
+                except:
+                    self.errorbars = False
 
             uvars = None
             if has_expr:
@@ -538,17 +535,15 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
                     uvars = None
 
                 if uvars is not None:
-                    for pname, par in self.params.items():
+                    for par in self.params.values():
                         eval_stderr(par, uvars, self.var_map,
                                     self.params, self.asteval)
                     # restore nominal values
                     for v, nam in zip(uvars, self.var_map):
                         self.asteval.symtable[nam] = v.nominal_value
 
-        self.errorbars = True
-        if self.covar is None:
-            self.errorbars = False
-            self.message = '%s. Could not estimate error-bars'
+        if not self.errorbars:
+            self.message = '%s. Could not estimate error-bars'  % self.message
 
         np.seterr(**orig_warn_settings)
         self.unprepare_fit()
@@ -569,7 +564,6 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
                        'cg': 'CG',
                        'bfgs': 'BFGS',
                        'newton': 'Newton-CG',
-                       'anneal': 'Anneal',
                        'lbfgs': 'L-BFGS-B',
                        'l-bfgs':'L-BFGS-B',
                        'tnc': 'TNC',
@@ -578,8 +572,7 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
                        'dogleg': 'dogleg',
                        'trust-ncg': 'trust-ncg'}
 
-    _fitmethods = {'anneal': 'anneal',
-                   'nelder': 'fmin',
+    _fitmethods = {'nelder': 'fmin',
                    'lbfgsb': 'lbfgsb',
                    'leastsq': 'leastsq'}
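The minimizer changes above drop the broken scipy 'anneal' method (calling it now raises NotImplementedError) and add an `errorbars` attribute recording whether the covariance matrix, and hence parameter uncertainties, could be estimated. A minimal sketch of checking that flag after a fit, assuming the 0.8.0 API shown above (the decay data and parameter names are made up for illustration):

    import numpy as np
    from lmfit import Parameters, minimize

    def residual(params, x, data):
        "residual for a single exponential decay"
        model = params['amp'].value * np.exp(-x / params['tau'].value)
        return model - data

    params = Parameters()
    params.add('amp', value=5.0)
    params.add('tau', value=1.0, min=0.0)

    x = np.linspace(0, 10, 101)
    data = 4.7 * np.exp(-x / 1.8) + np.random.normal(scale=0.05, size=x.size)

    result = minimize(residual, params, args=(x, data), method='leastsq')
    if result.errorbars:                  # covariance was estimated
        print('tau stderr:', result.params['tau'].stderr)
    else:
        print(result.message)             # '... Could not estimate error-bars'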
 
diff --git a/lmfit/model.py b/lmfit/model.py
index 83c41fb..8960745 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -1,6 +1,5 @@
 """
 Concise nonlinear curve fitting.
-
 """
 
 import warnings
@@ -8,119 +7,175 @@ import inspect
 import copy
 import numpy as np
 from . import Parameters, Parameter, minimize
+from .printfuncs import fit_report
 
+# Use pandas.isnull for aligning missing data if pandas is available;
+# otherwise use numpy.isnan.
 try:
-    import pandas
+    from pandas import isnull, Series
 except ImportError:
-    # Use pandas.isnull if available. Fall back on numpy.isnan.
     isnull = np.isnan
+    Series = type(NotImplemented)
 
-    # When handling missing data or data not the same size as independent vars,
-    # use pandas to align. If pandas is not available, data and vars must be
-    # the same size, but missing data can still be handled with masks.
-
-    def _align(var, mask, data):
-        if mask is not None:
-            return var[mask]
-        return var
-else:
-    isnull = pandas.isnull
-
-    def _align(var, mask, data):
-        if isinstance(data, pandas.Series) and isinstance(var, pandas.Series):
-            return var.reindex_like(data).dropna()
-        elif mask is not None:
-            return var[mask]
-        else:
-            return var
-
+def _align(var, mask, data):
+    "align missing data, with pandas is available"
+    if isinstance(data, Series) and isinstance(var, Series):
+        return var.reindex_like(data).dropna()
+    elif mask is not None:
+        return var[mask]
+    return var
 
 class Model(object):
-
-    def __init__(self, func, independent_vars=[], missing='none'):
-        """Create a model from a user-defined function.
-
-        Parameters
-        ----------
-        func: function
-        independent_vars: list of strings, optional
-            matching argument(s) to func
-        missing: 'none', 'drop', or 'raise'
-            'none': Do not check for null or missing values.
-            'drop': Drop null or missing observations in data.
-                Use pandas.isnull if pandas is available; otherwise,
-                silently fall back to numpy.isnan.
-            'raise': Raise a (more helpful) exception when data contains null
-                or missing values.
-
-        Note
-        ----
-        Parameter names are inferred from the function arguments,
-        and a residual function is automatically constructed.
-
-        Example
-        -------
-        >>> def decay(t, tau, N):
-        ...     return N*np.exp(-t/tau)
-        ...
-        >>> my_model = Model(decay, independent_vars=['t'])
-        """
+    """Create a model from a user-defined function.
+
+    Parameters
+    ----------
+    func: function to be wrapped
+    independent_vars: list of strings or None (default)
+        arguments to func that are independent variables
+    param_names: list of strings or None (default)
+        names of arguments to func that are to be made into parameters
+    missing: None, 'none', 'drop', or 'raise'
+        'none' or None: Do not check for null or missing values (default)
+        'drop': Drop null or missing observations in data.
+            if pandas is installed, pandas.isnull is used, otherwise
+            numpy.isnan is used.
+        'raise': Raise a (more helpful) exception when data contains null
+            or missing values.
+
+    Note
+    ----
+    Parameter names are inferred from the function arguments,
+    and a residual function is automatically constructed.
+
+    Example
+    -------
+    >>> def decay(t, tau, N):
+    ...     return N*np.exp(-t/tau)
+    ...
+    >>> my_model = Model(decay, independent_vars=['t'])
+    """
+
+    _forbidden_args = ('data', 'weights', 'params')
+    _invalid_ivar  = "Invalid independent variable name ('%s') for function %s"
+    _invalid_par   = "Invalid parameter name ('%s') for function %s"
+    _invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
+    def __init__(self, func, independent_vars=None, param_names=None,
+                 missing='none', prefix='', components=None, **kws):
         self.func = func
+        self.prefix = prefix
+        self.param_names = param_names
         self.independent_vars = independent_vars
-        if not missing in ['none', 'drop', 'raise']:
-            raise ValueError("missing must be 'none', 'drop', or 'raise'.")
+        self.func_allargs = []
+        self.func_haskeywords = False
+        self.has_initial_guess = False
+        self.components = components
+        if not missing in [None, 'none', 'drop', 'raise']:
+            raise ValueError(self._invalid_missing)
         self.missing = missing
+        self.opts = kws
+        self.result = None
+        self.params = Parameters()
         self._parse_params()
         self._residual = self._build_residual()
+        if self.independent_vars is None:
+            self.independent_vars = []
 
     def _parse_params(self):
-        model_arg_names = inspect.getargspec(self.func)[0]
+        "build params from function arguments"
+        argspec = inspect.getargspec(self.func)
+        pos_args = argspec.args[:]
+        keywords = argspec.keywords
+        kw_args = {}
+        if argspec.defaults is not None:
+            for val in reversed(argspec.defaults):
+                kw_args[pos_args.pop()] = val
+        #
+        self.func_haskeywords = keywords is not None
+        self.func_allargs = pos_args + list(kw_args.keys())
+        allargs = self.func_allargs
+
+        if len(allargs) == 0 and keywords is not None:
+            return
+
+        # default independent_var = 1st argument
+        if self.independent_vars is None:
+            self.independent_vars = [pos_args[0]]
+
+        # default param names: all positional args
+        # except independent variables
+        def_vals = {}
+        if self.param_names is None:
+            self.param_names = pos_args[:]
+            for key, val in kw_args.items():
+                if (not isinstance(val, bool) and
+                    isinstance(val, (float, int))):
+                    self.param_names.append(key)
+                    def_vals[key] = val
+            for p in self.independent_vars:
+                if p in self.param_names:
+                    self.param_names.remove(p)
+
+        # check variable names for validity
         # The implicit magic in fit() requires us to disallow some
-        # variable names.
-        forbidden_args = ['data', 'weights', 'params']
-        for arg in forbidden_args:
-            if arg in model_arg_names:
-                raise ValueError("The model function cannot have an " +
-                                 "argument named %s. " % arg +
-                                 "Choose a different name.")
-        self.param_names = set(model_arg_names) - set(self.independent_vars)
-
-    def __set_param_names(self, param_names):
-        # used when models are added
-        self.param_names = param_names
-
-    def params(self):
-        """Return a blank copy of model params.
-
-        Example
-        -------
-        >>> params = my_model.params()
-        >>> params['N'].value = 1.0  # initial guess
-        >>> params['tau'].value = 2.0  # initial guess
-        >>> params['tau'].min = 0  # (optional) lower bound
+        fname = self.func.__name__
+        for arg in self.independent_vars:
+            if arg not in allargs or arg in self._forbidden_args:
+                raise ValueError(self._invalid_ivar % (arg, fname))
+        for arg in self.param_names:
+            if arg not in allargs or arg in self._forbidden_args:
+                raise ValueError(self._invalid_par % (arg, fname))
+
+        names = []
+        for pname in self.param_names:
+            if not pname.startswith(self.prefix):
+                pname = "%s%s" % (self.prefix, pname)
+            names.append(pname)
+        self.param_names = set(names)
+        for name in self.param_names:
+            self.params.add(name)
+        for key, val in def_vals.items():
+            self.set_paramval(key, val)
+
+    def guess_starting_values(self, data=None, **kws):
+        """stub for guess starting values --
+        should be implemented for each model subclass
         """
-        params = Parameters()
-        [params.add(name) for name in self.param_names]
-        return params
+        cname = self.__class__.__name__
+        msg = 'guess_starting_values() not implemented for %s' % cname
+        raise NotImplementedError(msg)
 
     def _build_residual(self):
         "Generate and return a residual function."
-        def residual(params, *args, **kwargs):
-            # Unpack Parameter objects into simple key -> value pairs,
-            # and combine them with any non-parameter kwargs.
-            data, weights = args
-            params = dict([(name, p.value) for name, p in params.items()])
-            kwargs = dict(list(params.items()) + list(kwargs.items()))
-            f = self.func(**kwargs)
-            if weights is None:
-                e = f - data
-            else:
-                e = (f - data)*weights
-            return np.asarray(e)  # for compatibility with pandas.Series
-
+        def residual(params, data, weights, **kwargs):
+            "default residual:  (data-model)*weights"
+            diff = self.eval(params=params, **kwargs) - data
+            if weights is not None:
+                diff *= weights
+            return np.asarray(diff)  # for compatibility with pandas.Series
         return residual
 
+    def make_funcargs(self, params, kwargs):
+        """convert parameter values and keywords to function arguments"""
+        out = {}
+        out.update(self.opts)
+        npref = len(self.prefix)
+        for name, par in params.items():
+            if npref > 0 and name.startswith(self.prefix):
+                name = name[npref:]
+            if name in self.func_allargs or self.func_haskeywords:
+                out[name] = par.value
+        for name, val in kwargs.items():
+            if name in self.func_allargs or self.func_haskeywords:
+                out[name] = val
+                if name in params:
+                    params[name].value = val
+        if self.func_haskeywords and self.components is not None:
+            out['__components__'] = self.components
+        return out
+
     def _handle_missing(self, data):
+        "handle missing data"
         if self.missing == 'raise':
             if np.any(isnull(data)):
                 raise ValueError("Data contains a null value.")
@@ -131,7 +186,31 @@ class Model(object):
             mask = np.asarray(mask)  # for compatibility with pandas.Series
             return mask
 
-    def fit(self, data, params=None, weights=None, **kwargs):
+    def set_paramval(self, paramname, value, min=None, max=None, vary=True):
+        """set parameter value, as for initial guess.
+        name can include prefix or not
+        """
+        pname = paramname
+        if pname not in self.params:
+            pname = "%s%s" % (self.prefix, pname)
+        if pname not in self.params:
+            raise KeyError("'%s' not a parameter name" % pname)
+        self.params[pname].value = value
+        self.params[pname].vary = vary
+        if min is not None:
+            self.params[pname].min = min
+        if max is not None:
+            self.params[pname].max = max
+
+    def eval(self, params=None, **kwargs):
+        """evaluate the model with the supplied or current parameters"""
+        if params is None:
+            params = self.params
+        fcnargs = self.make_funcargs(params, kwargs)
+        return self.func(**fcnargs)
+
+    def fit(self, data, params=None, weights=None, method='leastsq',
+            iter_cb=None, scale_covar=True, **kwargs):
         """Fit the model to the data.
 
         Parameters
@@ -140,6 +219,9 @@ class Model(object):
         params: Parameters object, optional
         weights: array-like of same size as data
             used for weighted fit
+        method: fitting method to use (default = 'leastsq')
+        iter_cb: None or callable, callback function called at each iteration
+        scale_covar: bool (default True), whether to auto-scale the covariance matrix
         keyword arguments: optional, named like the arguments of the
             model function, will override params. See examples below.
 
@@ -156,7 +238,6 @@ class Model(object):
         >>> result = my_model.fit(data, tau=5, N=3, t=t)
 
         # Or, for more control, pass a Parameters object.
-        # See docstring for Model.params()
         >>> result = my_model.fit(data, params, t=t)
 
         # Keyword arguments override Parameters.
@@ -169,7 +250,7 @@ class Model(object):
 
         """
         if params is None:
-            params = self.params()
+            params = self.params
         else:
             params = copy.deepcopy(params)
 
@@ -195,16 +276,18 @@ class Model(object):
                               "It will be ignored.", UserWarning)
 
         # If any parameter is not initialized raise a more helpful error.
-        missing_param = set(params.keys()) != self.param_names
-        blank_param = any([p.value is None for p in params.values()])
+        missing_param = any([p not in params.keys()
+                             for p in self.param_names])
+        blank_param = any([(p.value is None and p.expr is None)
+                           for p in params.values()])
         if missing_param or blank_param:
-            raise ValueError("Assign each parameter an initial value by " +
-                             "passing Parameters or keyword arguments to " +
-                             "fit().")
+            raise ValueError("""Assign each parameter an initial value by
+ passing Parameters or keyword arguments to fit""")
+
 
         # Handle null/missing values.
         mask = None
-        if self.missing != 'none':
+        if self.missing not in (None, 'none'):
             mask = self._handle_missing(data)  # This can raise.
             if mask is not None:
                 data = data[mask]
@@ -217,24 +300,31 @@ class Model(object):
             if not np.isscalar(self.independent_vars):  # just in case
                 kwargs[var] = _align(kwargs[var], mask, data)
 
-        result = minimize(self._residual, params,
-                          args=(data, weights), kws=kwargs)
+        kwargs['__components__'] = self.components
+        result = minimize(self._residual, params, args=(data, weights),
+                          method=method, iter_cb=iter_cb,
+                          scale_covar=scale_covar, kws=kwargs)
 
         # Monkey-patch the Minimizer object with some extra information.
         result.model = self
         result.init_params = init_params
-        result.init_values = dict([(name, p.value) for name, p
-                                  in init_params.items()])
-        indep_vars = dict([(k, v) for k, v in kwargs.items() if k in
-                           self.independent_vars])
-        evaluation_kwargs = dict(list(indep_vars.items()) +
-                                 list(result.init_values.items()))
-        result.init_fit = self.func(**evaluation_kwargs)
-        evaluation_kwargs = dict(list(indep_vars.items()) +
-                                 list(result.values.items()))
-        result.best_fit = self.func(**evaluation_kwargs)
+        result.init_values = self.make_funcargs(init_params, {})
+        if '__components__' in result.init_values:
+            result.init_values.pop('__components__')
+        result.init_fit = self.eval(params=init_params, **kwargs)
+        result.best_fit = self.eval(params=result.params, **kwargs)
+        self.result = result
         return result
 
+    def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1):
+        "return fit report"
+        if self.result is None:
+            raise ValueError("must run .fit() first")
+
+        return fit_report(self.result, modelpars=modelpars,
+                          show_correl=show_correl,
+                          min_correl=min_correl)
+
     def __add__(self, other):
         colliding_param_names = self.param_names & other.param_names
         if len(colliding_param_names) != 0:
@@ -243,15 +333,40 @@ class Model(object):
                             "%s. Redefine the models " % collision +
                             "with distinct names.")
 
-        def func(**kwargs):
-            self_kwargs = dict([(k, kwargs.get(k)) for k in
-                                self.param_names | set(self.independent_vars)])
-            other_kwargs = dict([(k, kwargs.get(k)) for k in
-                                 other.param_names | set(other.independent_vars)])
-            return self.func(**self_kwargs) + other.func(**other_kwargs)
-
-        model = Model(func=func,
-                      independent_vars=self.independent_vars,
-                      missing=self.missing)
-        model.__set_param_names(self.param_names | other.param_names)
-        return model
+        def composite_func(**kwargs):
+            "composite model function"
+            components = kwargs.get('__components__', None)
+            out = None
+            if components is not None:
+                for comp in components:
+                    pars = Parameters()
+                    prefix = comp.prefix
+                    for p in self.params.values():
+                        if p.name.startswith(prefix):
+                            pars.__setitem__(p.name, p)
+                            pars[p.name].value = kwargs[p.name]
+
+                    fcnargs = comp.make_funcargs(pars, kwargs)
+                    comp_out = comp.func(**fcnargs)
+                    if out is None:
+                        out = np.zeros_like(comp_out)
+                    out += comp_out
+            return out
+
+        components = self.components
+        if components is None:
+            components = [self]
+        if other.components is None:
+            components.append(other)
+        else:
+            components.extend(other.components)
+        all_params = self.params
+        for key, val in other.params.items():
+            all_params[key] = val
+
+        out = Model(func=composite_func, independent_vars=self.independent_vars,
+                    param_names=self.param_names | other.param_names,
+                    missing=self.missing, components=components)
+        out.components = components
+        out.params = all_params
+        return out
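The rewritten Model class above adds `prefix`, `components`, an `eval()` method, and a `fit_report()` convenience wrapper. A short sketch of the resulting workflow, assuming the API as diffed (synthetic data; the decay function mirrors the class docstring):

    import numpy as np
    from lmfit import Model

    def decay(t, tau, N):
        "single exponential decay"
        return N * np.exp(-t / tau)

    t = np.linspace(0, 5, 51)
    data = 3.2 * np.exp(-t / 1.3) + np.random.normal(scale=0.05, size=t.size)

    model = Model(decay, independent_vars=['t'])
    result = model.fit(data, t=t, tau=1.0, N=1.0)  # keywords give initial values

    print(model.fit_report())           # wraps printfuncs.fit_report
    init_curve = model.eval(params=result.init_params, t=t)
    best_curve = result.best_fit        # stored on the result by fit()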
diff --git a/lmfit/models.py b/lmfit/models.py
new file mode 100644
index 0000000..f2a514a
--- /dev/null
+++ b/lmfit/models.py
@@ -0,0 +1,334 @@
+import numpy as np
+from .model import Model
+from .parameter import Parameter
+
+from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, pearson7,
+                         step, rectangle, breit_wigner, logistic,
+                         students_t, lognormal, damped_oscillator,
+                         expgaussian, donaich, skewed_voigt, exponential,
+                         powerlaw, linear, parabolic)
+
+class DimensionalError(Exception):
+    pass
+
+def _validate_1d(independent_vars):
+    if len(independent_vars) != 1:
+        raise DimensionalError(
+            "This model requires exactly one independent variable.")
+
+def index_of(arr, val):
+    """return index of array nearest to a value
+    """
+    if val < min(arr):
+        return 0
+    return np.abs(arr-val).argmin()
+
+def estimate_peak(y, x, negative):
+    "estimate amp, cen, sigma for a peak"
+    if x is None:
+        return 1.0, 0.0, 1.0
+    maxy, miny = max(y), min(y)
+    maxx, minx = max(x), min(x)
+    imaxy = index_of(y, maxy)
+    cen = x[imaxy]
+    amp = (maxy - miny)*2.0
+    sig = (maxx-minx)/6.0
+
+    halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
+    if negative:
+        imaxy = index_of(y, miny)
+        amp = -(maxy - miny)*2.0
+        halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
+    if len(halfmax_vals) > 2:
+        sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
+        cen = x[halfmax_vals].mean()
+    return amp*sig, cen, sig
+
+COMMON_DOC = """
+
+Parameters
+----------
+independent_vars: list of strings to be set as variable names
+missing: None, 'drop', or 'raise'
+    None: Do not check for null or missing values.
+    'drop': Drop null or missing observations in data.
+        Use pandas.isnull if pandas is available; otherwise,
+        silently fall back to numpy.isnan.
+    'raise': Raise a (more helpful) exception when data contains null
+        or missing values.
+prefix: string to prepend to parameter names, needed to add two Models that
+    have parameter names in common. Blank by default.
+"""
+class ConstantModel(Model):
+    __doc__ = "x -> c" + COMMON_DOC
+    def __init__(self, **kwargs):
+        def func(x, c):
+            return c
+        super(ConstantModel, self).__init__(func, **kwargs)
+
+    def guess_starting_values(self, data, **kwargs):
+        self.set_paramval('c', data.mean())
+        self.has_initial_guess = True
+
+class LinearModel(Model):
+    __doc__ = linear.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(LinearModel, self).__init__(linear, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kwargs):
+        sval, oval = 0., 0.
+        if x is not None:
+            sval, oval = np.polyfit(x, data, 1)
+        self.set_paramval('intercept', oval)
+        self.set_paramval('slope', sval)
+        self.has_initial_guess = True
+
+class QuadraticModel(Model):
+    __doc__ = parabolic.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(QuadraticModel, self).__init__(parabolic, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kwargs):
+        a, b, c = 0., 0., 0.
+        if x is not None:
+            a, b, c = np.polyfit(x, data, 2)
+        self.set_paramval('a', a)
+        self.set_paramval('b', b)
+        self.set_paramval('c', c)
+        self.has_initial_guess = True
+
+ParabolicModel = QuadraticModel
+
+class PolynomialModel(Model):
+    __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
+    MAX_DEGREE=7
+    DEGREE_ERR = "degree must be an integer, no larger than %d."
+    def __init__(self, degree, **kwargs):
+        if not isinstance(degree, int)  or degree > self.MAX_DEGREE:
+            raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
+
+        self.poly_degree = degree
+        pnames = ['c%i' % (i) for i in range(degree + 1)]
+        kwargs['param_names'] = pnames
+
+        def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
+            out = np.zeros_like(x)
+            args = dict(c0=c0, c1=c1, c2=c2, c3=c3,
+                        c4=c4, c5=c5, c6=c6, c7=c7)
+            for i in range(self.poly_degree+1):
+                out += x**i * args.get('c%i' % i, 0)
+            return out
+        super(PolynomialModel, self).__init__(polynomial, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kws):
+        coefs = np.zeros(self.MAX_DEGREE+1)
+        if x is not None:
+            out = np.polyfit(x, data, self.poly_degree)
+            for i, coef in enumerate(out[::-1]):
+                coefs[i] = coef
+        for i in range(self.poly_degree+1):
+            self.set_paramval('c%i' % (i), coefs[i])
+        self.has_initial_guess = True
+
+class GaussianModel(Model):
+    __doc__ = gaussian.__doc__ + COMMON_DOC
+    fwhm_factor = 2.354820
+    def __init__(self, **kwargs):
+        super(GaussianModel, self).__init__(gaussian, **kwargs)
+        self.params.add('%sfwhm' % self.prefix,
+                        expr='%.6f*%ssigma' % (self.fwhm_factor, self.prefix))
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.set_paramval('amplitude', amp)
+        self.set_paramval('center', cen)
+        self.set_paramval('sigma', sig)
+        self.has_initial_guess = True
+
+class LorentzianModel(Model):
+    __doc__ = lorentzian.__doc__ + COMMON_DOC
+    fwhm_factor = 2.0
+    def __init__(self, **kwargs):
+        super(LorentzianModel, self).__init__(lorentzian, **kwargs)
+        self.params.add('%sfwhm' % self.prefix,
+                        expr='%.7f*%ssigma' % (self.fwhm_factor,
+                                               self.prefix))
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.set_paramval('amplitude', amp)
+        self.set_paramval('center', cen)
+        self.set_paramval('sigma', sig)
+        self.has_initial_guess = True
+
+class VoigtModel(Model):
+    __doc__ = voigt.__doc__ + COMMON_DOC
+    fwhm_factor = 3.60131
+    def __init__(self, **kwargs):
+        super(VoigtModel, self).__init__(voigt, **kwargs)
+        self.params.add('%sfwhm' % self.prefix,
+                        expr='%.7f*%ssigma' % (self.fwhm_factor,
+                                               self.prefix))
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.set_paramval('amplitude', amp)
+        self.set_paramval('center', cen)
+        self.set_paramval('sigma', sig)
+        self.params['%sgamma' % self.prefix] = \
+                              Parameter(expr = '%ssigma' % self.prefix)
+        self.has_initial_guess = True
+
+class PseudoVoigtModel(Model):
+    __doc__ = pvoigt.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(PseudoVoigtModel, self).__init__(pvoigt, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.set_paramval('amplitude', amp)
+        self.set_paramval('center', cen)
+        self.set_paramval('sigma', sig)
+        self.set_paramval('fraction', 0.5)
+        self.has_initial_guess = True
+
+
+class Pearson7Model(Model):
+    __doc__ = pearson7.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(Pearson7Model, self).__init__(pearson7, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.set_paramval('amplitude', amp)
+        self.set_paramval('center', cen)
+        self.set_paramval('sigma', sig)
+        self.set_paramval('exponent', 0.5)
+        self.has_initial_guess = True
+
+
+class StudentsTModel(Model):
+    __doc__ = students_t.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(StudentsTModel, self).__init__(students_t, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.params['%samplitude' % self.prefix].value = amp
+        self.params['%scenter' % self.prefix].value = cen
+        self.params['%ssigma' % self.prefix].value = sig
+        self.has_initial_guess = True
+
+class BreitWignerModel(Model):
+    __doc__ = breit_wigner.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(BreitWignerModel, self).__init__(breit_wigner, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.params['%samplitude' % self.prefix].value = amp
+        self.params['%scenter' % self.prefix].value = cen
+        self.params['%ssigma' % self.prefix].value = sig
+        self.params['%sq' % self.prefix].value = 1.0
+        self.has_initial_guess = True
+
+class DampedOscillatorModel(Model):
+    __doc__ = damped_oscillator.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(DampedOscillatorModel, self).__init__(damped_oscillator, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.params['%samplitude' % self.prefix].value = amp
+        self.params['%scenter' % self.prefix].value = cen
+        self.params['%ssigma' % self.prefix].value = sig
+        self.has_initial_guess = True
+
+class ExponentialGaussianModel(Model):
+    __doc__ = expgaussian.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(ExponentialGaussianModel, self).__init__(expgaussian, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.params['%samplitude' % self.prefix].value = amp
+        self.params['%scenter' % self.prefix].value = cen
+        self.params['%ssigma' % self.prefix].value = sig
+        self.has_initial_guess = True
+
+
+class DonaichModel(Model):
+    __doc__ = donaich.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(DonaichModel, self).__init__(donaich, **kwargs)
+
+    def guess_starting_values(self, data, x=None, negative=False, **kwargs):
+        amp, cen, sig = estimate_peak(data, x, negative)
+        self.params['%samplitude' % self.prefix].value = amp
+        self.params['%scenter' % self.prefix].value = cen
+        self.params['%ssigma' % self.prefix].value = sig
+        self.has_initial_guess = True
+
+
+class PowerLawModel(Model):
+    __doc__ = powerlaw.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(PowerLawModel, self).__init__(powerlaw, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kws):
+        try:
+            expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
+        except:
+            expon, amp = 1, np.log(abs(max(data)+1.e-9))
+        self.params['%samplitude' % self.prefix].value = np.exp(amp)
+        self.params['%sexponent' % self.prefix].value = expon
+        self.has_initial_guess = True
+
+class ExponentialModel(Model):
+    __doc__ = exponential.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(ExponentialModel, self).__init__(exponential, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kws):
+        try:
+            sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
+        except:
+            sval, oval = 1., np.log(abs(max(data)+1.e-9))
+        self.params['%samplitude' % self.prefix].value = np.exp(oval)
+        self.params['%sdecay' % self.prefix].value = -1/sval
+        self.has_initial_guess = True
+
+class StepModel(Model):
+    __doc__ = step.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(StepModel, self).__init__(step, **kwargs)
+
+    def guess_starting_values(self, data, x=None, **kws):
+        if x is None:
+            return
+        ymin, ymax = min(data), max(data)
+        xmin, xmax = min(x), max(x)
+        self.set_paramval('amplitude', (ymax-ymin))
+        self.set_paramval('center',    (xmax+xmin)/2.0)
+        self.set_paramval('sigma',     (xmax-xmin)/7.0)
+        self.has_initial_guess = True
+
+class RectangleModel(Model):
+    __doc__ = rectangle.__doc__ + COMMON_DOC
+    def __init__(self, **kwargs):
+        super(RectangleModel, self).__init__(rectangle, **kwargs)
+        self.params.add('%smidpoint' % self.prefix,
+                        expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
+                                                            self.prefix))
+    def guess_starting_values(self, data, x=None, **kws):
+        if x is None:
+            return
+        ymin, ymax = min(data), max(data)
+        xmin, xmax = min(x), max(x)
+        self.set_paramval('amplitude', (ymax-ymin))
+        self.set_paramval('center1',   (xmax+xmin)/4.0)
+        self.set_paramval('sigma1' ,   (xmax-xmin)/7.0)
+        self.set_paramval('center2', 3*(xmax+xmin)/4.0)
+        self.set_paramval('sigma2',    (xmax-xmin)/7.0)
+        self.has_initial_guess = True
+
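The new lmfit/models.py provides ready-made models with guess_starting_values() and prefixed parameter names, so models can be added without name collisions. A sketch of a peak-plus-background fit, assuming the composite-model behavior works as the __add__ implementation above suggests (data and prefixes are illustrative):

    import numpy as np
    from lmfit.models import GaussianModel, LinearModel

    x = np.linspace(-5, 5, 201)
    y = (7.0 * np.exp(-x**2 / 0.8) + 0.2 * x + 1.0
         + np.random.normal(scale=0.05, size=x.size))

    peak = GaussianModel(prefix='g_')
    background = LinearModel(prefix='bg_')
    peak.guess_starting_values(y, x=x)        # amplitude/center/sigma from data
    background.set_paramval('slope', 0.0)
    background.set_paramval('intercept', y.min())

    model = peak + background                 # composite via Model.__add__
    result = model.fit(y, x=x)
    print(result.params['g_fwhm'].value)      # fwhm comes from an expr constraint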
diff --git a/lmfit/models1d.py b/lmfit/old_models1d.py
similarity index 100%
rename from lmfit/models1d.py
rename to lmfit/old_models1d.py
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index a49c36b..6596ff4 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -24,7 +24,7 @@ class Parameters(OrderedDict):
     add_many()
     """
     def __init__(self, *args, **kwds):
-        OrderedDict.__init__(self)
+        super(Parameters, self).__init__()
         self.update(*args, **kwds)
 
     def __setitem__(self, key, value):
@@ -81,6 +81,7 @@ class Parameter(object):
         self.deps   = None
         self.stderr = None
         self.correl = None
+        self.from_internal = lambda val: val
         self._init_bounds()
 
     def _init_bounds(self):
@@ -171,7 +172,10 @@ class Parameter(object):
         """get value, with bounds applied"""
         if (self._val is not nan and
             isinstance(self._val, uncertainties.Variable)):
-            self._val = self._val.nominal_value
+            try:
+                self._val = self._val.nominal_value
+            except AttributeError:
+                pass
 
         if self.min is None:
             self.min = -inf
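The parameter.py changes keep _getval() from failing when an uncertainties Variable has lost its nominal_value; bounds are still applied whenever a value is read. A tiny sketch of that bounds behavior, assuming value access clamps to [min, max] as the _getval docstring above indicates:

    from lmfit import Parameters

    params = Parameters()                # an OrderedDict subclass
    params.add('tau', value=2.0, min=0.0, max=10.0)
    params['tau'].value = -1.0           # outside the lower bound
    print(params['tau'].value)           # clamped to 0.0 on access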
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index 1a04fb2..f74c25c 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -18,34 +18,63 @@ Changes:
 """
 
 from __future__ import print_function
-
-
-def fit_report(params, modelpars=None, show_correl=True, min_correl=0.1):
+from .parameter import Parameters
+
+def getfloat_attr(obj, attr, fmt='%.3f'):
+    "format an attribute of an object for printing"
+    val = getattr(obj, attr, None)
+    if val is None:
+        return 'unknown'
+    if isinstance(val, int):
+        return '%d' % val
+    if isinstance(val, float):
+        return fmt % val
+    else:
+        return repr(val)
+
+CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
+
+def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1):
     """return text of a report for fitted params best-fit values,
     uncertainties and correlations
 
     arguments
     ----------
-       params       Parameters from fit
+       inpars       Parameters from fit, or a Minimizer object returned from a fit.
        modelpars    Optional Known Model Parameters [None]
        show_correl  whether to show list of sorted correlations [True]
        min_correl   smallest correlation absolute value to show [0.1]
 
     """
+    if isinstance(inpars, Parameters):
+        result, params = None, inpars
+    elif hasattr(inpars, 'params'):
+        result = inpars
+        params = inpars.params
+
     parnames = sorted(params)
     buff = []
     add = buff.append
+    if result is not None:
+        add("[[Fit Statistics]]")
+        add("    # function evals   = %s" % getfloat_attr(result, 'nfev'))
+        add("    # data points      = %s" % getfloat_attr(result, 'ndata'))
+        add("    # variables        = %s" % getfloat_attr(result, 'nvarys'))
+        add("    chi-square         = %s" % getfloat_attr(result, 'chisqr'))
+        add("    reduced chi-square = %s" % getfloat_attr(result, 'redchi'))
+
     namelen = max([len(n) for n in parnames])
     add("[[Variables]]")
     for name in parnames:
         par = params[name]
         space = ' '*(namelen+2 - len(name))
-        nout = " %s: %s" % (name, space)
-        initval = 'inital = ?'
+        nout = "%s: %s" % (name, space)
+        inval = 'initial = ?'
         if par.init_value is not None:
-            initval = 'initial = % .7g' % par.init_value
+            inval = 'initial = % .7g' % par.init_value
         if modelpars is not None and name in modelpars:
-            initval = '%s, model_value =% .7g' % (initval, modelpars[name].value)
+            inval = '%s, model_value =% .7g' % (inval, modelpars[name].value)
 
         try:
             sval = '% .7g' % par.value
@@ -60,14 +89,14 @@ def fit_report(params, modelpars=None, show_correl=True, min_correl=0.1):
                 pass
 
         if par.vary:
-            add("    %s %s %s" % (nout, sval, initval))
+            add("    %s %s %s" % (nout, sval, inval))
         elif par.expr is not None:
             add("    %s %s == '%s'" % (nout, sval, par.expr))
         else:
             add("    %s % .7g (fixed)" % (nout, par.value))
 
     if show_correl:
-        add('[[Correlations]] (unreported correlations are < % .3f)' % min_correl)
+        add(CORREL_HEAD % min_correl)
         correls = {}
         for i, name in enumerate(parnames):
             par = params[name]
@@ -75,14 +104,13 @@ def fit_report(params, modelpars=None, show_correl=True, min_correl=0.1):
                 continue
             if hasattr(par, 'correl') and par.correl is not None:
                 for name2 in parnames[i+1:]:
-                    if name != name2 and name2 in par.correl:
+                    if (name != name2 and name2 in par.correl and
+                        abs(par.correl[name2]) > min_correl):
                         correls["%s, %s" % (name, name2)] = par.correl[name2]
 
         sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
         sort_correl.reverse()
         for name, val in sort_correl:
-            if abs(val) < min_correl:
-                break
             lspace = max(1, 25 - len(name))
             add('    C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))
     return '\n'.join(buff)
@@ -108,9 +136,11 @@ def ci_report(ci):
     title_shown = False
     for name, row in ci.items():
         if not title_shown:
-            add("".join([''.rjust(maxlen)]+[i.rjust(10) for i in map(convp, row)]))
+            add("".join([''.rjust(maxlen)]+[i.rjust(10)
+                                            for i in map(convp, row)]))
             title_shown = True
-        add("".join([name.rjust(maxlen)]+[i.rjust(10) for i in map(conv,  row)]))
+        add("".join([name.rjust(maxlen)]+[i.rjust(10)
+                                          for i in map(conv,  row)]))
     return '\n'.join(buff)
 
 
diff --git a/lmfit/specified_models.py b/lmfit/specified_models.py
deleted file mode 100644
index 3c7b014..0000000
--- a/lmfit/specified_models.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import numpy as np
-from scipy.special import gamma, gammaln, beta, betaln, erf, erfc, wofz
-from numpy import pi
-from . import Model
-from .utilfuncs import (gaussian, normalized_gaussian, exponential,
-                        powerlaw, linear, parabolic)
-
-
-class DimensionalError(Exception):
-    pass
-
-
-def _validate_1d(independent_vars):
-    if len(independent_vars) != 1:
-        raise DimensionalError(
-            "This model requires exactly one independent variable.")
-
-
-def _suffixer(suffix, coded_param_names):
-    """Return a dictionary relating parmeters' hard-coded names to their
-    (possibly) suffixed names."""
-    if suffix is None:
-        param_names = coded_param_names
-    else:
-        param_names = map(lambda p: p + suffix, coded_param_names)
-    return dict(zip(coded_param_names, param_names))
-
-
-class BaseModel(Model):
-    """Whereas Model takes a user-provided function, BaseModel is
-    subclassed with a hard-coded function."""
-
-    def _parse_params(self):
-        # overrides method of Model that inspects func
-        param_names = _suffixer(self.suffix, self._param_names)
-        self.param_names = set(param_names.values())  # used by Model
-        return param_names  # a lookup dictionary
-
-
-COMMON_DOC = """
-
-Parameters
-----------
-independent_vars: list of strings to be set as variable names
-missing: 'none', 'drop', or 'raise'
-    'none': Do not check for null or missing values.
-    'drop': Drop null or missing observations in data.
-        Use pandas.isnull if pandas is available; otherwise,
-        silently fall back to numpy.isnan.
-    'raise': Raise a (more helpful) exception when data contains null
-        or missing values.
-suffix: string to append to paramter names, needed to add two Models that
-    have parameter names in common. None by default.
-"""
-
-
-class Parabolic(BaseModel):
-    __doc__ = parabolic.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        _validate_1d(independent_vars)
-        var_name, = independent_vars
-        self.suffix = suffix
-        self._param_names = ['a', 'b', 'c']
-        p = self._parse_params()
-        def func(**kwargs):
-            a = kwargs[p['a']]
-            b = kwargs[p['b']]
-            c = kwargs[p['c']]
-            var = kwargs[var_name]
-            return parabolic(var, a, b, c)
-        super(Parabolic, self).__init__(func, independent_vars, missing)
-
-
-Quadratic = Parabolic  # synonym
-
-
-class Linear(BaseModel):
-    __doc__ = linear.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        _validate_1d(independent_vars)
-        var_name, = independent_vars
-        self.suffix = suffix
-        self._param_names = ['slope', 'intercept']
-        p = self._parse_params()
-        def func(**kwargs):
-            slope = kwargs[p['slope']]
-            intercept = kwargs[p['intercept']]
-            var = kwargs[var_name]
-            return linear(var, slope, intercept)
-        super(Linear, self).__init__(func, independent_vars, missing)
-
-
-class Constant(BaseModel):
-    __doc__ = "x -> c" + COMMON_DOC
-    def __init__(self, independent_vars=[], missing='none', suffix=None):
-        # special case with default []
-        self.suffix = suffix
-        self._param_names = ['c']
-        p = self._parse_params()
-        def func(**kwargs):
-            c = kwargs[p['c']]
-            return c
-        super(Constant, self).__init__(func, independent_vars, missing)
-
-
-class Polynomial(BaseModel):
-    __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ..." + COMMON_DOC
-    def __init__(self, order, independent_vars, missing='none', suffix=None):
-        if not isinstance(order, int):
-            raise TypeError("order must be an integer.")
-        _validate_1d(independent_vars)
-        var_name, = independent_vars
-        self.suffix = suffix
-        self._param_names = ['c' + str(i) for i in range(order + 1)]
-        p = self._parse_params()
-        def func(**kwargs):
-            var = kwargs[var_name]
-            return np.sum([kwargs[p[name]]*var**i for
-                           i, name in enumerate(self._param_names)], 0)
-        super(Polynomial, self).__init__(func, independent_vars, missing)
-
-
-class Exponential(BaseModel):
-    __doc__ = exponential.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        _validate_1d(independent_vars)
-        var_name, = independent_vars
-        self.suffix = suffix
-        self._param_names = ['amplitude', 'decay']
-        p = self._parse_params()
-        def func(**kwargs):
-            amplitude = kwargs[p['amplitude']]
-            decay = kwargs[p['decay']]
-            var = kwargs[var_name]
-            return exponential(var, amplitude, decay)
-        super(Exponential, self).__init__(func, independent_vars, missing)
-
-
-class NormalizedGaussian(BaseModel):
-    __doc__ = normalized_gaussian.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        self.dim = len(independent_vars)
-
-        if self.dim == 1:
-            var_name, = independent_vars
-            self.suffix = suffix
-            self._param_names = ['center', 'sigma']
-            p = self._parse_params()
-            def func(**kwargs):
-                center = kwargs[p['center']]
-                sigma = kwargs[p['sigma']]
-                var = kwargs[var_name]
-                return normalized_gaussian(var, center, sigma)
-        else:
-            raise NotImplementedError("I only do 1d gaussians for now.")
-            # TODO: Detect dimensionality from number of independent vars
-        super(NormalizedGaussian, self).__init__(
-            func, independent_vars, missing)
-
-
-class Gaussian(BaseModel):
-    __doc__ = gaussian.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        self.dim = len(independent_vars)
-
-        if self.dim == 1:
-            var_name, = independent_vars
-            self.suffix = suffix
-            self._param_names = ['height', 'center', 'sigma']
-            p = self._parse_params()
-            def func(**kwargs):
-                height = kwargs[p['height']]
-                center = kwargs[p['center']]
-                sigma = kwargs[p['sigma']]
-                var = kwargs[var_name]
-                return gaussian(var, height, center, sigma)
-        else:
-            raise NotImplementedError("I only do 1d gaussians for now.")
-            # TODO: Detect dimensionality from number of independent vars
-        super(Gaussian, self).__init__(func, independent_vars, missing)
-
-
-class PowerLaw(BaseModel):
-    __doc__ = powerlaw.__doc__ + COMMON_DOC
-    def __init__(self, independent_vars, missing='none', suffix=None):
-        _validate_1d(independent_vars)
-        var_name, = independent_vars
-        self.suffix = suffix
-        self._param_names = ['coefficient', 'exponent']
-        p = self._parse_params()
-        def func(**kwargs):
-            coefficient = kwargs[p['coefficient']]
-            exponent = kwargs[p['exponent']]
-            var = kwargs[var_name]
-            return powerlaw(var, coefficient, exponent)
-        super(PowerLaw, self).__init__(func, independent_vars, missing)
diff --git a/lmfit/uncertainties/test_umath.py b/lmfit/uncertainties/test_umath.py
deleted file mode 100644
index 4b1a5af..0000000
--- a/lmfit/uncertainties/test_umath.py
+++ /dev/null
@@ -1,294 +0,0 @@
-"""
-Tests of the code in uncertainties.umath.
-
-These tests can be run through the Nose testing framework.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL).
-"""
-
-from __future__ import division
-
-# Standard modules
-import sys
-import math
-
-# Local modules:
-import uncertainties
-import uncertainties.umath as umath
-import test_uncertainties
-
-from uncertainties import __author__
-
-###############################################################################
-# Unit tests
-
-def test_fixed_derivatives_math_funcs():
-    """
-    Comparison between function derivatives and numerical derivatives.
-
-    This comparison is useful for derivatives that are analytical.
-    """
-
-    for name in umath.many_scalars_to_scalar_funcs:
-        # print "Checking %s..." % name
-        func = getattr(umath, name)
-        # Numerical derivatives of func: the nominal value of func() results
-        # is used as the underlying function:
-        numerical_derivatives = uncertainties.NumericalDerivatives(
-            lambda *args: func(*args))
-        test_uncertainties._compare_derivatives(func, numerical_derivatives)
-
-    # Functions that are not in umath.many_scalars_to_scalar_funcs:
-
-    ##
-    # modf(): returns a tuple:
-    def frac_part_modf(x):
-        return umath.modf(x)[0]
-    def int_part_modf(x):
-        return umath.modf(x)[1]
-
-    test_uncertainties._compare_derivatives(
-        frac_part_modf,
-        uncertainties.NumericalDerivatives(
-            lambda x: frac_part_modf(x)))
-    test_uncertainties._compare_derivatives(
-        int_part_modf,
-        uncertainties.NumericalDerivatives(
-            lambda x: int_part_modf(x)))
-
-    ##
-    # frexp(): returns a tuple:
-    def mantissa_frexp(x):
-        return umath.frexp(x)[0]
-    def exponent_frexp(x):
-        return umath.frexp(x)[1]
-
-    test_uncertainties._compare_derivatives(
-        mantissa_frexp,
-        uncertainties.NumericalDerivatives(
-            lambda x: mantissa_frexp(x)))
-    test_uncertainties._compare_derivatives(
-        exponent_frexp,
-        uncertainties.NumericalDerivatives(
-            lambda x: exponent_frexp(x)))
-
-def test_compound_expression():
-    """
-    Test equality between different formulas.
-    """
-
-    x = uncertainties.ufloat((3, 0.1))
-
-    # Prone to numerical errors (but not much more than floats):
-    assert umath.tan(x) == umath.sin(x)/umath.cos(x)
-
-
-def test_numerical_example():
-    "Test specific numerical examples"
-
-    x = uncertainties.ufloat((3.14, 0.01))
-    result = umath.sin(x)
-    # In order to prevent big errors such as a wrong, constant value
-    # for all analytical and numerical derivatives, which would make
-    # test_fixed_derivatives_math_funcs() succeed despite incorrect
-    # calculations:
-    assert ("%.6f +/- %.6f" % (result.nominal_value, result.std_dev())
-            == "0.001593 +/- 0.010000")
-
-    # Regular calculations should still work:
-    assert("%.11f" % umath.sin(3) == "0.14112000806")
-
-def test_monte_carlo_comparison():
-    """
-    Full comparison to a Monte-Carlo calculation.
-
-    Both the nominal values and the covariances are compared between
-    the direct calculation performed in this module and a Monte-Carlo
-    simulation.
-    """
-
-    try:
-        import numpy
-        import numpy.random
-    except ImportError:
-        import warnings
-        warnings.warn("Test not performed because NumPy is not available")
-        return
-
-    # Works on numpy.arrays of Variable objects (whereas umath.sin()
-    # does not):
-    sin_uarray_uncert = numpy.vectorize(umath.sin, otypes=[object])
-
-    # Example expression (with correlations, and multiple variables combined
-    # in a non-linear way):
-    def function(x, y):
-        """
-        Function that takes two NumPy arrays of the same size.
-        """
-        # The uncertainty due to x is about equal to the uncertainty
-        # due to y:
-        return 10 * x**2 - x * sin_uarray_uncert(y**3)
-
-    x = uncertainties.ufloat((0.2, 0.01))
-    y = uncertainties.ufloat((10, 0.001))
-    function_result_this_module = function(x, y)
-    nominal_value_this_module = function_result_this_module.nominal_value
-
-    # Covariances "f*f", "f*x", "f*y":
-    covariances_this_module = numpy.array(uncertainties.covariance_matrix(
-        (x, y, function_result_this_module)))
-
-    def monte_carlo_calc(n_samples):
-        """
-        Calculate function(x, y) on n_samples samples and returns the
-        median, and the covariances between (x, y, function(x, y)).
-        """
-        # Result of a Monte-Carlo simulation:
-        x_samples = numpy.random.normal(x.nominal_value, x.std_dev(),
-                                        n_samples)
-        y_samples = numpy.random.normal(y.nominal_value, y.std_dev(),
-                                        n_samples)
-        function_samples = function(x_samples, y_samples)
-
-        cov_mat = numpy.cov([x_samples, y_samples], function_samples)
-
-        return (numpy.median(function_samples), cov_mat)
-
-    (nominal_value_samples, covariances_samples) = monte_carlo_calc(1000000)
-
-
-    ## Comparison between both results:
-
-    # The covariance matrices must be close:
-
-    # We rely on the fact that covariances_samples very rarely has
-    # null elements:
-
-    assert numpy.vectorize(test_uncertainties._numbers_close)(
-        covariances_this_module,
-        covariances_samples,
-        0.05).all(), (
-        "The covariance matrices do not coincide between"
-        " the Monte-Carlo simulation and the direct calculation:\n"
-        "* Monte-Carlo:\n%s\n* Direct calculation:\n%s"
-        % (covariances_samples, covariances_this_module)
-        )
-
-    # The nominal values must be close:
-    assert test_uncertainties._numbers_close(
-        nominal_value_this_module,
-        nominal_value_samples,
-        # The scale of the comparison depends on the standard
-        # deviation: the nominal values can differ by a fraction of
-        # the standard deviation:
-        math.sqrt(covariances_samples[2, 2])
-        / abs(nominal_value_samples) * 0.5), (
-        "The nominal value (%f) does not coincide with that of"
-        " the Monte-Carlo simulation (%f), for a standard deviation of %f."
-        % (nominal_value_this_module,
-           nominal_value_samples,
-           math.sqrt(covariances_samples[2, 2]))
-        )
-
-
-def test_math_module():
-    "Operations with the math module"
-
-    x = uncertainties.ufloat((-1.5, 0.1))
-
-    # The exponent must not be differentiated, when calculating the
-    # following (the partial derivative with respect to the exponent
-    # is not defined):
-    assert (x**2).nominal_value == 2.25
-
-    # Regular operations are chosen to be unchanged:
-    assert isinstance(umath.sin(3), float)
-
-    # Python >=2.6 functions:
-
-    if sys.version_info >= (2, 6):
-
-        # factorial() must not be "damaged" by the umath module, so as
-        # to help make it a drop-in replacement for math (even though
-        # factorial() does not work on numbers with uncertainties
-        # because it is restricted to integers, as for
-        # math.factorial()):
-        assert umath.factorial(4) == 24
-
-        # Boolean functions:
-        assert not umath.isinf(x)
-
-        # Comparison, possibly between an AffineScalarFunc object and a
-        # boolean, which makes things more difficult for this code:
-        assert umath.isinf(x) == False
-
-        # fsum is special because it does not take a fixed number of
-        # variables:
-        assert umath.fsum([x, x]).nominal_value == -3
-
-    # The same exceptions should be generated when numbers with uncertainties
-    # are used:
-
-    ## !! The Nose testing framework seems to catch an exception when
-    ## it is aliased: "exc = OverflowError; ... except exc:..."
-    ## surprisingly catches OverflowError. So, tests are written in a
-    ## version-specific manner (until the Nose issue is resolved).
-
-    if sys.version_info < (2, 6):
-
-        try:
-            math.log(0)
-        except OverflowError(err_math):  # "as", for Python 2.6+
-            pass
-        else:
-            raise Exception('OverflowError exception expected')
-        try:
-            umath.log(0)
-        except OverflowError(err_ufloat):  # "as", for Python 2.6+
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('OverflowError exception expected')
-        try:
-            umath.log(uncertainties.ufloat((0, 0)))
-        except OverflowError(err_ufloat):  # "as", for Python 2.6+
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('OverflowError exception expected')
-        try:
-            umath.log(uncertainties.ufloat((0, 1)))
-        except OverflowError(err_ufloat):  # "as", for Python 2.6+
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('OverflowError exception expected')
-
-    elif sys.version_info < (3,):
-
-        try:
-            math.log(0)
-        except ValueError(err_math):
-            pass
-        else:
-            raise Exception('ValueError exception expected')
-        try:
-            umath.log(0)
-        except ValueError(err_ufloat):
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('ValueError exception expected')
-        try:
-            umath.log(uncertainties.ufloat((0, 0)))
-        except ValueError(err_ufloat):
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('ValueError exception expected')
-        try:
-            umath.log(uncertainties.ufloat((0, 1)))
-        except ValueError(err_ufloat):
-            assert err_math.args == err_ufloat.args
-        else:
-            raise Exception('ValueError exception expected')
-
-    else:  # Python 3+
-
-        # !!! The tests should be made to work with Python 3 too!
-        pass
diff --git a/lmfit/uncertainties/test_uncertainties.py b/lmfit/uncertainties/test_uncertainties.py
deleted file mode 100644
index 73879e8..0000000
--- a/lmfit/uncertainties/test_uncertainties.py
+++ /dev/null
@@ -1,972 +0,0 @@
-# coding=utf-8
-
-"""
-Tests of the code in uncertainties/__init__.py.
-
-These tests can be run through the Nose testing framework.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL).
-"""
-
-from __future__ import division, print_function
-
-# Standard modules
-import copy
-import weakref
-import math
-import random
-import sys
-
-# 3rd-party modules
-# import nose.tools
-
-# Local modules
-
-import uncertainties
-from uncertainties import ufloat, AffineScalarFunc, umath
-
-from uncertainties import __author__
-
-# The following information is useful for making sure that the right
-# version of Python is running the tests (for instance with the Travis
-# Continuous Integration system):
-# print "Testing with Python", sys.version
-
-###############################################################################
-
-# Utilities for unit testing
-
-def _numbers_close(x, y, tolerance=1e-6):
-    """
-    Returns True if the given (real) numbers are close enough.
-
-    The given tolerance is the relative difference allowed, or the absolute
-    difference, if one of the numbers is 0.
-    """
-
-    # Instead of wrapping a possible ZeroDivisionError in a try block,
-    # we test explicitly, because a NaN could otherwise appear silently:
-
-    if x != 0 and y != 0:
-        return abs(1-y/x) < tolerance
-    else:
-        if x == 0:
-            return abs(y) < tolerance
-        else:
-            return abs(x) < tolerance
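
The helper switches between a relative and an absolute test.  A few
standalone checks illustrate the two regimes (plain floats, no
dependence on the rest of the package):

    # Relative comparison when both values are non-zero:
    assert _numbers_close(1.0, 1.0 + 1e-9)      # |1 - y/x| ~ 1e-9 < 1e-6
    assert not _numbers_close(1.0, 1.1)         # relative difference: 0.1
    # Absolute comparison as soon as one value is exactly zero:
    assert _numbers_close(0, 1e-9)              # |y| < tolerance
    assert not _numbers_close(0, 1e-3)
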
-
-class DerivativesDiffer(Exception):
-    pass
-
-
-def _compare_derivatives(func, numerical_derivatives,
-                         num_args_list=None):
-    """
-    Checks the derivatives of a function 'func' (as returned by the
-    wrap() wrapper), by comparing them to the
-    'numerical_derivatives' functions.
-
-    Raises a DerivativesDiffer exception in case of problem.
-
-    These functions all take the number of arguments listed in
-    num_args_list.  If num_args_list is None, it is automatically obtained.
-
-    Tests are done on random arguments.
-    """
-
-    # print "Testing", func.__name__
-
-    if not num_args_list:
-
-        # Detecting automatically the correct number of arguments is not
-        # always easy (because not all values are allowed, etc.):
-
-        num_args_table = {
-            'atanh': [1],
-            'log': [1, 2]  # Both numbers of arguments are tested
-            }
-        if func.__name__ in num_args_table:
-            num_args_list = num_args_table[func.__name__]
-        else:
-
-            num_args_list = []
-
-            # We loop until we find reasonable function arguments:
-            # We get the number of arguments by trial and error:
-            for num_args in range(10):
-                try:
-                    #! Giving integer arguments is good for preventing
-                    # certain functions from failing even though num_args
-                    # is their correct number of arguments
-                    # (e.g. math.ldexp(x, i), where i must be an integer)
-                    func(*(1,)*num_args)
-                except TypeError:
-                    pass  # Not the right number of arguments
-                else:  # No error
-                    # num_args is a good number of arguments for func:
-                    num_args_list.append(num_args)
-
-            if not num_args_list:
-                raise Exception("Can't find a reasonable number of arguments"
-                                " for function '%s'." % func.__name__)
-
-    for num_args in num_args_list:
-
-        # Argument numbers that will have a random integer value:
-        integer_arg_nums = set()
-
-        if func.__name__ == 'ldexp':
-            # The second argument must be an integer:
-            integer_arg_nums.add(1)
-
-        while True:
-            try:
-
-                # We include negative numbers, for more thorough tests:
-                args = [
-                    random.choice(range(-10, 10))
-                    if arg_num in integer_arg_nums
-                    else uncertainties.Variable(random.random()*4-2, 0)
-                    for arg_num in range(num_args)]
-
-                # 'args', but as scalar values:
-                args_scalar = [uncertainties.nominal_value(v)
-                               for v in args]
-
-                func_approx = func(*args)
-
-                # Some functions yield simple Python constants, after
-                # wrapping in wrap(): no test has to be performed.
-                # Some functions also yield tuples...
-                if isinstance(func_approx, AffineScalarFunc):
-
-                    # We compare all derivatives:
-                    for (arg_num, (arg, numerical_deriv)) in (
-                        enumerate(zip(args, numerical_derivatives))):
-
-                        # Some arguments might not be differentiable:
-                        if isinstance(arg, int):
-                            continue
-
-                        fixed_deriv_value = func_approx.derivatives[arg]
-
-                        num_deriv_value = numerical_deriv(*args_scalar)
-
-                        # This message is useful: the user can see that
-                        # tests are really performed (instead of not being
-                        # performed, silently):
-                        print( "Testing %s at %s, arg #%d" % (
-                            func.__name__, args, arg_num))
-
-                        if not _numbers_close(fixed_deriv_value,
-                                              num_deriv_value, 1e-4):
-
-                            # It is possible that the result is NaN:
-
-                            # ! Python 2.6+: this would be
-                            # not math.isnan(func_approx):
-                            if func_approx == func_approx:
-                                raise DerivativesDiffer(
-                                    "Derivative #%d of function '%s' may be"
-                                    " wrong: at args = %s,"
-                                    " value obtained = %.16f,"
-                                    " while numerical approximation = %.16f."
-                                    % (arg_num, func.__name__, args,
-                                       fixed_deriv_value, num_deriv_value))
-
-            except ValueError as err:  # Arguments out of range, or of wrong type
-                # Factorial(real) lands here:
-                if str(err).startswith('factorial'):
-                    integer_arg_nums = set([0])
-                continue  # We try with different arguments
-            # Some arguments might have to be integers, for instance:
-            except TypeError:
-                if len(integer_arg_nums) == num_args:
-                    raise Exception("Incorrect testing procedure: unable to "
-                                    "find correct argument values for %s."
-                                    % func.__name__)
-
-                # Another argument might be forced to be an integer:
-                integer_arg_nums.add(random.choice(range(num_args)))
-            else:
-                # We have found reasonable arguments, and the test passed:
-                break
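
The core check above — a stored analytical derivative against a
numerical estimate — does not depend on the package and can be
sketched with a two-sided difference quotient (math.sin and math.cos
stand in for a wrapped function and its registered derivative):

    import math

    def central_diff(f, x, h=1e-6):
        # Two-sided difference quotient; the error is O(h**2) for smooth f.
        return (f(x + h) - f(x - h)) / (2 * h)

    x = 0.3
    analytical = math.cos(x)                   # d/dx sin(x) = cos(x)
    numerical = central_diff(math.sin, x)
    assert abs(analytical - numerical) < 1e-8
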
-
-###############################################################################
-
-# Test of correctness of the fixed (usually analytical) derivatives:
-def test_fixed_derivatives_basic_funcs():
-    """
-    Pre-calculated derivatives for operations on AffineScalarFunc.
-    """
-
-    def check_op(op, num_args):
-        """
-        Makes sure that the derivatives for function '__op__' of class
-        AffineScalarFunc, which takes num_args arguments, are correct.
-
-        If num_args is None, a correct value is calculated.
-        """
-
-        op_string = "__%s__" % op
-        func = getattr(AffineScalarFunc, op_string)
-        numerical_derivatives = uncertainties.NumericalDerivatives(
-            # The __neg__ etc. methods of AffineScalarFunc only apply,
-            # by definition, to AffineScalarFunc objects: we first map
-            # possible scalar arguments (used for calculating
-            # derivatives) to AffineScalarFunc objects:
-            lambda *args: func(*map(uncertainties.to_affine_scalar, args)))
-        _compare_derivatives(func, numerical_derivatives, [num_args])
-
-    # Operators that take 1 value:
-    for op in uncertainties._modified_operators:
-        check_op(op, 1)
-
-    # Operators that take 2 values:
-    for op in uncertainties._modified_ops_with_reflection:
-        check_op(op, 2)
-
-# Additional, more complex checks, for use with the nose unit testing
-# framework.
-
-def test_copy():
-    "Standard copy module integration"
-    import gc
-
-    x = ufloat((3, 0.1))
-    assert x == x
-
-    y = copy.copy(x)
-    assert x != y
-    assert not(x == y)
-    assert y in y.derivatives.keys()  # y must not copy the dependence on x
-
-    z = copy.deepcopy(x)
-    assert x != z
-
-    # Copy tests on expressions:
-    t = x + 2*z
-    # t depends on x:
-    assert x in t.derivatives
-
-    # The relationship between the copy of an expression and the
-    # original variables should be preserved:
-    t_copy = copy.copy(t)
-    # Shallow copy: the variables on which t depends are not copied:
-    assert x in t_copy.derivatives
-    assert (uncertainties.covariance_matrix([t, z]) ==
-            uncertainties.covariance_matrix([t_copy, z]))
-
-    # However, the relationship between a deep copy and the original
-    # variables should be broken, since the deep copy created new,
-    # independent variables:
-    t_deepcopy = copy.deepcopy(t)
-    assert x not in t_deepcopy.derivatives
-    assert (uncertainties.covariance_matrix([t, z]) !=
-            uncertainties.covariance_matrix([t_deepcopy, z]))
-
-    # Test of implementations with weak references:
-
-    # Weak references: destroying a variable should never destroy the
-    # integrity of its copies (which would happen if the copy keeps a
-    # weak reference to the original, in its derivatives member: the
-    # weak reference to the original would become invalid):
-    del x
-
-    gc.collect()
-
-    assert y in y.derivatives.keys()
-
-def test_pickling():
-    "Standard pickle module integration."
-
-    import pickle
-
-    x = ufloat((2, 0.1))
-
-    x_unpickled = pickle.loads(pickle.dumps(x))
-
-    assert x != x_unpickled  # Pickling creates copies
-
-    ## Tests with correlations and AffineScalarFunc objects:
-    f = 2*x
-    assert isinstance(f, AffineScalarFunc)
-    (f_unpickled, x_unpickled2) = pickle.loads(pickle.dumps((f, x)))
-    # Correlations must be preserved:
-    assert f_unpickled - x_unpickled2 - x_unpickled2 == 0
-
-
-def test_int_div():
-    "Integer division"
-    # We perform all operations on floats, because derivatives can
-    # otherwise be meaningless:
-    x = ufloat((3.9, 2))//2
-    assert x.nominal_value == 1.
-    # All errors are supposed to be small, so the ufloat()
-    # in x violates the assumption.  Therefore, the following is
-    # correct:
-    assert x.std_dev() == 0.0
-
-def test_comparison_ops():
-    "Test of comparison operators"
-
-    import random
-
-    # Operations on quantities equivalent to Python numbers must still
-    # be correct:
-    a = ufloat((-3, 0))
-    b = ufloat((10, 0))
-    c = ufloat((10, 0))
-    assert a < b
-    assert a < 3
-    assert 3 < b  # This is first given to int.__lt__()
-    assert b == c
-
-    x = ufloat((3, 0.1))
-
-    # One constraint is that usual Python code for inequality testing
-    # still work in a reasonable way (for instance, it is generally
-    # desirable that functions defined by different formulas on
-    # different intervals can still do "if 0 < x < 1:...".  This
-    # supposes again that errors are "small" (as for the estimate of
-    # the standard error).
-    assert x > 1
-
-    # The limit case is not obvious:
-    assert not(x >= 3)
-    assert not(x < 3)
-
-    assert x == x
-    # Comparison between a Variable and an AffineScalarFunc:
-    assert x == x + 0
-    # Comparison between 2 _different_ AffineScalarFunc objects
-    # representing the same value:
-    assert x/2 == x/2
-    # With uncorrelated results that have the same behavior (value and
-    # standard error):
-    assert 2*ufloat((1, 0.1)) != ufloat((2, 0.2))
-    # Comparison between 2 _different_ Variable objects
-    # that are uncorrelated:
-    assert x != ufloat((3, 0.1))
-
-    assert x != ufloat((3, 0.2))
-
-    # Comparison to other types should work:
-    assert x != None  # Not comparable
-    assert x-x == 0  # Comparable, even though the types are different
-    assert x != [1, 2]
-
-
-    ####################
-
-    # Checks of the semantics of logical operations: they return True
-    # iff they are always True when the parameters vary in an
-    # infinitesimal interval inside sigma (sigma == 0 is a special
-    # case):
-
-    def test_all_comparison_ops(x, y):
-        """
-        Takes two Variable objects.
-
-        Fails if any comparison operation fails to follow the proper
-        semantics: a comparison only returns True if the corresponding float
-        comparison results are True for all the float values taken by
-        the variables (of x and y) when they vary in an infinitesimal
-        neighborhood within their uncertainty.
-
-        This test is stochastic: it may, exceptionally, fail for
-        correctly implemented comparison operators.
-        """
-
-        import random
-
-        def random_float(var):
-            """
-            Returns a random value for Variable var, in an
-            infinitesimal interval within its uncertainty.  The case
-            of a zero uncertainty is special.
-            """
-            return ((random.random()-0.5) * min(var.std_dev(), 1e-5)
-                    + var.nominal_value)
-
-        # All operations are tested:
-        for op in ("__%s__" % name
-                   for name in('ne', 'eq', 'lt', 'le', 'gt', 'ge')):
-
-            float_func = getattr(float, op)
-
-            # Determination of the correct truth value of func(x, y):
-
-            sampled_results = []
-
-            # The "main" value is an important particular case, and
-            # the starting value for the final result
-            # (correct_result):
-
-            sampled_results.append(float_func(x.nominal_value, y.nominal_value))
-
-            for check_num in range(50):  # Many points checked
-                sampled_results.append(float_func(random_float(x),
-                                                  random_float(y)))
-
-            min_result = min(sampled_results)
-            max_result = max(sampled_results)
-
-            if min_result == max_result:
-                correct_result = min_result
-            else:
-
-                # Almost all results must be True, for the final value
-                # to be True:
-                num_min_result = sampled_results.count(min_result)
-
-                # 1 exception is considered OK:
-                correct_result = (num_min_result == 1)
-
-            try:
-                assert correct_result == getattr(x, op)(y)
-            except AssertionError:
-                print( "Sampling results:", sampled_results)
-                raise Exception("Semantic value of %s %s (%s) %s not"
-                                " correctly reproduced."
-                                % (x, op, y, correct_result))
-
-    # With different numbers:
-    test_all_comparison_ops(ufloat((3, 0.1)),
-                            ufloat((-2, 0.1)))
-    test_all_comparison_ops(ufloat((0, 0)),  # Special number
-                            ufloat((1, 1)))
-    test_all_comparison_ops(ufloat((0, 0)),  # Special number
-                            ufloat((0, 0.1)))
-    # With identical numbers:
-    test_all_comparison_ops(ufloat((0, 0)),
-                            ufloat((0, 0)))
-    test_all_comparison_ops(ufloat((1, 1)),
-                            ufloat((1, 1)))
-
-
-def test_logic():
-    "Boolean logic: __nonzero__, bool."
-
-    x = ufloat((3, 0))
-    y = ufloat((0, 0))
-    z = ufloat((0, 0.1))
-    t = ufloat((-1, 2))
-
-    assert bool(x) == True
-    assert bool(y) == False
-    assert bool(z) == True
-    assert bool(t) == True  # Only infinitesimal neighborhoods are used
-
-
-
-def test_basic_access_to_data():
-    "Access to data from Variable and AffineScalarFunc objects."
-
-    x = ufloat((3.14, 0.01), "x var")
-    assert x.tag == "x var"
-    assert x.nominal_value == 3.14
-    assert x.std_dev() == 0.01
-
-    # Case of AffineScalarFunc objects:
-    y = x + 0
-    assert type(y) == AffineScalarFunc
-    assert y.nominal_value == 3.14
-    assert y.std_dev() == 0.01
-
-    # Details on the sources of error:
-    a = ufloat((-1, 0.001))
-    y = 2*x + 3*x + 2 + a
-    error_sources = y.error_components()
-    assert len(error_sources) == 2  # 'a' and 'x'
-    assert error_sources[x] == 0.05
-    assert error_sources[a] == 0.001
-
-    # Derivative values should be available:
-    assert y.derivatives[x] == 5
-
-    # Modification of the standard deviation of variables:
-    x.set_std_dev(1)
-    assert y.error_components()[x] == 5  # New error contribution!
-
-    # Calculation of deviations in units of the standard deviations:
-    assert 10/x.std_dev() == x.std_score(10 + x.nominal_value)
-
-    # "In units of the standard deviation" is not always meaningfull:
-    x.set_std_dev(0)
-    try:
-        x.std_score(1)
-    except ValueError:
-        pass  # Normal behavior
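
The error_components() values above follow from first-order (linear)
propagation: each variable contributes |dy/dx_i| * sigma_i, and the
contributions combine in quadrature.  A minimal check with plain
floats, for y = 2*x + 3*x + 2 + a as above:

    import math

    dy_dx, sigma_x = 5, 0.01     # y depends on x through 2*x + 3*x
    dy_da, sigma_a = 1, 0.001

    contrib_x = abs(dy_dx) * sigma_x       # 0.05, as asserted above
    contrib_a = abs(dy_da) * sigma_a       # 0.001
    sigma_y = math.sqrt(contrib_x**2 + contrib_a**2)
    assert abs(contrib_x - 0.05) < 1e-12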
-
-def test_correlations():
-    "Correlations between variables"
-
-    a = ufloat((1, 0))
-    x = ufloat((4, 0.1))
-    y = x*2 + a
-    # Correlations cancel "naive" additions of uncertainties:
-    assert y.std_dev() != 0
-    normally_zero = y - (x*2 + 1)
-    assert normally_zero.nominal_value == 0
-    assert normally_zero.std_dev() == 0
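
The cancellation works because both occurrences of x carry the same
derivative bookkeeping: for d = y - (x*2 + 1), dd/dx = 2 - 2 = 0, so
sigma_d is exactly 0.  With an independent variable of the same
nominal value and spread, the quadrature sum survives instead (a
sketch using the same tuple-argument ufloat() API as above):

    import math

    x1 = ufloat((4, 0.1))
    x2 = ufloat((4, 0.1))        # independent of x1, despite equal values
    diff = 2*x1 - 2*x2
    # sigma = sqrt((2*0.1)**2 + (2*0.1)**2) = 0.2*sqrt(2):
    assert abs(diff.std_dev() - 0.2*math.sqrt(2)) < 1e-12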
-
-def test_str_input():
-
-    "Input of numbers with uncertainties as a string"
-
-    # String representation, and numerical values:
-    tests = {
-        "-1.23(3.4)": (-1.23, 3.4),  # (Nominal value, error)
-        "-1.34(5)": (-1.34, 0.05),
-        "1(6)": (1, 6),
-        "3(4.2)": (3, 4.2),
-        "-9(2)": (-9, 2),
-        "1234567(1.2)": (1234567, 1.2),
-        "12.345(15)": (12.345, 0.015),
-        "-12.3456(78)e-6": (-12.3456e-6, 0.0078e-6),
-        "0.29": (0.29, 0.01),
-        "31.": (31, 1),
-        "-31.": (-31, 1),
-        # The following tests that the ufloat() routine does not
-        # treat the string '31' like the tuple ('3', '1') of two
-        # 1-character strings (which would make it expect two
-        # numbers):
-        "31": (31, 1),
-        "-3.1e10": (-3.1e10, 0.1e10),
-        "169.0(7)": (169, 0.7),
-        "-0.1+/-1": (-0.1, 1),
-        "-13e-2+/-1e2": (-13e-2, 1e2),
-        '-14.(15)': (-14, 15),
-        '-100.0(15)': (-100, 1.5),
-        '14.(15)': (14, 15)
-        }
-
-    for (representation, values) in tests.iteritems():
-
-        num = ufloat(representation)
-
-        assert _numbers_close(num.nominal_value, values[0])
-        assert _numbers_close(num.std_dev(), values[1])
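
The convention behind the "value(error)" shorthand is that the
parenthesized digits give the error in units of the last decimal
place of the nominal value.  The hypothetical parse_shorthand() below
only illustrates that convention for the simplest form (no exponent,
no "+/-"); it is not the package's actual parser:

    import re

    def parse_shorthand(s):
        m = re.match(r'^(-?\d+\.?(\d*))\((\d+)\)$', s)
        value, decimals, err_digits = m.group(1), m.group(2), m.group(3)
        # The error counts in units of the last decimal place:
        return float(value), int(err_digits) * 10 ** -len(decimals)

    val, err = parse_shorthand("12.345(15)")
    assert val == 12.345 and abs(err - 0.015) < 1e-12
    val, err = parse_shorthand("1(6)")
    assert val == 1 and err == 6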
-
-
-def test_no_coercion():
-    """
-    Coercion of Variable object to a simple float.
-
-    The coercion should be impossible, like for complex numbers.
-    """
-
-    x = ufloat((4, 1))
-    try:
-        assert float(x) == 4
-    except TypeError:
-        pass
-    else:
-        raise Exception("Conversion to float() should fail with TypeError")
-
-def test_wrapped_func():
-    """
-    Test uncertainty-aware functions obtained through wrapping.
-    """
-
-    # This function can be wrapped so that it works when 'angle' has
-    # an uncertainty (math.cos does not handle numbers with
-    # uncertainties):
-    def f(angle, list_var):
-        return math.cos(angle) + sum(list_var)
-
-    f_wrapped = uncertainties.wrap(f)
-    my_list = [1, 2, 3]
-
-    # Test of a wrapped function that only calls the original function:
-    assert f_wrapped(0, my_list) == 1 + sum(my_list)
-
-    # As a precaution, the wrapped function does not venture into
-    # calculating f with uncertainties when one of the arguments is not
-    # a simple number, because this argument might contain variables:
-    angle = ufloat((0, 0.1))
-
-    assert f_wrapped(angle, [angle, angle]) == NotImplemented
-    assert f_wrapped(angle, my_list) == NotImplemented
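
For the common case of a function of simple numbers, wrapping is all
that is needed to propagate uncertainties; a sketch, assuming the
vendored API exercised above (wrap(), tuple-argument ufloat(),
std_dev() as a method):

    import math
    import uncertainties

    usqrt = uncertainties.wrap(math.sqrt)  # math.sqrt itself rejects ufloats
    x = uncertainties.ufloat((4.0, 0.2))
    y = usqrt(x)
    assert abs(y.nominal_value - 2.0) < 1e-9
    # First-order propagation: sigma_y = |1/(2*sqrt(4))| * 0.2 = 0.05:
    assert abs(y.std_dev() - 0.05) < 1e-6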
-
-def test_wrapped_func_with_kwargs():
-    """
-    Test wrapped functions with keyword args
-    """
-    def cos_plain(angle):
-        return math.cos(angle)
-
-    def cos_kwargs(angle, **kwargs):
-        return math.cos(angle)
-
-    def use_kwargs(angle, cos=True):
-        if cos:
-            return math.cos(angle)
-        else:
-            return math.sin(angle)
-
-    # wrappings of these functions
-    wrap_cos_plain  = uncertainties.wrap(cos_plain)
-    wrap_cos_wderiv = uncertainties.wrap(cos_plain, [math.cos])
-    wrap_cos_kwargs = uncertainties.wrap(cos_kwargs)
-    wrap_use_kwargs = uncertainties.wrap(use_kwargs)
-    umath_cos       = umath.cos
-    umath_sin       = umath.sin
-
-    # now test that the wrapped functions give the same results
-    # as the umath versions for a variety of input values
-    for a in (ufloat((0.2, 0.01)),  ufloat((0.7, 0.00001)),
-              #ufloat((0.9, 0.3)),   ufloat((1.e-4, 0.3)),
-              #ufloat((200.0, 0.3)), ufloat((1.e5, 0.3)),
-              #0, 2, 1.25, 0.0, 1.e-5, 0.707, 1.5708
-              ):
-        ucos = umath_cos(a)
-        usin = umath_sin(a)
-        assert _numbers_close(ucos, wrap_cos_plain(a))
-        assert _numbers_close(ucos, wrap_cos_wderiv(a))
-        assert _numbers_close(ucos, wrap_cos_kwargs(a))
-        assert _numbers_close(ucos, wrap_cos_kwargs(a, opt=None))
-        assert _numbers_close(ucos, wrap_cos_kwargs(a, opt=None, opt2=True))
-        assert _numbers_close(ucos, wrap_use_kwargs(a, cos=True))
-        assert _numbers_close(usin, wrap_use_kwargs(a, cos=False))
-
-    # affirm that calling a wrapped function with unsupported
-    # keyword args raises a TypeError
-    raised = False
-    try:
-        wrap_use_kwargs(a, other=False)
-    except TypeError:
-        raised = True
-    assert raised
-
-###############################################################################
-
-def test_access_to_std_dev():
-    "Uniform access to the standard deviation"
-
-    x = ufloat((1, 0.1))
-    y = 2*x
-
-    # std_dev for Variable and AffineScalarFunc objects:
-    assert uncertainties.std_dev(x) == x.std_dev()
-    assert uncertainties.std_dev(y) == y.std_dev()
-
-    # std_dev for other objects:
-    assert uncertainties.std_dev([]) == 0
-    assert uncertainties.std_dev(None) == 0
-
-###############################################################################
-
-def test_covariances():
-    "Covariance matrix"
-
-    x = ufloat((1, 0.1))
-    y = -2*x+10
-    z = -3*x
-    covs = uncertainties.covariance_matrix([x, y, z])
-    # Diagonal elements are simple:
-    assert _numbers_close(covs[0][0], 0.01)
-    assert _numbers_close(covs[1][1], 0.04)
-    assert _numbers_close(covs[2][2], 0.09)
-    # Non-diagonal elements:
-    assert _numbers_close(covs[0][1], -0.02)
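
These entries follow from linearity: for y = -2*x + 10 and z = -3*x,
cov(x, y) = -2*sigma_x**2 = -0.02 and var(z) = 9*sigma_x**2 = 0.09.
The full matrix is J * J.T * sigma_x**2, with J the Jacobian of
(x, y, z) with respect to x (a plain NumPy check):

    import numpy

    sigma_x = 0.1
    J = numpy.array([[1.0], [-2.0], [-3.0]])   # d(x, y, z)/dx
    cov = numpy.dot(J, J.T) * sigma_x**2
    assert abs(cov[0, 0] - 0.01) < 1e-12
    assert abs(cov[0, 1] + 0.02) < 1e-12
    assert abs(cov[2, 2] - 0.09) < 1e-12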
-
-###############################################################################
-
-def test_power():
-    '''
-    Checks special cases of x**p.
-
-    The value x = 0 is special, as are positive, zero, negative,
-    and integral values of p.
-    '''
-
-    zero = ufloat((0, 0))
-    one = ufloat((1, 0))
-    p = ufloat((0.3, 0.01))
-
-    # assert 0**p == 0  # !!! Should pass
-    # assert zero**p == 0  # !!! Should pass
-
-    # Should raise the same errors as float operations:
-    try:
-        0**(-p)
-    except ZeroDivisionError:
-        pass
-    else:
-        raise Exception('A proper exception should have been raised')
-
-    try:
-        zero**(-p)
-    except ZeroDivisionError:
-        pass
-    else:
-        raise Exception('A proper exception should have been raised')
-
-    if sys.version_info >= (2, 6):
-
-        # Reference: http://docs.python.org/library/math.html#math.pow
-
-        # …**0 == 1.0:
-        assert p**0 == 1.0
-        # assert zero**0 == 1.0  # !!! Should pass
-        assert (-p)**0 == 1.0
-        # …**zero:
-        # assert (-10.3)**zero == 1.0  # !!! Should pass
-        # assert 0**zero == 1.0  # !!! Should pass
-        assert 0.3**zero == 1.0
-        # assert float('nan')**zero == 1.0  # !!! Should pass
-        # assert (-p)**zero == 1.0  # !!! Should pass
-        # assert zero**zero == 1.0  # !!! Should pass
-        assert p**zero == 1.0
-
-        # one**… == 1.0
-        assert one**-3 == 1.0
-        assert one**-3.1 == 1.0
-        assert one**0 == 1.0
-        assert one**3 == 1.0
-        assert one**3.1 == 1.0
-        # assert one**float('nan') == 1.0  # !!! Should pass
-        # … with two numbers with uncertainties:
-        assert one**(-p) == 1.0
-        assert one**zero == 1.0
-        assert one**p == 1.0
-        # 1**… == 1.0:
-        assert 1.**(-p) == 1.0
-        assert 1.**zero == 1.0
-        assert 1.**p == 1.0
-
-
-    # Negative numbers with uncertainty can be exponentiated to an integral
-    # power:
-    assert (ufloat((-1.1, 0.1))**-9).nominal_value == (-1.1)**-9
-
-    # Case of numbers with no uncertainty: should give the same result
-    # as numbers with uncertainties:
-    assert ufloat((-1, 0))**9 == (-1)**9
-    assert ufloat((-1.1, 0))**9 == (-1.1)**9
-
-    # Negative numbers cannot be raised to a non-integral power, in
-    # Python 2 (in Python 3, complex numbers are returned; this cannot
-    # (yet) be represented in the uncertainties package, because it
-    # does not handle complex numbers):
-    if sys.version_info < (3,):
-        try:
-            ufloat((-1, 0))**9.1
-        except Exception as err_ufloat:
-            pass
-        else:
-            raise Exception('An exception should have been raised')
-        try:
-            (-1)**9.1
-        except Exception as err_float:
-            # UFloat and floats should raise the same error:
-            assert err_ufloat.args == err_float.args
-        else:
-            raise Exception('An exception should have been raised')
-
-
-###############################################################################
-
-# The tests below require NumPy, which is an optional package:
-try:
-    import numpy
-except ImportError:
-    pass
-else:
-
-    def matrices_close(m1, m2, precision=1e-4):
-        """
-        Returns True iff m1 and m2 are almost equal, where elements
-        can be either floats or AffineScalarFunc objects.
-
-        m1, m2 -- NumPy matrices.
-        precision -- precision passed through to
-        uncertainties.test_uncertainties._numbers_close().
-        """
-
-        # ! numpy.allclose() is similar to this function, but does not
-        # work on arrays that contain numbers with uncertainties, because
-        # of the isinf() function.
-
-        for (elmt1, elmt2) in zip(m1.flat, m2.flat):
-
-            # For a simpler comparison, both elements are
-            # converted to AffineScalarFunc objects:
-            elmt1 = uncertainties.to_affine_scalar(elmt1)
-            elmt2 = uncertainties.to_affine_scalar(elmt2)
-
-            if not _numbers_close(elmt1.nominal_value,
-                                  elmt2.nominal_value, precision):
-                return False
-
-            if not _numbers_close(elmt1.std_dev(),
-                                  elmt2.std_dev(), precision):
-                return False
-        return True
-
-
-    def test_numpy_comparison():
-        "Comparison with a Numpy array."
-
-        x = ufloat((1, 0.1))
-
-        # Comparison with a different type:
-        assert x != [x, x]
-
-        # NumPy arrays can be compared, through element-wise
-        # comparisons.  Numbers with uncertainties should yield the
-        # same kind of results as pure floats (i.e., a NumPy array,
-        # etc.).
-
-        # We test the comparison operators both for the uncertainties
-        # package *and* the NumPy package:
-
-        # Equalities, etc.:
-        assert len(x == numpy.arange(10)) == 10
-        assert len(numpy.arange(10) == x) == 10
-        assert len(x != numpy.arange(10)) == 10
-        assert len(numpy.arange(10) != x) == 10
-        assert len(x == numpy.array([x, x, x])) == 3
-        assert len(numpy.array([x, x, x]) == x) == 3
-        assert numpy.all(x == numpy.array([x, x, x]))
-
-        # Inequalities:
-        assert len(x < numpy.arange(10)) == 10
-        assert len(numpy.arange(10) > x) == 10
-        assert len(x <= numpy.arange(10)) == 10
-        assert len(numpy.arange(10) >= x) == 10
-        assert len(x > numpy.arange(10)) == 10
-        assert len(numpy.arange(10) < x) == 10
-        assert len(x >= numpy.arange(10)) == 10
-        assert len(numpy.arange(10) <= x) == 10
-
-        # A more detailed test that shows that the comparisons are
-        # meaningful (x >= 0, but not x <= 1):
-        assert numpy.all((x >= numpy.arange(3)) == [True, False, False])
-
-    def test_correlated_values():
-        """
-        Correlated variables.
-        Test through the input of the (full) covariance matrix.
-        """
-
-        u = uncertainties.ufloat((1, 0.1))
-        cov = uncertainties.covariance_matrix([u])
-        # "1" is used instead of u.nominal_value because
-        # u.nominal_value might return a float.  The idea is to force
-        # the new variable u2 to be defined through an integer nominal
-        # value:
-        u2, = uncertainties.correlated_values([1], cov)
-        expr = 2*u2  # Calculations with u2 should be possible, like with u
-
-        ####################
-
-        # Covariances between output and input variables:
-
-        x = ufloat((1, 0.1))
-        y = ufloat((2, 0.3))
-        z = -3*x+y
-
-        covs = uncertainties.covariance_matrix([x, y, z])
-
-        # Test of the diagonal covariance elements:
-        assert matrices_close(
-            numpy.array([v.std_dev()**2 for v in (x, y, z)]),
-            numpy.array(covs).diagonal())
-
-        # "Inversion" of the covariance matrix: creation of new
-        # variables:
-        (x_new, y_new, z_new) = uncertainties.correlated_values(
-            [x.nominal_value, y.nominal_value, z.nominal_value],
-            covs,
-            tags = ['x', 'y', 'z'])
-
-        # Even the uncertainties should be correctly reconstructed:
-        assert matrices_close(numpy.array((x, y, z)),
-                              numpy.array((x_new, y_new, z_new)))
-
-        # ... and the covariances too:
-        assert matrices_close(
-            numpy.array(covs),
-            numpy.array(uncertainties.covariance_matrix([x_new, y_new, z_new])))
-
-        assert matrices_close(
-            numpy.array([z_new]), numpy.array([-3*x_new+y_new]))
-
-        ####################
-
-        # ... as well as functional relations:
-
-        u = ufloat((1, 0.05))
-        v = ufloat((10, 0.1))
-        sum_value = u+2*v
-
-        # Covariance matrices:
-        cov_matrix = uncertainties.covariance_matrix([u, v, sum_value])
-
-        # Correlated variables can be constructed from a covariance
-        # matrix, if NumPy is available:
-        (u2, v2, sum2) = uncertainties.correlated_values(
-            [x.nominal_value for x in [u, v, sum_value]],
-            cov_matrix)
-
-        # matrices_close() is used instead of _numbers_close() because
-        # it compares uncertainties too:
-        assert matrices_close(numpy.array([u]), numpy.array([u2]))
-        assert matrices_close(numpy.array([v]), numpy.array([v2]))
-        assert matrices_close(numpy.array([sum_value]), numpy.array([sum2]))
-        assert matrices_close(numpy.array([0]),
-                              numpy.array([sum2-(u2+2*v2)]))
-
-
-    def test_correlated_values_correlation_mat():
-        '''
-        Tests the input of correlated values.
-
-        Test through their correlation matrix (instead of the
-        covariance matrix).
-        '''
-
-        x = ufloat((1, 0.1))
-        y = ufloat((2, 0.3))
-        z = -3*x+y
-
-        cov_mat = uncertainties.covariance_matrix([x, y, z])
-
-        std_devs = numpy.sqrt(numpy.array(cov_mat).diagonal())
-
-        corr_mat = cov_mat/std_devs/std_devs[numpy.newaxis].T
-
-        # We make sure that the correlation matrix is indeed symmetric:
-        assert (corr_mat-corr_mat.T).max() <= 1e-15
-        # We make sure that there are indeed ones on the diagonal:
-        assert (corr_mat.diagonal()-1).max() <= 1e-15
-
-        # We try to recover the correlated variables through the
-        # correlation matrix (not through the covariance matrix):
-
-        nominal_values = [v.nominal_value for v in (x, y, z)]
-        std_devs = [v.std_dev() for v in (x, y, z)]
-        x2, y2, z2 = uncertainties.correlated_values_norm(
-            zip(nominal_values, std_devs), corr_mat)
-
-        # matrices_close() is used instead of _numbers_close() because
-        # it compares uncertainties too:
-
-        # Test of individual variables:
-        assert matrices_close(numpy.array([x]), numpy.array([x2]))
-        assert matrices_close(numpy.array([y]), numpy.array([y2]))
-        assert matrices_close(numpy.array([z]), numpy.array([z2]))
-
-        # Partial correlation test:
-        assert matrices_close(numpy.array([0]), numpy.array([z2-(-3*x2+y2)]))
-
-        # Test of the full covariance matrix:
-        assert matrices_close(
-            numpy.array(cov_mat),
-            numpy.array(uncertainties.covariance_matrix([x2, y2, z2])))
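
The normalization used above is corr_ij = cov_ij / (sigma_i * sigma_j),
i.e. the covariance matrix divided once by the standard deviations
along the rows and once along the columns; a standalone check for the
pair (x, -2*x), which is fully anti-correlated:

    import numpy

    cov = numpy.array([[0.01, -0.02],
                       [-0.02, 0.04]])   # cov of (x, -2*x), sigma_x = 0.1
    sigmas = numpy.sqrt(cov.diagonal())
    corr = cov / sigmas / sigmas[numpy.newaxis].T
    assert numpy.allclose(corr.diagonal(), 1.0)
    assert numpy.allclose(corr[0, 1], -1.0)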
-
-
-test_wrapped_func_with_kwargs()
diff --git a/lmfit/uncertainties/umath.py b/lmfit/uncertainties/umath.py
index 8b0e665..e0608c8 100644
--- a/lmfit/uncertainties/umath.py
+++ b/lmfit/uncertainties/umath.py
@@ -44,9 +44,7 @@ import itertools
 import functools
 
 # Local modules
-import uncertainties
-
-from uncertainties import __author__, to_affine_scalar, AffineScalarFunc
+from __init__ import wrap, set_doc, __author__, to_affine_scalar, AffineScalarFunc
 
 ###############################################################################
 
@@ -201,7 +199,7 @@ for name in dir(math):
     func = getattr(math, name)
 
     setattr(this_module, name,
-            wraps(uncertainties.wrap(func, derivatives), func))
+            wraps(wrap(func, derivatives), func))
 
     many_scalars_to_scalar_funcs.append(name)
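
The loop above gives every math function an uncertainty-aware
counterpart by passing its analytical derivatives to wrap().  For a
single case, the mechanism looks like this (a sketch, assuming the
vendored package is importable as uncertainties, as in its tests):

    import math
    import uncertainties

    # One derivative function per positional argument; d/dx sin(x) = cos(x):
    usin = uncertainties.wrap(math.sin, [math.cos])

    x = uncertainties.ufloat((0.5, 0.01))
    y = usin(x)
    assert abs(y.nominal_value - math.sin(0.5)) < 1e-12
    # First-order propagation: sigma_y = |cos(0.5)| * 0.01:
    assert abs(y.std_dev() - abs(math.cos(0.5)) * 0.01) < 1e-9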
 
@@ -245,7 +243,7 @@ if sys.version_info[:2] >= (2, 6):
 
         flat_fsum = lambda *args: original_func(args)
 
-        flat_fsum_wrap = uncertainties.wrap(
+        flat_fsum_wrap = wrap(
             flat_fsum, itertools.repeat(lambda *args: 1))
 
         return wraps(lambda arg_list: flat_fsum_wrap(*arg_list),
@@ -255,14 +253,14 @@ if sys.version_info[:2] >= (2, 6):
     non_std_wrapped_funcs.append('fsum')
 
 
-@uncertainties.set_doc(math.modf.__doc__)
+@set_doc(math.modf.__doc__)
 def modf(x):
     """
     Version of modf that works for numbers with uncertainty, and also
     for regular numbers.
     """
 
-    # The code below is inspired by uncertainties.wrap().  It is
+    # The code below is inspired by wrap().  It is
     # simpler because only 1 argument is given, and there is no
     # delegation to other functions involved (as for __mul__, etc.).
 
@@ -282,14 +280,14 @@ def modf(x):
 many_scalars_to_scalar_funcs.append('modf')
 
 
-@uncertainties.set_doc(math.ldexp.__doc__)
+@set_doc(math.ldexp.__doc__)
 def ldexp(x, y):
-    # The code below is inspired by uncertainties.wrap().  It is
+    # The code below is inspired by wrap().  It is
     # simpler because only 1 argument is given, and there is no
     # delegation to other functions involved (as for __mul__, etc.).
 
     # Another approach would be to add an additional argument to
-    # uncertainties.wrap() so that some arguments are automatically
+    # wrap() so that some arguments are automatically
     # considered as constants.
 
     aff_func = to_affine_scalar(x)  # y must be an integer, for math.ldexp
@@ -314,14 +312,14 @@ def ldexp(x, y):
 many_scalars_to_scalar_funcs.append('ldexp')
 
 
-@uncertainties.set_doc(math.frexp.__doc__)
+@set_doc(math.frexp.__doc__)
 def frexp(x):
     """
     Version of frexp that works for numbers with uncertainty, and also
     for regular numbers.
     """
 
-    # The code below is inspired by uncertainties.wrap().  It is
+    # The code below is inspired by wrap().  It is
     # simpler because only 1 argument is given, and there is no
     # delegation to other functions involved (as for __mul__, etc.).
 
diff --git a/lmfit/uncertainties/unumpy/__init__.py b/lmfit/uncertainties/unumpy/__init__.py
deleted file mode 100644
index 9dfb7fd..0000000
--- a/lmfit/uncertainties/unumpy/__init__.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Utilities for NumPy arrays and matrices that contain numbers with
-uncertainties.
-
-This package contains:
-
-1) utilities that help with the creation and manipulation of NumPy
-arrays and matrices of numbers with uncertainties;
-
-2) generalizations of multiple NumPy functions so that they also work
-with arrays that contain numbers with uncertainties.
-
-- Arrays of numbers with uncertainties can be built as follows:
-
-  arr = unumpy.uarray(([1, 2], [0.01, 0.002]))  # (values, uncertainties)
-
-NumPy arrays of numbers with uncertainties can also be built directly
-through NumPy, thanks to NumPy's support of arrays of arbitrary objects:
-
-  arr = numpy.array([uncertainties.ufloat((1, 0.1)),...])
-
-- Matrices of numbers with uncertainties are best created in one of
-two ways:
-
-  mat = unumpy.umatrix(([1, 2], [0.01, 0.002]))  # (values, uncertainties)
-
-Matrices can also be built by converting arrays of numbers with
-uncertainties, through the unumpy.matrix class:
-
-  mat = unumpy.matrix(arr)
-
-unumpy.matrix objects behave like numpy.matrix objects of numbers with
-uncertainties, but with better support for some operations (such as
-matrix inversion):
-
-  # The inverse or pseudo-inverse of a unumpy.matrix can be calculated:
-  print mat.I  # Would not work with numpy.matrix([[ufloat(...),...]]).I
-
-- Nominal values and uncertainties of arrays can be directly accessed:
-
-  print unumpy.nominal_values(arr)  # [ 1.  2.]
-  print unumpy.std_devs(mat)  # [ 0.01   0.002]
-
-- This module defines uncertainty-aware mathematical functions that
-generalize those from uncertainties.umath so that they work on NumPy
-arrays of numbers with uncertainties instead of just scalars:
-
-  print unumpy.cos(arr)  # Array with the cosine of each element
-
-NumPy's function names are used, and not those of the math module (for
-instance, unumpy.arccos is defined, like in NumPy, and is not named
-acos like in the standard math module).
-
-The definitions of the mathematical quantities calculated by these
-functions are available in the documentation of uncertainties.umath.
-
-- The unumpy.ulinalg module contains more uncertainty-aware functions
-for arrays that contain numbers with uncertainties (see the
-documentation for this module).
-
-This module requires the NumPy package.
-
-(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>.
-Please send feature requests, bug reports, or feedback to this address.
-
-This software is released under a dual license.  (1) The BSD license.
-(2) Any other license, as long as it is obtained from the original
-author."""
-
-# Local modules:
-from core import *
-from uncertainties.unumpy import core
-from uncertainties.unumpy import ulinalg  # Local sub-module
-
-from uncertainties import __author__
-
-# __all__ is set so that pydoc shows all important functions:
-__all__ = core.__all__
-# "import numpy" makes numpy.linalg available.  This behavior is
-# copied here, for maximum compatibility:
-__all__.append('ulinalg')
-
diff --git a/lmfit/uncertainties/unumpy/core.py b/lmfit/uncertainties/unumpy/core.py
deleted file mode 100644
index 8e77450..0000000
--- a/lmfit/uncertainties/unumpy/core.py
+++ /dev/null
@@ -1,612 +0,0 @@
-"""
-Core functions used by unumpy and some of its submodules.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL).
-"""
-
-# The functions found in this module cannot be defined in unumpy or
-# its submodule: this creates import loops, when unumpy explicitly
-# imports one of the submodules in order to make it available to the
-# user.
-
-from __future__ import division
-
-# Standard modules:
-import sys
-
-# 3rd-party modules:
-import numpy
-from numpy.core import numeric
-
-# Local modules:
-import uncertainties
-from uncertainties import umath
-
-from uncertainties import __author__
-
-__all__ = [
-    # Factory functions:
-    'uarray', 'umatrix',
-
-    # Utilities:
-    'nominal_values', 'std_devs',
-
-    # Classes:
-    'matrix'
-    ]
-
-###############################################################################
-# Utilities:
-
-# nominal_values() and std_devs() are defined as functions (instead of
-# as additional methods of the unumpy.matrix class) because the user
-# might well directly build arrays of numbers with uncertainties
-# without going through the factory functions found in this module
-# (uarray() and umatrix()).  Thus,
-# numpy.array([uncertainties.ufloat((1, 0.1))]) would not
-# have a nominal_values() method.  Adding such a method to, say,
-# unumpy.matrix, would break the symmetry between NumPy arrays and
-# matrices (no nominal_values() method), and objects defined in this
-# module.
-
-# ! Warning: the __doc__ is set, but help(nominal_values) does not
-# display it; it instead displays the documentation for the type of
-# nominal_values (i.e. the documentation of its class):
-
-to_nominal_values = numpy.vectorize(
-    uncertainties.nominal_value,
-    otypes=[float],  # Because vectorize() has side effects (dtype setting)
-    doc=("Applies uncertainties.nominal_value to the elements of"
-         " a NumPy (or unumpy) array (this includes matrices)."))
-
-to_std_devs = numpy.vectorize(
-    uncertainties.std_dev,
-    otypes=[float],  # Because vectorize() has side effects (dtype setting)
-    doc=("Returns the standard deviation of the numbers with uncertainties"
-         " contained in a NumPy array, or zero for other objects."))
-
-def unumpy_to_numpy_matrix(arr):
-    """
-    If arr is a unumpy.matrix, it is converted to a numpy.matrix.
-    Otherwise, it is returned unchanged.
-    """
-
-    return arr.view(numpy.matrix) if isinstance(arr, matrix) else arr
-
-def nominal_values(arr):
-    """
-    Returns the nominal values of the numbers in NumPy array arr.
-
-    Elements that are not uncertainties.AffineScalarFunc are passed
-    through untouched (because a numpy.array can contain numbers with
-    uncertainties and pure floats simultaneously).
-
-    If arr is of type unumpy.matrix, the returned array is a
-    numpy.matrix, because the resulting matrix does not contain
-    numbers with uncertainties.
-    """
-
-    return unumpy_to_numpy_matrix(to_nominal_values(arr))
-
-def std_devs(arr):
-    """
-    Returns the standard deviations of the numbers in NumPy array arr.
-
-    Elements that are not uncertainties.AffineScalarFunc are given a
-    zero uncertainty (because a numpy.array can contain numbers with
-    uncertainties and pure floats simultaneously).
-
-    If arr is of type unumpy.matrix, the returned array is a
-    numpy.matrix, because the resulting matrix does not contain
-    numbers with uncertainties.
-    """
-
-    return unumpy_to_numpy_matrix(to_std_devs(arr))
-
-###############################################################################
-
-def derivative(u, var):
-    """
-    Returns the derivative of u along var, if u is an
-    uncertainties.AffineScalarFunc instance, and if var is one of the
-    variables on which it depends.  Otherwise, return 0.
-    """
-    if isinstance(u, uncertainties.AffineScalarFunc):
-        try:
-            return u.derivatives[var]
-        except KeyError:
-            return 0.
-    else:
-        return 0.
-
-def wrap_array_func(func):
-    """
-    Returns a version of the function func() that works even when
-    func() is given a NumPy array that contains numbers with
-    uncertainties.
-
-    func() is supposed to return a NumPy array.
-
-    This wrapper is similar to uncertainties.wrap(), except that it
-    handles an array argument instead of float arguments.
-
-    func -- version that takes and returns a single NumPy array.
-    """
-
-    @uncertainties.set_doc("""\
-    Version of %s(...) that works even when its first argument is a NumPy
-    array that contains numbers with uncertainties.
-
-    Warning: elements of the first argument array that are not
-    AffineScalarFunc objects must not depend on uncertainties.Variable
-    objects in any way.  Otherwise, the dependence of the result on
-    uncertainties.Variable objects will be incorrect.
-
-    Original documentation:
-    %s""" % (func.__name__, func.__doc__))
-    def wrapped_func(arr, *args):
-        # Nominal value:
-        arr_nominal_value = nominal_values(arr)
-        func_nominal_value = func(arr_nominal_value, *args)
-
-        # The algorithm consists in numerically calculating the derivatives
-        # of func:
-
-        # Variables on which the array depends are collected:
-        variables = set()
-        for element in arr.flat:
-            # floats, etc. might be present
-            if isinstance(element, uncertainties.AffineScalarFunc):
-                variables |= set(element.derivatives.iterkeys())
-
-        # If the array has no variables, then the function value can be
-        # directly returned:
-        if not variables:
-            return func_nominal_value
-
-        # Calculation of the derivatives of each element with respect
-        # to the variables.  Each element must be independent of the
-        # others.  The derivatives have the same shape as the output
-        # array (which might differ from the shape of the input array,
-        # in the case of the pseudo-inverse).
-        derivatives = numpy.vectorize(lambda _: {})(func_nominal_value)
-        for var in variables:
-
-            # A basic assumption of this package is that the user
-            # guarantees that uncertainties cover a zone where
-            # evaluated functions are linear enough.  Thus, numerical
-            # estimates of the derivative should be good over the
-            # standard deviation interval.  This is true for the
-            # common case of a non-zero standard deviation of var.  If
-            # the standard deviation of var is zero, then var has no
-            # impact on the uncertainty of the function func being
-            # calculated: an incorrect derivative has no impact.  One
-            # scenario can give incorrect results, however, but it
-            # should be extremely uncommon: the user defines a
-            # variable x with 0 standard deviation, sets y = func(x)
-            # through this routine, changes the standard deviation of
-            # x, and prints y; in this case, the uncertainty on y
-            # might be incorrect, because this program had no idea of
-            # the scale on which func() is linear, when it calculated
-            # the numerical derivative.
-
-            # The standard deviation might be numerically too small
-            # for the evaluation of the derivative, though: we set the
-            # minimum variable shift.
-
-            shift_var = max(var._std_dev/1e5, 1e-8*abs(var._nominal_value))
-            # An exceptional case is that of var being exactly zero.
-            # In this case, an arbitrary shift is used for the
-            # numerical calculation of the derivative.  The resulting
-            # derivative value might be quite incorrect, but this does
-            # not matter as long as the uncertainty of var remains 0,
-            # since it is, in this case, a constant.
-            if not shift_var:
-                shift_var = 1e-8
-
-            # Shift of all the elements of arr when var changes by shift_var:
-            shift_arr = array_derivative(arr, var)*shift_var
-
-            # Origin value of array arr when var is shifted by shift_var:
-            shifted_arr_values = arr_nominal_value + shift_arr
-            func_shifted = func(shifted_arr_values, *args)
-            numerical_deriv = (func_shifted-func_nominal_value)/shift_var
-
-            # Update of the list of variables and associated
-            # derivatives, for each element:
-            for (derivative_dict, derivative_value) in (
-                zip(derivatives.flat, numerical_deriv.flat)):
-
-                if derivative_value:
-                    derivative_dict[var] = derivative_value
-
-        # Numbers with uncertainties are built from the result:
-        return numpy.vectorize(uncertainties.AffineScalarFunc)(
-            func_nominal_value, derivatives)
-
-    # It is easier to work with wrapped_func, which represents a
-    # wrapped version of 'func', when it bears the same name as
-    # 'func' (the name is used by repr(wrapped_func)).
-    wrapped_func.__name__ = func.__name__
-
-    return wrapped_func
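
A usage sketch for the wrapper above, assuming wrap_array_func is
imported from this module (it is not re-exported through __all__)
and that uarray() is used as elsewhere in this package:

    import numpy
    from uncertainties import unumpy
    from uncertainties.unumpy import core

    # Element-wise doubling; the wrapped version propagates the
    # uncertainties through the numerical-derivative scheme above:
    double = core.wrap_array_func(lambda arr: arr * 2)

    arr = unumpy.uarray(([1.0, 2.0], [0.1, 0.2]))
    result = double(arr)
    assert numpy.allclose(unumpy.nominal_values(result), [2.0, 4.0])
    assert numpy.allclose(unumpy.std_devs(result), [0.2, 0.4])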
-
-###############################################################################
-# Arrays
-
-# Vectorized creation of an array of variables:
-
-# ! Looking up uncertainties.Variable beforehand through '_Variable =
-# uncertainties.Variable' does not result in a significant speed up:
-
-_uarray = numpy.vectorize(lambda v, s: uncertainties.Variable(v, s),
-                          otypes=[object])
-
-def uarray((values, std_devs)):
-    """
-    Returns a NumPy array of numbers with uncertainties
-    initialized with the given nominal values and standard
-    deviations.
-
-    values, std_devs -- valid arguments for numpy.array, with
-    identical shapes (list of numbers, list of lists, numpy.ndarray,
-    etc.).
-    """
-
-    return _uarray(values, std_devs)
-
-###############################################################################
-
-def array_derivative(array_like, var):
-    """
-    Returns the derivative of the given array with respect to the
-    given variable.
-
-    The returned derivative is a Numpy ndarray of the same shape as
-    array_like, that contains floats.
-
-    array_like -- array-like object (list, etc.)  that contains
-    scalars or numbers with uncertainties.
-
-    var -- Variable object.
-    """
-    return numpy.vectorize(lambda u: derivative(u, var),
-                           # The type is set because an
-                           # integer derivative should not
-                           # set the output type of the
-                           # array:
-                           otypes=[float])(array_like)
-
-def func_with_deriv_to_uncert_func(func_with_derivatives):
-    """
-    Returns a function that can be applied to array-like objects that
-    contain numbers with uncertainties (lists, lists of lists, Numpy
-    arrays, etc.).
-
-    func_with_derivatives -- defines a function that takes array-like
-    objects containing scalars and returns an array.  Both the value
-    and the derivatives of this function with respect to multiple
-    scalar parameters are calculated by func_with_derivatives().
-
-    func_with_derivatives(arr, input_type, derivatives, *args) returns
-    an iterator.  The first element is the value of the function at
-    point 'arr' (with the correct type).  The following elements are
-    arrays that represent the derivative of the function for each
-    derivative array from the iterator 'derivatives'.
-
-      func_with_derivatives takes the following arguments:
-
-      arr -- Numpy ndarray of scalars where the function must be
-      evaluated.
-
-      input_type -- type of the input array-like object.  This type is
-      used for determining the type that the function should return.
-
-      derivatives -- iterator that returns the derivatives of the
-      argument of the function with respect to multiple scalar
-      variables.  func_with_derivatives() returns the derivatives of
-      the defined function with respect to these variables.
-
-      args -- additional arguments that define the result (example:
-      the numerical cutoff rcond for the pseudo-inverse numpy.linalg.pinv).
-
-    Examples of func_with_derivatives: inv_with_derivatives().
-    """
-
-    def wrapped_func(array_like, *args):
-        """
-        array_like -- array-like object that contains numbers with
-        uncertainties (list, Numpy ndarray or matrix, etc.).
-
-        args -- additional arguments that are passed directly to
-        func_with_derivatives.
-        """
-
-        # So that .flat works even if array_like is a list.  Later
-        # useful for faster code:
-        array_version = numpy.asarray(array_like)
-
-        # Variables on which the array depends are collected:
-        variables = set()
-        for element in array_version.flat:
-            # floats, etc. might be present
-            if isinstance(element, uncertainties.AffineScalarFunc):
-                variables |= set(element.derivatives.iterkeys())
-
-        array_nominal = nominal_values(array_version)
-        # Function value, and derivatives at array_nominal (the
-        # derivatives are with respect to the variables contained in
-        # array_like):
-        func_and_derivs = func_with_derivatives(
-            array_nominal,
-            type(array_like),
-            (array_derivative(array_version, var) for var in variables),
-            *args)
-
-        func_nominal_value = func_and_derivs.next()
-
-        if not variables:
-            return func_nominal_value
-
-        # The result is built progressively, with the contribution of
-        # each variable added in turn:
-
-        # Calculation of the derivatives of the result with respect to
-        # the variables.
-        derivatives = numpy.array(
-            [{} for _ in xrange(func_nominal_value.size)], dtype=object)
-        derivatives.resize(func_nominal_value.shape)
-
-        # Memory-efficient approach.  A memory-hungry approach would
-        # be to calculate the matrix derivatives with respect to all
-        # variables and then combine them into a matrix of
-        # AffineScalarFunc objects.  The approach followed here is to
-        # progressively build the matrix of derivatives, by
-        # progressively adding the derivatives with respect to
-        # successive variables.
-        for (var, deriv_wrt_var) in zip(variables, func_and_derivs):
-
-            # Update of the list of variables and associated
-            # derivatives, for each element:
-            for (derivative_dict, derivative_value) in zip(
-                derivatives.flat, deriv_wrt_var.flat):
-                if derivative_value:
-                    derivative_dict[var] = derivative_value
-
-        # An array of numbers with uncertainties is built from the
-        # result:
-        result = numpy.vectorize(uncertainties.AffineScalarFunc)(
-            func_nominal_value, derivatives)
-
-        # Numpy matrices that contain numbers with uncertainties are
-        # better as unumpy matrices:
-        if isinstance(result, numpy.matrix):
-            result = result.view(matrix)
-
-        return result
-
-    return wrapped_func
-
-########## Matrix inverse
-
-def inv_with_derivatives(arr, input_type, derivatives):
-    """
-    Defines the matrix inverse and its derivatives.
-
-    See the definition of func_with_deriv_to_uncert_func() for its
-    detailed semantics.
-    """
-
-    inverse = numpy.linalg.inv(arr)
-    # The inverse of a numpy.matrix is a numpy.matrix.  It is assumed
-    # that numpy.linalg.inv is such that other types yield
-    # numpy.ndarrays:
-    if issubclass(input_type, numpy.matrix):
-        inverse = inverse.view(numpy.matrix)
-    yield inverse
-
-    # It is mathematically convenient to work with matrices:
-    inverse_mat = numpy.asmatrix(inverse)
-
-    # Successive derivatives of the inverse:
-    for derivative in derivatives:
-        derivative_mat = numpy.asmatrix(derivative)
-        yield -inverse_mat * derivative_mat * inverse_mat
-
-_inv = func_with_deriv_to_uncert_func(inv_with_derivatives)
-_inv.__doc__ = """\
-    Version of numpy.linalg.inv that works with array-like objects
-    that contain numbers with uncertainties.
-
-    The result is a unumpy.matrix if numpy.linalg.inv would return a
-    matrix for the array of nominal values.
-
-    Analytical formulas are used.
-
-    Original documentation:
-    %s
-    """ % numpy.linalg.inv.__doc__
-
-########## Matrix pseudo-inverse
-
-def pinv_with_derivatives(arr, input_type, derivatives, rcond):
-    """
-    Defines the matrix pseudo-inverse and its derivatives.
-
-    Works with real or complex matrices.
-
-    See the definition of func_with_deriv_to_uncert_func() for its
-    detailed semantics.
-    """
-
-    inverse = numpy.linalg.pinv(arr, rcond)
-    # The pseudo-inverse of a numpy.matrix is a numpy.matrix.  It is
-    # assumed that numpy.linalg.pinv is such that other types yield
-    # numpy.ndarrays:
-    if issubclass(input_type, numpy.matrix):
-        inverse = inverse.view(numpy.matrix)
-    yield inverse
-
-    # It is mathematically convenient to work with matrices:
-    inverse_mat = numpy.asmatrix(inverse)
-
-    # Formula (4.12) from The Differentiation of Pseudo-Inverses and
-    # Nonlinear Least Squares Problems Whose Variables
-    # Separate. Author(s): G. H. Golub and V. Pereyra. Source: SIAM
-    # Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973),
-    # pp. 413-432
-
-    # See also
-    # http://mathoverflow.net/questions/25778/analytical-formula-for-numerical-derivative-of-the-matrix-pseudo-inverse
-
-    # Shortcuts.  All the following factors should be numpy.matrix objects:
-    PA = arr*inverse_mat
-    AP = inverse_mat*arr
-    factor21 = inverse_mat*inverse_mat.H
-    factor22 = numpy.eye(arr.shape[0])-PA
-    factor31 = numpy.eye(arr.shape[1])-AP
-    factor32 = inverse_mat.H*inverse_mat
-
-    # Successive derivatives of the inverse:
-    for derivative in derivatives:
-        derivative_mat = numpy.asmatrix(derivative)
-        term1 = -inverse_mat*derivative_mat*inverse_mat
-        derivative_mat_H = derivative_mat.H
-        term2 = factor21*derivative_mat_H*factor22
-        term3 = factor31*derivative_mat_H*factor32
-        yield term1+term2+term3
-
-# Default rcond argument for the generalization of numpy.linalg.pinv:
-try:
-    # Python 2.6+:
-    _pinv_default = numpy.linalg.pinv.__defaults__[0]
-except AttributeError:
-    _pinv_default = 1e-15
-
-_pinv_with_uncert = func_with_deriv_to_uncert_func(pinv_with_derivatives)
-
-@uncertainties.set_doc("""
-    Version of numpy.linalg.pinv that works with array-like objects
-    that contain numbers with uncertainties.
-
-    The result is a unumpy.matrix if numpy.linalg.pinv would return a
-    matrix for the array of nominal values.
-
-    Analytical formulas are used.
-
-    Original documentation:
-    %s
-    """ % numpy.linalg.pinv.__doc__)
-def _pinv(array_like, rcond=_pinv_default):
-    return _pinv_with_uncert(array_like, rcond)
-
-########## Matrix class
-
-class matrix(numpy.matrix):
-    # The name of this class is the same as NumPy's, which is why it
-    # does not follow PEP 8.
-    """
-    Class equivalent to numpy.matrix, but that behaves better when the
-    matrix contains numbers with uncertainties.
-    """
-
-    def __rmul__(self, other):
-        # ! NumPy's matrix __rmul__ apparently uses a restrictive
-        # dot() function that cannot handle the multiplication of a
-        # scalar and a matrix containing objects (when the
-        # arguments are given in this order).  We work around this
-        # limitation:
-        if numeric.isscalar(other):
-            return numeric.dot(self, other)
-        else:
-            return numeric.dot(other, self)  # The order is important
-
-    # The NumPy doc for getI is empty:
-    # @uncertainties.set_doc(numpy.matrix.getI.__doc__)
-    def getI(self):
-        "Matrix inverse of pseudo-inverse"
-
-        # numpy.matrix.getI is OK too, but the rest of the code assumes that
-        # numpy.matrix.I is a property object anyway:
-
-        M, N = self.shape
-        if M == N:
-            func = _inv
-        else:
-            func = _pinv
-        return func(self)
-
-
-    # ! In Python >= 2.6, this could be simplified as:
-    # I = numpy.matrix.I.getter(getI)
-    I = property(getI, numpy.matrix.I.fset, numpy.matrix.I.fdel,
-                 numpy.matrix.I.__doc__)
-
-    @property
-    def nominal_values(self):
-        """
-        Nominal value of all the elements of the matrix.
-        """
-        return nominal_values(self)
-
-    std_devs = std_devs
-
-def umatrix(*args):
-    """
-    Constructs a matrix that contains numbers with uncertainties.
-
-    The input data is the same as for uarray(...): a tuple with the
-    nominal values, and the standard deviations.
-
-    The returned matrix can be inverted, thanks to the fact that it is
-    a unumpy.matrix object instead of a numpy.matrix one.
-    """
-
-    return uarray(*args).view(matrix)
-
-###############################################################################
-
-def define_vectorized_funcs():
-    """
-    Defines vectorized versions of functions from uncertainties.umath.
-
-    Some functions have their name translated, so as to follow NumPy's
-    convention (example: math.acos -> numpy.arccos).
-    """
-
-    this_module = sys.modules[__name__]
-    # NumPy does not always use the same function names as the math
-    # module:
-    func_name_translations = dict(
-        (f_name, 'arc'+f_name[1:])
-        for f_name in ['acos', 'acosh', 'asin', 'atan', 'atan2', 'atanh'])
-
-    new_func_names = [func_name_translations.get(function_name, function_name)
-                      for function_name in umath.many_scalars_to_scalar_funcs]
-
-    for (function_name, unumpy_name) in zip(
-        umath.many_scalars_to_scalar_funcs, new_func_names):
-
-        # ! The newly defined functions (uncertainties.unumpy.cos, etc.)
-        # do not behave exactly like their NumPy equivalent (numpy.cos,
-        # etc.): cos(0) gives an array() and not a
-        # numpy.float... (equality tests succeed, though).
-        func = getattr(umath, function_name)
-        setattr(
-            this_module, unumpy_name,
-            numpy.vectorize(func,
-                            # If by any chance a function returns,
-                            # in a particular case, an integer,
-                            # side-effects in vectorize() would
-                            # fix the resulting dtype to integer,
-                            # which is not what is wanted:
-                            otypes=[object],
-                            doc="""\
-Vectorized version of umath.%s.
-
-Original documentation:
-%s""" % (function_name, func.__doc__)))
-
-        __all__.append(unumpy_name)
-
-define_vectorized_funcs()
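The deleted core module propagates uncertainties through inv() via the
identity d(A^-1) = -A^-1 dA A^-1, and through pinv() via formula (4.12)
of Golub & Pereyra (1973), as the comments above note.  A minimal sketch
checking the inverse identity against a finite difference; the test
matrix and step size are illustrative choices, not part of the original
code:

    import numpy

    A = numpy.array([[10.0, -3.1], [0.0, 3.0]])
    dA = numpy.zeros_like(A)
    dA[0, 0] = 1.0               # perturb a single element
    eps = 1e-7

    inv_A = numpy.linalg.inv(A)
    analytical = -numpy.dot(numpy.dot(inv_A, dA), inv_A)
    numerical = (numpy.linalg.inv(A + eps*dA) - inv_A) / eps
    assert numpy.allclose(analytical, numerical, atol=1e-5)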
diff --git a/lmfit/uncertainties/unumpy/test_ulinalg.py b/lmfit/uncertainties/unumpy/test_ulinalg.py
deleted file mode 100644
index 367d2de..0000000
--- a/lmfit/uncertainties/unumpy/test_ulinalg.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""
-Tests for uncertainties.unumpy.ulinalg.
-
-These tests can be run through the Nose testing framework.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot@normalesup.org>.
-"""
-
-# Some tests are already performed in test_unumpy (unumpy contains a
-# matrix inversion, for instance).  They are not repeated here.
-
-from __future__ import division
-
-try:
-    import numpy
-except ImportError:
-    import sys
-    sys.exit()  # There is no reason to test the interface to NumPy
-
-from uncertainties import unumpy, ufloat
-from uncertainties.unumpy.test_unumpy import matrices_close
-
-from uncertainties import __author__
-
-def test_list_inverse():
-    "Test of the inversion of a square matrix"
-
-    mat_list = [[1, 1], [1, 0]]
-
-    # numpy.linalg.inv(mat_list) does calculate the inverse even
-    # though mat_list is a list of lists (and not a matrix).  Can
-    # ulinalg do the same?  Here is a test:
-    mat_list_inv = unumpy.ulinalg.inv(mat_list)
-
-    # More type testing:
-    mat_matrix = numpy.asmatrix(mat_list)
-    assert isinstance(unumpy.ulinalg.inv(mat_matrix),
-                      type(numpy.linalg.inv(mat_matrix)))
-
-    # unumpy.ulinalg should behave in the same way as numpy.linalg,
-    # with respect to types:
-    mat_list_inv_numpy = numpy.linalg.inv(mat_list)
-    assert type(mat_list_inv) == type(mat_list_inv_numpy)
-
-    # The resulting matrix does not have to be a matrix that can
-    # handle uncertainties, because the input matrix does not have
-    # uncertainties:
-    assert not isinstance(mat_list_inv, unumpy.matrix)
-
-    # Individual element check:
-    assert isinstance(mat_list_inv[1, 1], float)
-    assert mat_list_inv[1, 1] == -1
-
-    x = ufloat((1, 0.1))
-    y = ufloat((2, 0.1))
-    mat = unumpy.matrix([[x, x], [y, 0]])
-
-    # Internal consistency: ulinalg.inv() must coincide with the
-    # unumpy.matrix inverse, for square matrices (.I is the
-    # pseudo-inverse, for non-square matrices, but inv() is not).
-    assert matrices_close(unumpy.ulinalg.inv(mat), mat.I)
-
-
-def test_list_pseudo_inverse():
-    "Test of the pseudo-inverse"
-
-    x = ufloat((1, 0.1))
-    y = ufloat((2, 0.1))
-    mat = unumpy.matrix([[x, x], [y, 0]])
-
-    # Internal consistency: the inverse and the pseudo-inverse yield
-    # the same result on square matrices:
-    assert matrices_close(mat.I, unumpy.ulinalg.pinv(mat), 1e-4)
-    assert matrices_close(unumpy.ulinalg.inv(mat),
-                          # Support for the optional pinv argument is
-                          # tested:
-                          unumpy.ulinalg.pinv(mat, 1e-15), 1e-4)
-
-    # Non-square matrices:
-    x = ufloat((1, 0.1))
-    y = ufloat((2, 0.1))
-    mat1 = unumpy.matrix([[x, y]])  # "Long" matrix
-    mat2 = unumpy.matrix([[x, y], [1, 3+x], [y, 2*x]])  # "Tall" matrix
-
-    # Internal consistency:
-    assert matrices_close(mat1.I, unumpy.ulinalg.pinv(mat1, 1e-10))
-    assert matrices_close(mat2.I, unumpy.ulinalg.pinv(mat2, 1e-8))
diff --git a/lmfit/uncertainties/unumpy/test_unumpy.py b/lmfit/uncertainties/unumpy/test_unumpy.py
deleted file mode 100644
index 484e2ea..0000000
--- a/lmfit/uncertainties/unumpy/test_unumpy.py
+++ /dev/null
@@ -1,265 +0,0 @@
-"""
-Tests of the code in uncertainties/unumpy/__init__.py.
-
-These tests can be run through the Nose testing framework.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL).
-"""
-
-from __future__ import division
-
-# 3rd-party modules:
-try:
-    import numpy
-except ImportError:
-    import sys
-    sys.exit()  # There is no reason to test the interface to NumPy
-
-# Local modules:
-import uncertainties
-from uncertainties import ufloat, unumpy, test_uncertainties
-from uncertainties.unumpy import core
-from uncertainties.test_uncertainties import _numbers_close, matrices_close
-from uncertainties import __author__
-
-def test_numpy():
-
-    """
-    Interaction with NumPy, including matrix inversion and correlated_values.
-    """
-
-    arr = numpy.array(range(3))
-    num = ufloat((3.14, 0.01))
-
-    # NumPy arrays can be multiplied by Variable objects,
-    # whatever the order of the operands:
-    prod1 = arr*num
-    prod2 = num*arr
-    # Additional check:
-    assert (prod1 == prod2).all()
-
-    # Operations with arrays work (they are first handled by NumPy,
-    # then by this module):
-    prod1*prod2  # This should be calculable
-    assert not (prod1-prod2).any()  # All elements must be 0
-
-    # Comparisons work too:
-
-    # Usual behavior:
-    assert len(arr[arr > 1.5]) == 1
-    # Comparisons with Variable objects:
-    assert len(arr[arr > ufloat((1.5, 0.1))]) == 1
-
-    assert len(prod1[prod1 < prod1*prod2]) == 2
-
-    # The following can be calculated (special NumPy abs() function):
-    numpy.abs(arr + ufloat((-1, 0.1)))
-
-    # The following does not completely work, because NumPy does not
-    # implement numpy.exp on an array of general objects, apparently:
-    assert numpy.exp(arr).all()  # All elements > 0
-    # Equivalent with an array of AffineScalarFunc objects:
-    try:
-        numpy.exp(arr + ufloat((0, 0)))
-    except AttributeError:
-        pass  # ! This is usual (but could be avoided)
-    else:
-        raise Exception("numpy.exp unexpectedly worked")
-
-def test_matrix():
-    "Matrices of numbers with uncertainties"
-    # Matrix inversion:
-
-    # Matrix with a mix of Variable objects and regular
-    # Python numbers:
-
-    m = unumpy.matrix([[ufloat((10, 1)), -3.1],
-                       [0, ufloat((3, 0))]])
-    m_nominal_values = unumpy.nominal_values(m)
-
-    # Test of the nominal_value attribute:
-    assert numpy.all(m_nominal_values == m.nominal_values)
-
-    assert type(m[0, 0]) == uncertainties.Variable
-
-    # Test of scalar multiplication, both sides:
-    3*m
-    m*3
-
-def _derivatives_close(x, y):
-    """
-    Returns True iff the AffineScalarFunc objects x and y have
-    derivatives that are close to each other (they must depend
-    on the same variables).
-    """
-
-    # x and y must depend on the same variables:
-    if set(x.derivatives) != set(y.derivatives):
-        return False  # Not the same variables
-
-    return all(_numbers_close(x.derivatives[var], y.derivatives[var])
-               for var in x.derivatives)
-
-def test_inverse():
-    "Tests of the matrix inverse"
-
-    m = unumpy.matrix([[ufloat((10, 1)), -3.1],
-                       [0, ufloat((3, 0))]])
-    m_nominal_values = unumpy.nominal_values(m)
-
-    # "Regular" inverse matrix, when uncertainties are not taken
-    # into account:
-    m_no_uncert_inv = m_nominal_values.I
-
-    # The matrix inversion should not yield numbers with uncertainties:
-    assert m_no_uncert_inv.dtype == numpy.dtype(float)
-
-    # Inverse with uncertainties:
-    m_inv_uncert = m.I  # AffineScalarFunc elements
-    # The inverse contains uncertainties: it must support custom
-    # operations on matrices with uncertainties:
-    assert isinstance(m_inv_uncert, unumpy.matrix)
-    assert type(m_inv_uncert[0, 0]) == uncertainties.AffineScalarFunc
-
-    # Checks of the numerical values: the diagonal elements of the
-    # inverse should be the inverses of the diagonal elements of
-    # m (because we started with a triangular matrix):
-    assert _numbers_close(1/m_nominal_values[0, 0],
-                          m_inv_uncert[0, 0].nominal_value), "Wrong value"
-
-    assert _numbers_close(1/m_nominal_values[1, 1],
-                          m_inv_uncert[1, 1].nominal_value), "Wrong value"
-
-
-    ####################
-
-    # Checks of the covariances between elements:
-    x = ufloat((10, 1))
-    m = unumpy.matrix([[x, x],
-                       [0, 3+2*x]])
-
-    m_inverse = m.I
-
-    # Check of the properties of the inverse:
-    m_double_inverse = m_inverse.I
-    # The initial matrix should be recovered, including its
-    # derivatives, which define covariances:
-    assert _numbers_close(m_double_inverse[0, 0].nominal_value,
-                          m[0, 0].nominal_value)
-    assert _numbers_close(m_double_inverse[0, 0].std_dev(),
-                          m[0, 0].std_dev())
-
-    assert matrices_close(m_double_inverse, m)
-
-    # Partial test:
-    assert _derivatives_close(m_double_inverse[0, 0], m[0, 0])
-    assert _derivatives_close(m_double_inverse[1, 1], m[1, 1])
-
-    ####################
-
-    # Tests of covariances during the inversion:
-
-    # There are correlations if both the next two derivatives are
-    # not zero:
-    assert m_inverse[0, 0].derivatives[x]
-    assert m_inverse[0, 1].derivatives[x]
-
-    # Correlations between m and m_inverse should create a perfect
-    # inversion:
-    assert matrices_close(m * m_inverse,  numpy.eye(m.shape[0]))
-
-def test_pseudo_inverse():
-    "Tests of the pseudo-inverse"
-
-    # Numerical version of the pseudo-inverse:
-    pinv_num = core.wrap_array_func(numpy.linalg.pinv)
-
-    ##########
-    # Full rank rectangular matrix:
-    m = unumpy.matrix([[ufloat((10, 1)), -3.1],
-                       [0, ufloat((3, 0))],
-                       [1, -3.1]])
-
-    # Numerical and package (analytical) pseudo-inverses: they must be
-    # the same:
-    rcond = 1e-8  # Test of the second argument to pinv()
-    m_pinv_num = pinv_num(m, rcond)
-    m_pinv_package = core._pinv(m, rcond)
-    assert matrices_close(m_pinv_num, m_pinv_package)
-
-    ##########
-    # Example with a non-full rank rectangular matrix:
-    vector = [ufloat((10, 1)), -3.1, 11]
-    m = unumpy.matrix([vector, vector])
-    m_pinv_num = pinv_num(m, rcond)
-    m_pinv_package = core._pinv(m, rcond)
-    assert matrices_close(m_pinv_num, m_pinv_package)
-
-    ##########
-    # Example with a non-full-rank square matrix:
-    m = unumpy.matrix([[ufloat((10, 1)), 0], [3, 0]])
-    m_pinv_num = pinv_num(m, rcond)
-    m_pinv_package = core._pinv(m, rcond)
-    assert matrices_close(m_pinv_num, m_pinv_package)
-
-def test_broadcast_funcs():
-    """
-    Test of mathematical functions that work with NumPy arrays of
-    numbers with uncertainties.
-    """
-
-    x = uncertainties.ufloat((0.2, 0.1))
-    arr = numpy.array([x, 2*x])
-    assert unumpy.cos(arr)[1] == uncertainties.umath.cos(arr[1])
-
-    # Some functions do not bear the same name in the math module and
-    # in NumPy (acos instead of arccos, etc.):
-    assert unumpy.arccos(arr)[1] == uncertainties.umath.acos(arr[1])
-    # The acos() function should not exist in unumpy because it does
-    # not exist in numpy:
-    assert not hasattr(numpy, 'acos')
-    assert not hasattr(unumpy, 'acos')
-
-    # Test of the __all__ variable:
-    assert 'acos' not in unumpy.__all__
-
-def test_array_and_matrix_creation():
-    "Test of custom array creation"
-
-    arr = unumpy.uarray(([1, 2], [0.1, 0.2]))
-
-    assert arr[1].nominal_value == 2
-    assert arr[1].std_dev() == 0.2
-
-    # Same thing for matrices:
-    mat = unumpy.umatrix(([1, 2], [0.1, 0.2]))
-    assert mat[0, 1].nominal_value == 2
-    assert mat[0, 1].std_dev() == 0.2
-
-def test_component_extraction():
-    "Extracting the nominal values and standard deviations from an array"
-
-    arr = unumpy.uarray(([1, 2], [0.1, 0.2]))
-
-    assert numpy.all(unumpy.nominal_values(arr) == [1, 2])
-    assert numpy.all(unumpy.std_devs(arr) == [0.1, 0.2])
-
-    # unumpy matrices, in addition, should have nominal_values that
-    # are simply numpy matrices (not unumpy ones, because they have no
-    # uncertainties):
-    mat = unumpy.matrix(arr)
-    assert numpy.all(unumpy.nominal_values(mat) == [1, 2])
-    assert numpy.all(unumpy.std_devs(mat) == [0.1, 0.2])
-    assert type(unumpy.nominal_values(mat)) == numpy.matrix
-
-
-def test_array_comparisons():
-    "Test of array and matrix comparisons"
-
-    arr = unumpy.uarray(([1, 2], [1, 4]))
-    assert numpy.all((arr == [arr[0], 4]) == [True, False])
-
-    # For matrices, 1D arrays are converted to 2D arrays:
-    mat = unumpy.umatrix(([1, 2], [1, 4]))
-    assert numpy.all((mat == [mat[0, 0], 4]) == [True, False])
-
diff --git a/lmfit/uncertainties/unumpy/ulinalg.py b/lmfit/uncertainties/unumpy/ulinalg.py
deleted file mode 100644
index c284aed..0000000
--- a/lmfit/uncertainties/unumpy/ulinalg.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-This module provides uncertainty-aware functions that generalize some
-of the functions from numpy.linalg.
-
-(c) 2010-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot@normalesup.org>.
-"""
-
-from uncertainties import __author__
-from uncertainties.unumpy import core
-
-# This module cannot import unumpy because unumpy imports this module.
-
-__all__ = ['inv', 'pinv']
-
-inv = core._inv
-pinv = core._pinv
-
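A usage sketch for the module above, mirroring the API exercised by the
deleted tests (old-style ufloat((nominal, std_dev)) constructor):

    from uncertainties import ufloat, unumpy

    x = ufloat((1, 0.1))
    y = ufloat((2, 0.1))
    mat = unumpy.matrix([[x, x], [y, 0]])

    inv_mat = unumpy.ulinalg.inv(mat)    # uncertainty-aware inverse
    pinv_mat = unumpy.ulinalg.pinv(mat)  # coincides with inv for square input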
diff --git a/lmfit/utilfuncs.py b/lmfit/utilfuncs.py
deleted file mode 100644
index 4bdc8b6..0000000
--- a/lmfit/utilfuncs.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""Utility mathematical functions and common lineshapes for minimizer
-"""
-import numpy as np
-from numpy.testing import assert_allclose
-from scipy.special import gamma
-
-log2 = np.log(2)
-pi = np.pi
-
-
-def gaussian(x, height, center, sigma):
-    "x -> height * exp(-(x - center)**2 / (const*sigma**2))"
-    const = 2  # for future generalization to N dimensions
-    return height * np.exp(-(x - center)**2 / (const*sigma**2))
-
-
-def normalized_gaussian(x, center, sigma):
-    "x -> 1/(sigma*sqrt(2*pi)) * exp(-(x - center)**2 / (const*sigma**2))"
-    const = 2  # for future generalization to N dimensions
-    normalization = 1/(sigma*np.sqrt(const*pi))
-    return normalization * np.exp(-(x - center)**2 / (const*sigma**2))
-
-
-def exponential(x, amplitude, decay):
-    "x -> amplitude * exp(-x/decay)"
-    return amplitude * np.exp(-x/decay)
-
-
-def powerlaw(x, coefficient, exponent):
-    "x -> coefficient * x**exponent"
-    return coefficient * x**exponent
-
-
-def linear(x, slope, intercept):
-    "x -> slope * x + intercept"
-    return slope * x + intercept
-
-
-def parabolic(x, a, b, c):
-    "x -> a * x**2 + b * x + c"
-    return a * x**2 + b * x + c
-
-
-def loren(x, amp, cen, wid):
-    "lorentzian function: wid = half-width at half-max"
-    return (amp / (1 + ((x-cen)/wid)**2))
-
-
-def loren_area(x, amp, cen, wid):
-    "scaled lorenztian function: wid = half-width at half-max"
-    return loren(x, amp, cen, wid) / (pi*wid)
-
-
-def pvoigt(x, amp, cen, wid, frac):
-    """pseudo-voigt function:
-    (1-frac)*gaussian(amp, cen, wid) + frac*loren(amp, cen, wid)"""
-    return amp * (gaussian(x, (1-frac), cen, wid) +
-                  loren(x, frac, cen, wid))
-
-
-def pvoigt_area(x, amp, cen, wid, frac):
-    """scaled pseudo-voigt function:
-    (1-frac)*gauss_area(amp, cen, wid) + frac*loren_area(amp, cen, wid)"""
-
-    return amp * (gauss_area(x, (1-frac), cen, wid) +
-                  loren_area(x, frac,     cen, wid))
-
-
-def pearson7(x, amp, cen, wid, expon):
-    """pearson peak function """
-    xp = 1.0 * expon
-    return amp / (1 + (((x-cen)/wid)**2) * (2**(1/xp) - 1))**xp
-
-
-def pearson7_area(x, amp, cen, wid, expon):
-    """scaled pearson peak function """
-    xp = 1.0 * expon
-    scale = gamma(xp) * np.sqrt((2**(1/xp) - 1)) / (gamma(xp-0.5))
-    return scale * pearson7(x, amp, cen, wid, xp) / (wid * np.sqrt(pi))
-
-
-def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
-                         err_msg='', verbose=True):
-    for param_name, value in desired.items():
-        assert_allclose(actual[param_name], value, rtol, atol,
-                        err_msg, verbose)
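A self-contained sketch of the pseudo-Voigt lineshape described in the
docstrings above: a (1-frac)/frac weighted sum of a Gaussian and a
Lorentzian of equal width.  Parameter values are illustrative, and this
is not the area-normalized form used by the replacement
lmfit.lineshapes module:

    import numpy as np

    def gaussian(x, height, center, sigma):
        return height * np.exp(-(x - center)**2 / (2*sigma**2))

    def lorentzian(x, amp, cen, wid):
        return amp / (1 + ((x - cen)/wid)**2)

    def pvoigt(x, amp, cen, wid, frac):
        return amp * ((1 - frac)*gaussian(x, 1.0, cen, wid) +
                      frac*lorentzian(x, 1.0, cen, wid))

    x = np.linspace(-5, 5, 11)
    y = pvoigt(x, amp=10.0, cen=0.0, wid=1.2, frac=0.3)
    assert abs(y[5] - 10.0) < 1e-12   # full height at the peak center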
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..11580ee
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,3 @@
+scipy>=0.13
+numpy>=1.5
+
diff --git a/setup.py b/setup.py
index 7a13b06..eeee2df 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ questionable. """
 
 setup(name = 'lmfit',
       version = lmfit.__version__,
-      author = 'LM-Fit Development Team',
+      author = 'LMFit Development Team',
       author_email = 'matt.newville at gmail.com',
       url          = 'http://lmfit.github.io/lmfit-py/',
       download_url = 'http://lmfit.github.io//lmfit-py/',
diff --git a/examples/NISTModels.py b/tests/NISTModels.py
similarity index 96%
rename from examples/NISTModels.py
rename to tests/NISTModels.py
index 03f6850..0e07a2d 100644
--- a/examples/NISTModels.py
+++ b/tests/NISTModels.py
@@ -1,7 +1,9 @@
 import os
+import sys
 from numpy import exp, log, log10, sin, cos, arctan, array
 from lmfit import Parameters
-NIST_DIR = 'NIST_STRD'
+thisdir, thisfile = os.path.split(__file__)
+NIST_DIR = os.path.join(thisdir, 'NIST_STRD')
 
 def read_params(params):
     if isinstance(params, Parameters):
@@ -192,5 +194,5 @@ def ReadNistData(dataset):
     out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
            'nfree': nfree, 'start1': start1, 'start2': start2,
            'sum_squares': sum_squares, 'std_dev': std_dev,
-           'cert_values': certval,  'cert_stderr': certerr }
+           'cert': certval,  'cert_values': certval,  'cert_stderr': certerr }
     return out
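The hunk above replaces the fixed 'NIST_STRD' path with one resolved
relative to the test file itself, so the NIST tests work from any
working directory.  A minimal sketch of the same pattern (the data file
name is just an example from the renames below):

    import os

    thisdir, _ = os.path.split(__file__)
    NIST_DIR = os.path.join(thisdir, 'NIST_STRD')
    datafile = os.path.join(NIST_DIR, 'Gauss2.dat')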
diff --git a/examples/NIST_STRD/Bennett5.dat b/tests/NIST_STRD/Bennett5.dat
similarity index 100%
rename from examples/NIST_STRD/Bennett5.dat
rename to tests/NIST_STRD/Bennett5.dat
diff --git a/examples/NIST_STRD/BoxBOD.dat b/tests/NIST_STRD/BoxBOD.dat
similarity index 100%
rename from examples/NIST_STRD/BoxBOD.dat
rename to tests/NIST_STRD/BoxBOD.dat
diff --git a/examples/NIST_STRD/Chwirut1.dat b/tests/NIST_STRD/Chwirut1.dat
similarity index 100%
rename from examples/NIST_STRD/Chwirut1.dat
rename to tests/NIST_STRD/Chwirut1.dat
diff --git a/examples/NIST_STRD/Chwirut2.dat b/tests/NIST_STRD/Chwirut2.dat
similarity index 100%
rename from examples/NIST_STRD/Chwirut2.dat
rename to tests/NIST_STRD/Chwirut2.dat
diff --git a/examples/NIST_STRD/DanWood.dat b/tests/NIST_STRD/DanWood.dat
similarity index 100%
rename from examples/NIST_STRD/DanWood.dat
rename to tests/NIST_STRD/DanWood.dat
diff --git a/examples/NIST_STRD/ENSO.dat b/tests/NIST_STRD/ENSO.dat
similarity index 100%
rename from examples/NIST_STRD/ENSO.dat
rename to tests/NIST_STRD/ENSO.dat
diff --git a/examples/NIST_STRD/Eckerle4.dat b/tests/NIST_STRD/Eckerle4.dat
similarity index 100%
rename from examples/NIST_STRD/Eckerle4.dat
rename to tests/NIST_STRD/Eckerle4.dat
diff --git a/examples/NIST_STRD/Gauss1.dat b/tests/NIST_STRD/Gauss1.dat
similarity index 100%
rename from examples/NIST_STRD/Gauss1.dat
rename to tests/NIST_STRD/Gauss1.dat
diff --git a/examples/NIST_STRD/Gauss2.dat b/tests/NIST_STRD/Gauss2.dat
similarity index 100%
rename from examples/NIST_STRD/Gauss2.dat
rename to tests/NIST_STRD/Gauss2.dat
diff --git a/examples/NIST_STRD/Gauss3.dat b/tests/NIST_STRD/Gauss3.dat
similarity index 100%
rename from examples/NIST_STRD/Gauss3.dat
rename to tests/NIST_STRD/Gauss3.dat
diff --git a/examples/NIST_STRD/Hahn1.dat b/tests/NIST_STRD/Hahn1.dat
similarity index 100%
rename from examples/NIST_STRD/Hahn1.dat
rename to tests/NIST_STRD/Hahn1.dat
diff --git a/examples/NIST_STRD/Kirby2.dat b/tests/NIST_STRD/Kirby2.dat
similarity index 100%
rename from examples/NIST_STRD/Kirby2.dat
rename to tests/NIST_STRD/Kirby2.dat
diff --git a/examples/NIST_STRD/Lanczos1.dat b/tests/NIST_STRD/Lanczos1.dat
similarity index 100%
rename from examples/NIST_STRD/Lanczos1.dat
rename to tests/NIST_STRD/Lanczos1.dat
diff --git a/examples/NIST_STRD/Lanczos2.dat b/tests/NIST_STRD/Lanczos2.dat
similarity index 100%
rename from examples/NIST_STRD/Lanczos2.dat
rename to tests/NIST_STRD/Lanczos2.dat
diff --git a/examples/NIST_STRD/Lanczos3.dat b/tests/NIST_STRD/Lanczos3.dat
similarity index 100%
rename from examples/NIST_STRD/Lanczos3.dat
rename to tests/NIST_STRD/Lanczos3.dat
diff --git a/examples/NIST_STRD/MGH09.dat b/tests/NIST_STRD/MGH09.dat
similarity index 100%
rename from examples/NIST_STRD/MGH09.dat
rename to tests/NIST_STRD/MGH09.dat
diff --git a/examples/NIST_STRD/MGH10.dat b/tests/NIST_STRD/MGH10.dat
similarity index 100%
rename from examples/NIST_STRD/MGH10.dat
rename to tests/NIST_STRD/MGH10.dat
diff --git a/examples/NIST_STRD/MGH17.dat b/tests/NIST_STRD/MGH17.dat
similarity index 100%
rename from examples/NIST_STRD/MGH17.dat
rename to tests/NIST_STRD/MGH17.dat
diff --git a/examples/NIST_STRD/Misra1a.dat b/tests/NIST_STRD/Misra1a.dat
similarity index 100%
rename from examples/NIST_STRD/Misra1a.dat
rename to tests/NIST_STRD/Misra1a.dat
diff --git a/examples/NIST_STRD/Misra1b.dat b/tests/NIST_STRD/Misra1b.dat
similarity index 100%
rename from examples/NIST_STRD/Misra1b.dat
rename to tests/NIST_STRD/Misra1b.dat
diff --git a/examples/NIST_STRD/Misra1c.dat b/tests/NIST_STRD/Misra1c.dat
similarity index 100%
rename from examples/NIST_STRD/Misra1c.dat
rename to tests/NIST_STRD/Misra1c.dat
diff --git a/examples/NIST_STRD/Misra1d.dat b/tests/NIST_STRD/Misra1d.dat
similarity index 100%
rename from examples/NIST_STRD/Misra1d.dat
rename to tests/NIST_STRD/Misra1d.dat
diff --git a/examples/NIST_STRD/Models b/tests/NIST_STRD/Models
similarity index 100%
rename from examples/NIST_STRD/Models
rename to tests/NIST_STRD/Models
diff --git a/examples/NIST_STRD/Nelson.dat b/tests/NIST_STRD/Nelson.dat
similarity index 100%
rename from examples/NIST_STRD/Nelson.dat
rename to tests/NIST_STRD/Nelson.dat
diff --git a/examples/NIST_STRD/Rat42.dat b/tests/NIST_STRD/Rat42.dat
similarity index 100%
rename from examples/NIST_STRD/Rat42.dat
rename to tests/NIST_STRD/Rat42.dat
diff --git a/examples/NIST_STRD/Rat43.dat b/tests/NIST_STRD/Rat43.dat
similarity index 100%
rename from examples/NIST_STRD/Rat43.dat
rename to tests/NIST_STRD/Rat43.dat
diff --git a/examples/NIST_STRD/Roszman1.dat b/tests/NIST_STRD/Roszman1.dat
similarity index 100%
rename from examples/NIST_STRD/Roszman1.dat
rename to tests/NIST_STRD/Roszman1.dat
diff --git a/examples/NIST_STRD/Thurber.dat b/tests/NIST_STRD/Thurber.dat
similarity index 100%
rename from examples/NIST_STRD/Thurber.dat
rename to tests/NIST_STRD/Thurber.dat
diff --git a/tests/lmfit_testutils.py b/tests/lmfit_testutils.py
new file mode 100644
index 0000000..79e89d4
--- /dev/null
+++ b/tests/lmfit_testutils.py
@@ -0,0 +1,18 @@
+from lmfit import Parameter
+from numpy.testing import assert_allclose
+
+def assert_paramval(param, val, tol=1.e-3):
+    """assert that a named parameter's value is close to expected value"""
+
+    assert(isinstance(param, Parameter))
+    pval = param.value
+
+    assert_allclose([pval], [val], rtol=tol, atol=tol,
+                    err_msg='',verbose=True)
+
+def assert_paramattr(param, attr, val):
+    """assert that a named parameter's value is a value"""
+    assert(isinstance(param, Parameter))
+    assert(hasattr(param, attr))
+    assert(getattr(param, attr) == val)
+
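A hypothetical usage of the two new helpers, assuming a Parameters
object as produced by a fit (values illustrative):

    from lmfit import Parameters
    from lmfit_testutils import assert_paramval, assert_paramattr

    params = Parameters()
    params.add('amp', value=5.03, vary=True)

    assert_paramval(params['amp'], 5.0, tol=0.05)
    assert_paramattr(params['amp'], 'vary', True)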
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
index ee8f1ac..3e3d530 100644
--- a/tests/test_1variable.py
+++ b/tests/test_1variable.py
@@ -2,48 +2,56 @@
 # From Nick Schurch
 
 import lmfit, numpy
+from numpy.testing import assert_allclose
 
 def linear_chisq(params, x, data, errs=None):
-    
+
     ''' Calculates chi-squared residuals for a linear model (weighted by errors if given)
     '''
-    
+
     if type(params) is not lmfit.parameter.Parameters:
         msg = "Params argument is not a lmfit parameter set"
         raise TypeError(msg)
-    
+
     if "m" not in params.keys():
         msg = "No slope parameter (m) defined in the model"
         raise KeyError(msg)
-    
+
     if "c" not in params.keys():
         msg = "No intercept parameter (c) defined in the model"
         raise KeyError(msg)
-    
+
     m = params["m"].value
     c = params["c"].value
-    
+
     model = m*x+c
-        
+
     residuals = (data-model)
     if errs is not None:
         residuals = residuals/errs
-    
-    return(residuals)
 
-rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699,
-		0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736]
+    return(residuals)
 
-x = numpy.arange(10)+1
-y = numpy.arange(10)+1+rands
-y_errs = numpy.sqrt(y)/2
+def test_1var():
+    rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699,
+             0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736]
 
+    x = numpy.arange(10)+1
+    y = numpy.arange(10)+1+rands
+    y_errs = numpy.sqrt(y)/2
 
-params = lmfit.Parameters()
-params.add(name="m", value=1.0, vary=True)
-params.add(name="c", value=0.0, vary=False)
+    params = lmfit.Parameters()
+    params.add(name="m", value=1.0, vary=True)
+    params.add(name="c", value=0.0, vary=False)
 
-lin_reg = lmfit.minimize(linear_chisq, params, args=(x, y))
+    out = lmfit.minimize(linear_chisq, params, args=(x, y))
 
-lmfit.report_fit(params)
+    lmfit.report_fit(out)
+    assert_allclose(params['m'].value, 1.025, rtol=0.02, atol=0.02)
+    assert(len(params)==2)
+    assert(out.nvarys == 1)
+    assert(out.chisqr > 0.01)
+    assert(out.chisqr < 5.00)
 
+if __name__ == '__main__':
+    test_1var()
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
new file mode 100644
index 0000000..d3a1c37
--- /dev/null
+++ b/tests/test_NIST_Strd.py
@@ -0,0 +1,268 @@
+from __future__ import print_function
+import sys
+import math
+from optparse import OptionParser
+
+from lmfit import Parameters, minimize
+
+from NISTModels import Models, ReadNistData
+
+HASPYLAB = True
+for arg in sys.argv:
+    if 'nose' in arg:
+        HASPYLAB = False
+
+if HASPYLAB:
+    try:
+        import matplotlib
+        import pylab
+        HASPYLAB = True
+    except ImportError:
+        HASPYLAB = False
+
+def ndig(a, b):
+    "precision for NIST values"
+    return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
+
+ABAR = ' |----------------+----------------+------------------+-------------------|'
+def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
+    buff = [' ======================================',
+            ' %s: ' % DataSet,
+            ' | Parameter Name |  Value Found   |  Certified Value | # Matching Digits |']
+    buff.append(ABAR)
+
+    val_dig_min = 200
+    err_dig_min = 200
+    fmt = ' | %s | % -.7e | % -.7e   | %2i                |'
+    for i in range(NISTdata['nparams']):
+        parname = 'b%i' % (i+1)
+        par = params[parname]
+        thisval = par.value
+        certval = NISTdata['cert_values'][i]
+        vdig    = ndig(thisval, certval)
+        pname   = (parname + ' value ' + ' '*14)[:14]
+        buff.append(fmt % (pname, thisval, certval, vdig))
+        val_dig_min = min(val_dig_min, vdig)
+
+        thiserr = par.stderr
+        certerr = NISTdata['cert_stderr'][i]
+        if thiserr is not None and myfit.errorbars:
+            edig   = ndig(thiserr, certerr)
+            ename = (parname + ' stderr' + ' '*14)[:14]
+            buff.append(fmt % (ename, thiserr, certerr, edig))
+            err_dig_min = min(err_dig_min, edig)
+
+    buff.append(ABAR)
+    sumsq = NISTdata['sum_squares']
+    try:
+        chi2 = myfit.chisqr
+        buff.append(' | Sum of Squares | %.7e  | %.7e    |  %2i               |'
+                    % (chi2, sumsq, ndig(chi2, sumsq)))
+    except Exception:
+        pass
+    buff.append(ABAR)
+    if not myfit.errorbars:
+        buff.append(' |          * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * *              |')
+        err_dig_min = 0
+    if err_dig_min < 199:
+        buff.append(' Worst agreement: %i digits for value, %i digits for error '
+                    % (val_dig_min, err_dig_min))
+    else:
+        buff.append(' Worst agreement: %i digits' % (val_dig_min))
+    return val_dig_min, '\n'.join(buff)
+
+def NIST_Dataset(DataSet, method='leastsq', start='start2',
+                 plot=True, verbose=False):
+
+    NISTdata = ReadNistData(DataSet)
+    resid, npar, dimx = Models[DataSet]
+    y = NISTdata['y']
+    x = NISTdata['x']
+
+    params = Parameters()
+    for i in range(npar):
+        pname = 'b%i' % (i+1)
+        cval  = NISTdata['cert_values'][i]
+        cerr  = NISTdata['cert_stderr'][i]
+        pval1 = NISTdata[start][i]
+        params.add(pname, value=pval1)
+
+    myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
+    digs, buff = Compare_NIST_Results(DataSet, myfit, params, NISTdata)
+    if verbose:
+        print(buff)
+    if plot and HASPYLAB:
+        fit = -resid(params, x, )
+        pylab.plot(x, y, 'ro')
+        pylab.plot(x, fit, 'k+-')
+        pylab.show()
+
+    return digs > 1
+
+def build_usage():
+    modelnames = []
+    ms = ''
+    for d in sorted(Models.keys()):
+        ms = ms + ' %s ' % d
+        if len(ms) > 55:
+            modelnames.append(ms)
+            ms = '    '
+    modelnames.append(ms)
+    modelnames = '\n'.join(modelnames)
+
+    usage = """
+ === Test Fit to NIST StRD Models ===
+
+usage:
+------
+    python fit_NIST.py [options] Model Start
+
+where Start is one of 'start1', 'start2' or 'cert', for different
+starting values, and Model is one of
+
+    %s
+
+if Model = 'all', all models and starting values will be run.
+
+options:
+--------
+  -m  name of fitting method.  One of:
+          leastsq, nelder, powell, lbfgsb, bfgs,
+          tnc, cobyla, slsqp, cg, newton-cg
+      leastsq (Levenberg-Marquardt) is the default
+""" % modelnames
+    return usage
+
+############################
+def run_interactive():
+    usage = build_usage()
+    parser = OptionParser(usage=usage, prog="fit-NIST.py")
+
+    parser.add_option("-m", "--method", dest="method",
+                      metavar='METH',
+                      default='leastsq',
+                      help="set method name, default = 'leastsq'")
+
+    (opts, args) = parser.parse_args()
+    dset = ''
+    start = 'start2'
+    if len(args) > 0:
+        dset = args[0]
+    if len(args) > 1:
+        start = args[1]
+
+    if dset.lower() == 'all':
+        tpass = 0
+        tfail = 0
+        failures = []
+        dsets = sorted(Models.keys())
+        for dset in dsets:
+            for start in ('start1', 'start2', 'cert'):
+                if NIST_Dataset(dset, method=opts.method, start=start,
+                                plot=False, verbose=True):
+                    tpass += 1
+                else:
+                    tfail += 1
+                    failures.append("   %s (starting at '%s')" % (dset, start))
+        print('--------------------------------------')
+        print(' Fit Method: %s ' %  opts.method)
+        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+        print(' Tests Failed for:\n %s' % '\n '.join(failures))
+        print('--------------------------------------')
+    elif dset not in Models:
+        print(usage)
+    else:
+        return NIST_Dataset(dset, method=opts.method,
+                            start=start, plot=True, verbose=True)
+
+def RunNIST_Model(model):
+    out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
+    out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
+    print("NIST Test" , model, out1, out2)
+    assert(out1 or out2)
+    return out1 or out2
+
+def test_Bennett5():
+    return RunNIST_Model('Bennett5')
+
+def test_BoxBOD():
+    return RunNIST_Model('BoxBOD')
+
+def test_Chwirut1():
+    return RunNIST_Model('Chwirut1')
+
+def test_Chwirut2():
+    return RunNIST_Model('Chwirut2')
+
+def test_DanWood():
+    return RunNIST_Model('DanWood')
+
+def test_ENSO():
+    return RunNIST_Model('ENSO')
+
+def test_Eckerle4():
+    return RunNIST_Model('Eckerle4')
+
+def test_Gauss1():
+    return RunNIST_Model('Gauss1')
+
+def test_Gauss2():
+    return RunNIST_Model('Gauss2')
+
+def test_Gauss3():
+    return RunNIST_Model('Gauss3')
+
+def test_Hahn1():
+    return RunNIST_Model('Hahn1')
+
+def test_Kirby2():
+    return RunNIST_Model('Kirby2')
+
+def test_Lanczos1():
+    return RunNIST_Model('Lanczos1')
+
+def test_Lanczos2():
+    return RunNIST_Model('Lanczos2')
+
+def test_Lanczos3():
+    return RunNIST_Model('Lanczos3')
+
+def test_MGH09():
+    return RunNIST_Model('MGH09')
+
+def test_MGH10():
+    return RunNIST_Model('MGH10')
+
+def test_MGH17():
+    return RunNIST_Model('MGH17')
+
+def test_Misra1a():
+    return RunNIST_Model('Misra1a')
+
+def test_Misra1b():
+    return RunNIST_Model('Misra1b')
+
+def test_Misra1c():
+    return RunNIST_Model('Misra1c')
+
+def test_Misra1d():
+    return RunNIST_Model('Misra1d')
+
+def test_Nelson():
+    return RunNIST_Model('Nelson')
+
+def test_Rat42():
+    return RunNIST_Model('Rat42')
+
+def test_Rat43():
+    return RunNIST_Model('Rat43')
+
+def test_Roszman1():
+    return RunNIST_Model('Roszman1')
+
+def test_Thurber():
+    return RunNIST_Model('Thurber')
+
+if __name__ == '__main__':
+    run_interactive()
+
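The ndig() metric in the new test above counts matching significant
digits between a fitted value a and a certified value b as
round(-log10((||a| - |b|| + 1e-15) / |b|)).  A worked example with
illustrative numbers:

    import math

    def ndig(a, b):
        return round(-math.log10((abs(abs(a) - abs(b)) + 1.e-15) / abs(b)))

    assert ndig(1.2345678, 1.2345670) == 6   # agreement to ~6 digits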
diff --git a/tests/test_algebraic_constraint.py b/tests/test_algebraic_constraint.py
index 3bf55e4..8d26942 100644
--- a/tests/test_algebraic_constraint.py
+++ b/tests/test_algebraic_constraint.py
@@ -1,13 +1,13 @@
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 from lmfit import Parameters, Parameter, Minimizer
-from lmfit.utilfuncs import gaussian, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 from lmfit.printfuncs import report_fit
 
 def test_constraints1():
     def residual(pars, x, sigma=None, data=None):
         yg = gaussian(x, pars['amp_g'].value,
                       pars['cen_g'].value, pars['wid_g'].value)
-        yl = loren(x, pars['amp_l'].value,
+        yl = lorentzian(x, pars['amp_l'].value,
                    pars['cen_l'].value, pars['wid_l'].value)
 
         slope = pars['line_slope'].value
@@ -26,7 +26,7 @@ def test_constraints1():
     x = linspace(xmin, xmax, n)
 
     data = (gaussian(x, 21, 8.1, 1.2) +
-            loren(x, 10, 9.6, 2.4) +
+            lorentzian(x, 10, 9.6, 2.4) +
             random.normal(scale=0.23,  size=n) +
             x*0.5)
 
@@ -71,7 +71,7 @@ def test_constraints2():
     def residual(pars, x, sigma=None, data=None):
         yg = gaussian(x, pars['amp_g'].value,
                       pars['cen_g'].value, pars['wid_g'].value)
-        yl = loren(x, pars['amp_l'].value,
+        yl = lorentzian(x, pars['amp_l'].value,
                    pars['cen_l'].value, pars['wid_l'].value)
 
         slope = pars['line_slope'].value
@@ -90,7 +90,7 @@ def test_constraints2():
     x = linspace(xmin, xmax, n)
 
     data = (gaussian(x, 21, 8.1, 1.2) +
-            loren(x, 10, 9.6, 2.4) +
+            lorentzian(x, 10, 9.6, 2.4) +
             random.normal(scale=0.23,  size=n) +
             x*0.5)
 
@@ -103,7 +103,7 @@ def test_constraints2():
             Parameter(name='cen_l',  expr='1.5+cen_g'),
             Parameter(name='line_slope', value=0.0),
             Parameter(name='line_off', value=0.0)]
-    
+
     sigma = 0.021  # estimate of data error (for all data points)
 
     myfit = Minimizer(residual, pfit,
diff --git a/tests/test_algebraic_constraint2.py b/tests/test_algebraic_constraint2.py
index c4179e5..7f5e24f 100644
--- a/tests/test_algebraic_constraint2.py
+++ b/tests/test_algebraic_constraint2.py
@@ -1,31 +1,31 @@
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 from lmfit import Parameters, Parameter, Minimizer
-from lmfit.utilfuncs import gaussian, loren, pvoigt
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
 from lmfit.printfuncs import report_fit
 import sys
 
 
-HASPYLAB = False
 # Turn off plotting if run by nosetests.
-if not sys.argv[0].endswith('nosetests'):
+WITHPLOT = True
+for arg in sys.argv:
+    if 'nose' in arg:
+        WITHPLOT = False
+
+if WITHPLOT:
     try:
         import matplotlib
         import pylab
-        HASPYLAB = True
     except ImportError:
-        pass
+        WITHPLOT = False
 
-# Turn off plotting if run by nosetests.
-if sys.argv[0].endswith('nosetests'):
-    HASPYLAB = False 
 
 def test_constraints(with_plot=True):
-    with_plot = with_plot and HASPYLAB
+    with_plot = with_plot and WITHPLOT
 
     def residual(pars, x, sigma=None, data=None):
         yg = gaussian(x, pars['amp_g'].value,
                    pars['cen_g'].value, pars['wid_g'].value)
-        yl = loren(x, pars['amp_l'].value,
+        yl = lorentzian(x, pars['amp_l'].value,
                    pars['cen_l'].value, pars['wid_l'].value)
 
         slope = pars['line_slope'].value
@@ -35,7 +35,7 @@ def test_constraints(with_plot=True):
             return model
         if sigma is None:
             return (model - data)
-        return (model - data)/sigma
+        return (model - data) / sigma
 
 
     n = 201
@@ -44,7 +44,7 @@ def test_constraints(with_plot=True):
     x = linspace(xmin, xmax, n)
 
     data = (gaussian(x, 21, 8.1, 1.2) +
-            loren(x, 10, 9.6, 2.4) +
+            lorentzian(x, 10, 9.6, 2.4) +
             random.normal(scale=0.23,  size=n) +
             x*0.5)
 
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
new file mode 100644
index 0000000..c579652
--- /dev/null
+++ b/tests/test_basicfit.py
@@ -0,0 +1,45 @@
+import numpy as np
+from lmfit import minimize, Parameters, Parameter, report_fit
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+def test_basic():
+    # create data to be fitted
+    x = np.linspace(0, 15, 301)
+    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+            np.random.normal(size=len(x), scale=0.2) )
+
+    # define objective function: returns the array to be minimized
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        amp = params['amp'].value
+        shift = params['shift'].value
+        omega = params['omega'].value
+        decay = params['decay'].value
+
+        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('amp',   value= 10,  min=0)
+    params.add('decay', value= 0.1)
+    params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
+    params.add('omega', value= 3.0)
+
+    # do fit, here with leastsq model
+    result = minimize(fcn2min, params, args=(x, data))
+
+    # calculate final result
+    final = data + result.residual
+
+    # report_fit(result)
+
+    assert(result.nfev >   5)
+    assert(result.nfev < 500)
+    assert(result.chisqr > 1)
+    assert(result.nvarys == 4)
+    assert_paramval(params['amp'],   5.03, tol=0.05)
+    assert_paramval(params['omega'], 2.0, tol=0.05)
+
+if __name__ == '__main__':
+    test_basic()
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
new file mode 100644
index 0000000..0ae9417
--- /dev/null
+++ b/tests/test_bounds.py
@@ -0,0 +1,54 @@
+from lmfit import Parameters, minimize, fit_report
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+from numpy import linspace, zeros, sin, exp, random, pi, sign
+
+def test_bounds():
+    p_true = Parameters()
+    p_true.add('amp', value=14.0)
+    p_true.add('period', value=5.4321)
+    p_true.add('shift', value=0.12345)
+    p_true.add('decay', value=0.01000)
+
+    def residual(pars, x, data=None):
+        amp = pars['amp'].value
+        per = pars['period'].value
+        shift = pars['shift'].value
+        decay = pars['decay'].value
+
+        if abs(shift) > pi/2:
+            shift = shift - sign(shift)*pi
+
+        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+        if data is None:
+            return model
+        return (model - data)
+
+    n = 1500
+    xmin = 0.
+    xmax = 250.0
+    random.seed(0)
+    noise = random.normal(scale=2.80, size=n)
+    x     = linspace(xmin, xmax, n)
+    data  = residual(p_true, x) + noise
+
+    fit_params = Parameters()
+    fit_params.add('amp', value=13.0, max=20, min=0.0)
+    fit_params.add('period', value=2, max=10)
+    fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
+    fit_params.add('decay', value=0.02, max=0.10, min=0.00)
+
+    out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+    fit = residual(fit_params, x)
+
+    assert(out.nfev  > 10)
+    assert(out.nfree > 50)
+    assert(out.chisqr > 1.0)
+
+    print(fit_report(out, show_correl=True, modelpars=p_true))
+    assert_paramval(fit_params['decay'], 0.01, tol=1.e-2)
+    assert_paramval(fit_params['shift'], 0.123, tol=1.e-2)
+
+if __name__ == '__main__':
+    test_bounds()
diff --git a/tests/test_model.py b/tests/test_model.py
index 29a3b1b..477cdd3 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,13 +1,15 @@
 import unittest
 import warnings
 from numpy.testing import assert_allclose
-from lmfit.utilfuncs import assert_results_close
 import numpy as np
 
-from lmfit import Model, Parameter
-from lmfit import specified_models
-from lmfit.utilfuncs import gaussian, normalized_gaussian
+from lmfit import Model, Parameter, models
+from lmfit.lineshapes import gaussian
 
+def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
+                         err_msg='', verbose=True):
+    for param_name, value in desired.items():
+         assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose)
 
 class TestUserDefiniedModel(unittest.TestCase):
     # mainly aimed at checking that the API does what it says it does
@@ -17,10 +19,10 @@ class TestUserDefiniedModel(unittest.TestCase):
         self.x = np.linspace(-10, 10, num=1000)
         np.random.seed(1)
         self.noise = 0.01*np.random.randn(*self.x.shape)
-        self.true_values = lambda: dict(height=7, center=1, sigma=3)
-        self.guess = lambda: dict(height=5, center=2, sigma=4)
+        self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
+        self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
         # return a fresh copy
-        self.model = Model(gaussian, ['x'])
+        self.model = Model(gaussian)
         self.data = gaussian(x=self.x, **self.true_values()) + self.noise
 
     def test_fit_with_keyword_params(self):
@@ -28,7 +30,7 @@ class TestUserDefiniedModel(unittest.TestCase):
         assert_results_close(result.values, self.true_values())
 
     def test_fit_with_parameters_obj(self):
-        params = self.model.params()
+        params = self.model.params
         for param_name, value in self.guess().items():
             params[param_name].value = value
         result = self.model.fit(self.data, params, x=self.x)
@@ -39,11 +41,11 @@ class TestUserDefiniedModel(unittest.TestCase):
         # using keyword argument parameters
         guess_missing_sigma = self.guess()
         del guess_missing_sigma['sigma']
-        f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma)
-        self.assertRaises(ValueError, f)
+        #f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma)
+        #self.assertRaises(ValueError, f)
 
         # using Parameters
-        params = self.model.params()
+        params = self.model.params
         for param_name, value in guess_missing_sigma.items():
             params[param_name].value = value
         f = lambda: self.model.fit(self.data, params, x=self.x)
@@ -53,10 +55,10 @@ class TestUserDefiniedModel(unittest.TestCase):
         guess = self.guess()
         guess['extra'] = 5
 
-        def flexible_func(x, height, center, sigma, **kwargs):
-            return gaussian(x, height, center, sigma)
-        
-        flexible_model = Model(flexible_func, ['x'])
+        def flexible_func(x, amplitude, center, sigma, **kwargs):
+            return gaussian(x, amplitude, center, sigma)
+
+        flexible_model = Model(flexible_func)
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
             flexible_model.fit(self.data, x=self.x, **guess)
@@ -84,14 +86,13 @@ class TestUserDefiniedModel(unittest.TestCase):
         assert_results_close(result.values, true_values, rtol=0.05)
 
     def test_result_attributes(self):
-
         # result.init_values
         result = self.model.fit(self.data, x=self.x, **self.guess())
         assert_results_close(result.values, self.true_values())
         self.assertTrue(result.init_values == self.guess())
 
         # result.init_params
-        params = self.model.params()
+        params = self.model.params
         for param_name, value in self.guess().items():
             params[param_name].value = value
         self.assertTrue(result.init_params == params)
@@ -109,38 +110,36 @@ class TestUserDefiniedModel(unittest.TestCase):
     # testing model addition...
 
     def test_user_defined_gaussian_plus_constant(self):
-        data = self.data + 5
-        model = self.model + specified_models.Constant()
+        data = self.data + 5.0
+        model = self.model + models.ConstantModel()
         guess = self.guess()
-        guess['c'] = 10
+        guess['c'] = 10.1
         true_values = self.true_values()
-        true_values['c'] = 5
+        true_values['c'] = 5.0
+
         result = model.fit(data, x=self.x, **guess)
-        assert_results_close(result.values, true_values)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
     def test_sum_of_two_gaussians(self):
-
         # two user-defined gaussians
         model1 = self.model
-        f2 = lambda x, height_, center_, sigma_: gaussian(
-            x, height_, center_, sigma_)
-        model2 = Model(f2, ['x'])
+        f2 = lambda x, amp, cen, sig: gaussian(x, amplitude=amp, center=cen, sigma=sig)
+        model2 = Model(f2)
         values1 = self.true_values()
-        values2 = self.true_values()
-        values2['sigma'] = 1.5
-        values2['height'] = 4
-        data = gaussian(x=self.x, **values1)
-        data += gaussian(x=self.x, **values2)
+        values2 = {'cen': 2.45, 'sig':0.8, 'amp':3.15}
+
+        data  = gaussian(x=self.x, **values1) + f2(x=self.x, **values2) + self.noise/3.0
         model = self.model + model2
-        values2 = {k + '_': v for k, v in values2.items()}
-        guess = {'sigma': Parameter(value=2, min=0), 'center': 1,
-                 'height': 1,
-                 'sigma_': Parameter(value=1, min=0), 'center_': 1,
-                 'height_': 1}
+        guess = {'sigma': Parameter(value=2, min=0),
+                 'center': Parameter(value=1, min=0.2, max=1.8),
+                 'amplitude': Parameter(value=3, min=0),
+                 'sig':  Parameter(value=1, min=0),
+                 'cen': Parameter(value=2.4, min=2, max=3.5),
+                 'amp': Parameter(value=1, min=0)}
 
         true_values = dict(list(values1.items()) + list(values2.items()))
         result = model.fit(data, x=self.x, **guess)
-        assert_results_close(result.values, true_values)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
         # user-defined models with common parameter names
         # cannot be added, and should raise
@@ -148,23 +147,24 @@ class TestUserDefiniedModel(unittest.TestCase):
         self.assertRaises(NameError, f)
 
         # two predefined_gaussians, using suffix to differentiate
-        model1 = specified_models.Gaussian(['x'])
-        model2 = specified_models.Gaussian(['x'], suffix='_')
+        model1 = models.GaussianModel(prefix='g1_')
+        model2 = models.GaussianModel(prefix='g2_')
         model = model1 + model2
-        true_values = {'center': values1['center'],
-                       'height': values1['height'],
-                       'sigma': values1['sigma'],
-                       'center_': values2['center_'],
-                       'height_': values2['height_'],
-                       'sigma_': values2['sigma_']}
-        guess = {'sigma': 2, 'center': 1, 'height': 1,
-                 'sigma_': 1, 'center_': 1, 'height_': 1}
+        true_values = {'g1_center': values1['center'],
+                       'g1_amplitude': values1['amplitude'],
+                       'g1_sigma': values1['sigma'],
+                       'g2_center': values2['cen'],
+                       'g2_amplitude': values2['amp'],
+                       'g2_sigma': values2['sig']}
+        guess = {'g1_sigma': 2, 'g1_center': 1, 'g1_amplitude': 3,
+                 'g2_sigma': 1, 'g2_center': 2.4, 'g2_amplitude': 1}
+
         result = model.fit(data, x=self.x, **guess)
-        assert_results_close(result.values, true_values)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
 
         # without suffix, the names collide and Model should raise
-        model1 = specified_models.Gaussian(['x'])
-        model2 = specified_models.Gaussian(['x'])
+        model1 = models.GaussianModel()
+        model2 = models.GaussianModel()
         f = lambda: model1 + model2
         self.assertRaises(NameError, f)
 
@@ -179,7 +179,7 @@ class CommonTests(object):
         try:
             args = self.args
         except AttributeError:
-            self.model_instance = self.model(['x'])
+            self.model_instance = self.model()
             func = self.model_instance.func
 
         else:
@@ -192,22 +192,12 @@ class CommonTests(object):
         result = model.fit(self.data, x=self.x, **self.guess())
         assert_results_close(result.values, self.true_values())
 
-
-class TestNormalizedGaussian(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(center=0, sigma=1.5)
-        self.guess = lambda: dict(center=1, sigma=2)
-        self.model = specified_models.NormalizedGaussian
-        super(TestNormalizedGaussian, self).setUp()
-
-
 class TestLinear(CommonTests, unittest.TestCase):
 
     def setUp(self):
         self.true_values = lambda: dict(slope=5, intercept=2)
         self.guess = lambda: dict(slope=10, intercept=6)
-        self.model = specified_models.Linear
+        self.model = models.LinearModel
         super(TestLinear, self).setUp()
 
 
@@ -216,7 +206,7 @@ class TestParabolic(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(a=5, b=2, c=8)
         self.guess = lambda: dict(a=1, b=6, c=3)
-        self.model = specified_models.Parabolic
+        self.model = models.ParabolicModel
         super(TestParabolic, self).setUp()
 
 
@@ -226,7 +216,7 @@ class TestPolynomialOrder2(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c2=5, c1=2, c0=8)
         self.guess = lambda: dict(c1=1, c2=6, c0=3)
-        self.model = specified_models.Polynomial
+        self.model = models.PolynomialModel
         self.args = (2,)
         super(TestPolynomialOrder2, self).setUp()
 
@@ -237,7 +227,7 @@ class TestPolynomialOrder3(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8)
         self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
-        self.model = specified_models.Polynomial
+        self.model = models.PolynomialModel
         self.args = (3,)
         super(TestPolynomialOrder3, self).setUp()
 
@@ -247,16 +237,16 @@ class TestConstant(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(c=5)
         self.guess = lambda: dict(c=2)
-        self.model = specified_models.Constant
+        self.model = models.ConstantModel
         super(TestConstant, self).setUp()
 
 
 class TestPowerlaw(CommonTests, unittest.TestCase):
 
     def setUp(self):
-        self.true_values = lambda: dict(coefficient=5, exponent=3)
-        self.guess = lambda: dict(coefficient=2, exponent=8)
-        self.model = specified_models.PowerLaw
+        self.true_values = lambda: dict(amplitude=5, exponent=3)
+        self.guess = lambda: dict(amplitude=2, exponent=8)
+        self.model = models.PowerLawModel
         super(TestPowerlaw, self).setUp()
 
 
@@ -265,5 +255,5 @@ class TestExponential(CommonTests, unittest.TestCase):
     def setUp(self):
         self.true_values = lambda: dict(amplitude=5, decay=3)
         self.guess = lambda: dict(amplitude=2, decay=8)
-        self.model = specified_models.Exponential
+        self.model = models.ExponentialModel
         super(TestExponential, self).setUp()
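For orientation, a minimal, self-contained sketch of the prefix mechanism the
updated test exercises. The synthetic data and starting values here are
illustrative only; the model, lineshape, and fit calls are the ones already
used in these tests:

    import numpy as np
    from lmfit.models import GaussianModel
    from lmfit.lineshapes import gaussian

    x = np.linspace(-5, 5, 201)
    # two overlapping peaks (amplitude, center, sigma)
    data = gaussian(x, 3.0, -1.0, 0.5) + gaussian(x, 1.0, 2.0, 0.8)

    # prefixes keep the parameter names of the two components distinct
    model = GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
    result = model.fit(data, x=x,
                       g1_amplitude=2.0, g1_center=-1.5, g1_sigma=1.0,
                       g2_amplitude=1.0, g2_center=2.5, g2_sigma=1.0)
    print(result.values)  # keys carry the 'g1_' / 'g2_' prefixes

Without the prefixes, both components would claim 'amplitude', 'center', and
'sigma', which is exactly the NameError case tested below.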
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
new file mode 100644
index 0000000..bc23abf
--- /dev/null
+++ b/tests/test_multidatasets.py
@@ -0,0 +1,76 @@
+#
+# example fitting to multiple (simulated) data sets
+#
+import numpy as np
+import matplotlib.pyplot as plt
+from lmfit import minimize, Parameters, fit_report
+from lmfit.lineshapes import gaussian
+
+def gauss_dataset(params, i, x):
+    """calc gaussian from params for data set i
+    using simple, hardwired naming convention"""
+    amp = params['amp_%i' % (i+1)].value
+    cen = params['cen_%i' % (i+1)].value
+    sig = params['sig_%i' % (i+1)].value
+    return gaussian(x, amp, cen, sig)
+
+def objective(params, x, data):
+    """ calculate total residual for fits to several data sets held
+    in a 2-D array, and modeled by Gaussian functions"""
+    ndata, nx = data.shape
+    resid = np.zeros_like(data)
+    # make residual per data set
+    for i in range(ndata):
+        resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
+    # now flatten this to a 1D array, as minimize() needs
+    return resid.flatten()
+
+def test_multidatasets():
+    # create 5 datasets
+    x  = np.linspace( -1, 2, 151)
+    data = []
+    for i in np.arange(5):
+        amp  =  2.60 + 1.50*np.random.rand()
+        cen  = -0.20 + 1.50*np.random.rand()
+        sig  =  0.25 + 0.03*np.random.rand()
+        dat  = gaussian(x, amp, cen, sig) + \
+               np.random.normal(size=len(x), scale=0.1)
+        data.append(dat)
+
+    # data has shape (5, 151)
+    data = np.array(data)
+    assert data.shape == (5, 151)
+
+    # create 5 sets of parameters, one per data set
+    pars = Parameters()
+    for iy, y in enumerate(data):
+        pars.add('amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
+        pars.add('cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
+        pars.add('sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
+
+    # but now constrain all values of sigma to have the same value
+    # by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
+    for iy in (2, 3, 4, 5):
+        pars['sig_%i' % iy].expr='sig_1'
+
+    # run the global fit to all the data sets
+    out = minimize(objective, pars, args=(x, data))
+
+    assert(len(pars) == 15)
+    assert(out.nvarys == 11)
+    assert(out.nfev  > 15)
+    assert(out.chisqr > 1.0)
+    assert(pars['amp_1'].value > 0.1)
+    assert(pars['sig_1'].value > 0.1)
+    assert(pars['sig_2'].value == pars['sig_1'].value)
+
+    ## plot the data sets and fits
+    #  plt.figure()
+    #  for i in range(5):
+    #      y_fit = gauss_dataset(pars, i, x)
+    #      plt.plot(x, data[i, :], 'o', x, y_fit, '-')
+    #  plt.show()
+
+if __name__ == '__main__':
+    test_multidatasets()
+
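A note on the bookkeeping the assertions above encode: the loop creates
5 x 3 = 15 parameters, and tying sig_2 .. sig_5 to sig_1 removes four of
them from the set of varied parameters, hence nvarys == 11. A minimal
sketch of that constraint mechanism, using only the Parameters calls
already shown in this test:

    from lmfit import Parameters

    pars = Parameters()
    pars.add('sig_1', value=0.3, min=0.01, max=3.0)
    pars.add('sig_2', value=0.3, min=0.01, max=3.0)
    # tie sig_2 to sig_1: it is now computed from the expression
    # and no longer counted among the fit's free variables
    pars['sig_2'].expr = 'sig_1'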
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 387f96f..9925d30 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import print_function
 from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
+from lmfit.lineshapes import gaussian
 import numpy as np
 pi = np.pi
 import unittest
@@ -173,12 +174,11 @@ def test_derive():
             params1['b'].value, params2['b'].value,
             params1['c'].value, params2['c'].value ))
 
-    check_wo_stderr(min1.params['a'], min2.params['a'].value, 0.000001)
-    check_wo_stderr(min1.params['b'], min2.params['b'].value, 0.000001)
-    check_wo_stderr(min1.params['c'], min2.params['c'].value, 0.000001)
+    check_wo_stderr(min1.params['a'], min2.params['a'].value, 0.00005)
+    check_wo_stderr(min1.params['b'], min2.params['b'].value, 0.00005)
+    check_wo_stderr(min1.params['c'], min2.params['c'].value, 0.00005)
 
 def test_peakfit():
-    from lmfit.utilfuncs import gaussian
     def residual(pars, x, data=None):
         g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
         g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
@@ -223,8 +223,8 @@ def test_peakfit():
 
     myfit.leastsq()
 
-    print(' N fev = ', myfit.nfev)
-    print(myfit.chisqr, myfit.redchi, myfit.nfree)
+    # print(' N fev = ', myfit.nfev)
+    # print(myfit.chisqr, myfit.redchi, myfit.nfree)
 
     report_fit(fit_params)
 
@@ -294,7 +294,7 @@ class CommonMinimizerTest(object):
 
         for para, true_para in zip(self.fit_params.values(),
                                    self.p_true.values()):
-            check_wo_stderr(para, true_para.value)
+            check_wo_stderr(para, true_para.value, sig=0.15)
 
 class TestNelder_Mead(CommonMinimizerTest, unittest.TestCase):
 
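For context on the tolerance changes above: check_wo_stderr(para,
real_value, sig) compares a best-fit Parameter's value against a
reference to within sig, ignoring the parameter's stderr. The helper
itself is defined elsewhere in the test suite; what follows is only a
plausible reconstruction of the check these calls assume, not the real
implementation:

    import numpy as np

    def check_wo_stderr_sketch(para, real_value, sig=0.1):
        # hypothetical reconstruction: pass if the fitted value lies
        # within sig of the reference, without consulting para.stderr
        assert np.isclose(para.value, real_value, rtol=sig, atol=sig), \
            '%s: %g not within %g of %g' % (para.name, para.value,
                                            sig, real_value)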
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
new file mode 100644
index 0000000..9408c77
--- /dev/null
+++ b/tests/test_stepmodel.py
@@ -0,0 +1,59 @@
+import numpy as np
+from lmfit import fit_report
+from lmfit.models import StepModel, ConstantModel
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+
+def get_data():
+    x  = np.linspace(0, 10, 201)
+    dat = np.ones_like(x)
+    dat[:48] = 0.0
+    dat[48:77] = np.arange(77-48)/(77.0-48)
+    dat = dat +  5e-2*np.random.randn(len(x))
+    dat = 110.2 * dat + 12.0
+    return x, dat
+
+def test_stepmodel_linear():
+    x, y = get_data()
+    stepmod = StepModel(form='linear')
+    stepmod.guess_starting_values(y, x)
+
+    mod = stepmod + ConstantModel()
+    mod.set_paramval('c', 3*y.min())
+    out = mod.fit(y, x=x)
+
+    assert(out.nfev > 5)
+    assert(out.nvarys == 4)
+    assert(out.chisqr > 1)
+    assert(mod.params['c'].value > 3)
+    assert(mod.params['center'].value > 1)
+    assert(mod.params['center'].value < 4)
+    assert(mod.params['sigma'].value > 0.5)
+    assert(mod.params['sigma'].value < 3.5)
+    assert(mod.params['amplitude'].value > 50)
+
+
+def test_stepmodel_erf():
+    x, y = get_data()
+    stepmod = StepModel(form='erf')
+    stepmod.guess_starting_values(y, x)
+
+    mod = stepmod + ConstantModel()
+    mod.set_paramval('c', 3)
+
+    out = mod.fit(y, x=x)
+    assert(out.nfev > 5)
+    assert(out.nvarys == 4)
+    assert(out.chisqr > 1)
+    assert(mod.params['c'].value > 3)
+    assert(mod.params['center'].value > 1)
+    assert(mod.params['center'].value < 4)
+    assert(mod.params['amplitude'].value > 50)
+    assert(mod.params['sigma'].value > 0.2)
+    assert(mod.params['sigma'].value < 1.5)
+
+if __name__ == '__main__':
+    test_stepmodel_linear()
+    test_stepmodel_erf()
+
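For orientation on the two form values tested above: a sketch of the step
lineshapes being fit, assuming lmfit's step uses arg = (x - center)/sigma,
a ramp clipped to [0, 1] for form='linear', and an error-function step for
form='erf' (the actual definition lives in lmfit.lineshapes.step):

    import numpy as np
    from scipy.special import erf

    def step_sketch(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
        # arg sweeps through the step region as x crosses center
        arg = (x - center) / max(abs(sigma), 1.e-13)
        if form == 'erf':
            return amplitude * 0.5 * (1 + erf(arg))
        # 'linear': ramp from 0 to amplitude over one sigma
        return amplitude * np.clip(arg, 0.0, 1.0)

Under these definitions the 'linear' form reproduces the ramp built by
get_data() above, which is why both tests recover center near 2.4 and
amplitude near 110.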
