[lmfit-py] 01/03: Imported Upstream version 0.9.3+dfsg

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Thu Mar 31 17:06:56 UTC 2016


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to branch master
in repository lmfit-py.

commit 7be530f22592e2d2193230d863b85d2f5c91c116
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Thu Mar 31 18:44:17 2016 +0200

    Imported Upstream version 0.9.3+dfsg
---
 INSTALL                                         |   24 +-
 LICENSE                                         |   54 +-
 MANIFEST.in                                     |   24 +-
 NIST_STRD/Bennett5.dat                          |  428 +--
 NIST_STRD/BoxBOD.dat                            |  132 +-
 NIST_STRD/Chwirut1.dat                          |  548 ++--
 NIST_STRD/Chwirut2.dat                          |  228 +-
 NIST_STRD/DanWood.dat                           |  132 +-
 NIST_STRD/ENSO.dat                              |  456 ++--
 NIST_STRD/Eckerle4.dat                          |  190 +-
 NIST_STRD/Gauss1.dat                            |  620 ++---
 NIST_STRD/Gauss2.dat                            |  620 ++---
 NIST_STRD/Gauss3.dat                            |  620 ++---
 NIST_STRD/Hahn1.dat                             |  592 ++--
 NIST_STRD/Kirby2.dat                            |  422 +--
 NIST_STRD/Lanczos1.dat                          |  168 +-
 NIST_STRD/Lanczos2.dat                          |  168 +-
 NIST_STRD/Lanczos3.dat                          |  168 +-
 NIST_STRD/MGH09.dat                             |  142 +-
 NIST_STRD/MGH10.dat                             |  152 +-
 NIST_STRD/MGH17.dat                             |  186 +-
 NIST_STRD/Misra1a.dat                           |  148 +-
 NIST_STRD/Misra1b.dat                           |  148 +-
 NIST_STRD/Misra1c.dat                           |  148 +-
 NIST_STRD/Misra1d.dat                           |  148 +-
 NIST_STRD/Nelson.dat                            |  376 +--
 NIST_STRD/Rat42.dat                             |  138 +-
 NIST_STRD/Rat43.dat                             |  150 +-
 NIST_STRD/Roszman1.dat                          |  170 +-
 NIST_STRD/Thurber.dat                           |  194 +-
 PKG-INFO                                        |    2 +-
 README                                          |   65 -
 THANKS.txt                                      |   48 +-
 doc/Makefile                                    |  224 +-
 doc/__pycache__/extensions.cpython-35.pyc       |  Bin 0 -> 358 bytes
 doc/_images/emcee_dbl_exp.png                   |  Bin 0 -> 19442 bytes
 doc/_images/emcee_dbl_exp2.png                  |  Bin 0 -> 22518 bytes
 doc/_images/emcee_triangle.png                  |  Bin 0 -> 195958 bytes
 doc/_templates/indexsidebar.html                |   48 +-
 doc/bounds.rst                                  |  157 +-
 doc/builtin_models.rst                          | 1923 ++++++-------
 doc/conf.py                                     |  363 +--
 doc/confidence.rst                              |  370 +--
 doc/constraints.rst                             |  332 +--
 doc/contents.rst                                |   36 +-
 doc/extensions.py                               |   20 +-
 doc/extensions.pyc                              |  Bin 406 -> 398 bytes
 doc/faq.rst                                     |  195 +-
 doc/fitting.rst                                 | 1519 ++++++-----
 doc/index.rst                                   |  136 +-
 doc/installation.rst                            |  164 +-
 doc/intro.rst                                   |  300 +--
 doc/model.rst                                   | 2290 ++++++++--------
 doc/parameters.rst                              |  477 ++--
 doc/sphinx/ext_mathjax.py                       |   20 +-
 doc/sphinx/ext_pngmath.py                       |   20 +-
 doc/sphinx/theme/lmfitdoc/layout.html           |  132 +-
 doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t |  696 ++---
 doc/sphinx/theme/lmfitdoc/theme.conf            |    8 +-
 doc/support.rst                                 |   60 +-
 doc/whatsnew.rst                                |  194 +-
 lmfit/__init__.py                               |  106 +-
 lmfit/_differentialevolution.py                 | 1500 +++++------
 lmfit/_version.py                               |    4 +-
 lmfit/asteval.py                                | 1606 +++++------
 lmfit/astutils.py                               |  516 ++--
 lmfit/confidence.py                             |  835 +++---
 lmfit/lineshapes.py                             |  572 ++--
 lmfit/minimizer.py                              | 2056 ++++++++------
 lmfit/model.py                                  | 2098 ++++++++-------
 lmfit/models.py                                 |  938 +++----
 lmfit/ordereddict.py                            |  256 +-
 lmfit/parameter.py                              | 1543 ++++++-----
 lmfit/printfuncs.py                             |  456 ++--
 lmfit/ui/__init__.py                            |   96 +-
 lmfit/ui/basefitter.py                          |  640 ++---
 lmfit/ui/ipy_fitter.py                          |  564 ++--
 lmfit/uncertainties/__init__.py                 | 3290 +++++++++++------------
 lmfit/uncertainties/umath.py                    |  700 ++---
 publish_docs.sh                                 |  118 +-
 requirements.txt                                |    4 +-
 setup.py                                        |  108 +-
 tests/NISTModels.py                             |  396 +--
 tests/_test_ci.py                               |  116 +-
 tests/_test_make_paras_and_func.py              |   62 +-
 tests/lmfit_testutils.py                        |   36 +-
 tests/test_1variable.py                         |  114 +-
 tests/test_NIST_Strd.py                         |  534 ++--
 tests/test_algebraic_constraint.py              |  294 +-
 tests/test_algebraic_constraint2.py             |  206 +-
 tests/test_basicfit.py                          |   94 +-
 tests/test_bounded_jacobian.py                  |   86 +-
 tests/test_bounds.py                            |  108 +-
 tests/test_confidence.py                        |  132 +-
 tests/test_copy_params.py                       |   72 +-
 tests/test_default_kws.py                       |   48 +-
 tests/test_itercb.py                            |   58 +-
 tests/test_manypeaks_speed.py                   |   74 +-
 tests/test_model.py                             | 1088 ++++----
 tests/test_multidatasets.py                     |  148 +-
 tests/test_nose.py                              | 1007 ++++---
 tests/test_parameters.py                        |  259 +-
 tests/test_params_set.py                        |   94 +-
 tests/test_stepmodel.py                         |  116 +-
 versioneer.py                                   | 1802 ++++++-------
 105 files changed, 21898 insertions(+), 20545 deletions(-)

diff --git a/INSTALL b/INSTALL
index 08c0ef0..712b012 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,12 +1,12 @@
-Installation instructions for LMFIT-py
-========================================
-
-To install the lmfit python module, use::
-
-   python setup.py build
-   python setup.py install
-
-Python 2.6 or higher is required, as are numpy and scipy.
-
-Matt Newville <newville at cars.uchicago.edu>
-Last Update:  2013-Dec-15
+Installation instructions for LMFIT-py
+========================================
+
+To install the lmfit python module, use::
+
+   python setup.py build
+   python setup.py install
+
+Python 2.6 or higher is required, as are numpy and scipy.
+
+Matt Newville <newville at cars.uchicago.edu>
+Last Update:  2013-Dec-15
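
As a quick sanity check after following the INSTALL steps above, importing the
package and printing its version is enough. A minimal sketch; it assumes the
install completed and that numpy and scipy are present:

    import lmfit
    # versioneer (see versioneer.py / lmfit/_version.py in the diffstat)
    # supplies __version__; after this import it should report 0.9.3.
    print(lmfit.__version__)
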
diff --git a/LICENSE b/LICENSE
index 9b3aa46..174874e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,27 +1,27 @@
-Copyright, Licensing, and Re-distribution
------------------------------------------
-
-The LMFIT-py code is distributed under the following license:
-
-  Copyright (c) 2014 Matthew Newville, The University of Chicago
-                     Till Stensitzki, Freie Universitat Berlin
-                     Daniel B. Allen, Johns Hopkins University
-                     Michal Rawlik, Eidgenossische Technische Hochschule, Zurich
-                     Antonino Ingargiola, University of California, Los Angeles
-                     A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
-
-  Permission to use and redistribute the source code or binary forms of this
-  software and its documentation, with or without modification is hereby
-  granted provided that the above notice of copyright, these terms of use,
-  and the disclaimer of warranty below appear in the source code and
-  documentation, and that none of the names of above institutions or
-  authors appear in advertising or endorsement of works derived from this
-  software without specific prior written permission from all parties.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-  DEALINGS IN THIS SOFTWARE.
+Copyright, Licensing, and Re-distribution
+-----------------------------------------
+
+The LMFIT-py code is distributed under the following license:
+
+  Copyright (c) 2014 Matthew Newville, The University of Chicago
+                     Till Stensitzki, Freie Universitat Berlin
+                     Daniel B. Allen, Johns Hopkins University
+                     Michal Rawlik, Eidgenossische Technische Hochschule, Zurich
+                     Antonino Ingargiola, University of California, Los Angeles
+                     A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
+
+  Permission to use and redistribute the source code or binary forms of this
+  software and its documentation, with or without modification is hereby
+  granted provided that the above notice of copyright, these terms of use,
+  and the disclaimer of warranty below appear in the source code and
+  documentation, and that none of the names of above institutions or
+  authors appear in advertising or endorsement of works derived from this
+  software without specific prior written permission from all parties.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+  DEALINGS IN THIS SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
index d83b8ba..1f48260 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,12 +1,12 @@
-include README.txt INSTALL LICENSE MANIFEST.in PKG-INFO THANKS.txt
-include setup.py publish_docs.sh
-include requirements.txt
-exclude *.pyc core.* *~ *.pdf
-recursive-include lmfit *.py
-recursive-include tests *.py *.dat
-recursive-include NIST_STRD *.dat
-recursive-include doc *
-recursive-exclude doc/_build *
-recursive-exclude doc *.pdf
-include versioneer.py
-include lmfit/_version.py
+include README.txt INSTALL LICENSE MANIFEST.in PKG-INFO THANKS.txt
+include setup.py publish_docs.sh
+include requirements.txt
+exclude *.pyc core.* *~ *.pdf
+recursive-include lmfit *.py
+recursive-include tests *.py *.dat
+recursive-include NIST_STRD *.dat
+recursive-include doc *
+recursive-exclude doc/_build *
+recursive-exclude doc *.pdf
+include versioneer.py
+include lmfit/_version.py
diff --git a/NIST_STRD/Bennett5.dat b/NIST_STRD/Bennett5.dat
index eba218a..51335f4 100644
--- a/NIST_STRD/Bennett5.dat
+++ b/NIST_STRD/Bennett5.dat
@@ -1,214 +1,214 @@
-NIST/ITL StRD
-Dataset Name:  Bennett5          (Bennett5.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  43)
-               Certified Values  (lines 41 to  48)
-               Data              (lines 61 to 214)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               superconductivity magnetization modeling.  The
-               response variable is magnetism, and the predictor
-               variable is the log of time in minutes.
-
-Reference:     Bennett, L., L. Swartzendruber, and H. Brown, 
-               NIST (1994).  
-               Superconductivity Magnetization Modeling.
-
-
-
-
-
-
-Data:          1 Response Variable  (y = magnetism)
-               1 Predictor Variable (x = log[time])
-               154 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               3 Parameters (b1 to b3)
-
-               y = b1 * (b2+x)**(-1/b3)  +  e
-
- 
- 
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   -2000       -1500        -2.5235058043E+03  2.9715175411E+02
-  b2 =      50          45         4.6736564644E+01  1.2448871856E+00
-  b3 =       0.8         0.85      9.3218483193E-01  2.0272299378E-02
-
-Residual Sum of Squares:                    5.2404744073E-04
-Residual Standard Deviation:                1.8629312528E-03
-Degrees of Freedom:                               151
-Number of Observations:                           154
-
-
-
-
-
-
-
-
-
-
-
-Data:   y               x
-     -34.834702E0      7.447168E0
-     -34.393200E0      8.102586E0
-     -34.152901E0      8.452547E0
-     -33.979099E0      8.711278E0
-     -33.845901E0      8.916774E0
-     -33.732899E0      9.087155E0
-     -33.640301E0      9.232590E0
-     -33.559200E0      9.359535E0
-     -33.486801E0      9.472166E0
-     -33.423100E0      9.573384E0
-     -33.365101E0      9.665293E0
-     -33.313000E0      9.749461E0
-     -33.260899E0      9.827092E0
-     -33.217400E0      9.899128E0
-     -33.176899E0      9.966321E0
-     -33.139198E0     10.029280E0
-     -33.101601E0     10.088510E0
-     -33.066799E0     10.144430E0
-     -33.035000E0     10.197380E0
-     -33.003101E0     10.247670E0
-     -32.971298E0     10.295560E0
-     -32.942299E0     10.341250E0
-     -32.916302E0     10.384950E0
-     -32.890202E0     10.426820E0
-     -32.864101E0     10.467000E0
-     -32.841000E0     10.505640E0
-     -32.817799E0     10.542830E0
-     -32.797501E0     10.578690E0
-     -32.774300E0     10.613310E0
-     -32.757000E0     10.646780E0
-     -32.733799E0     10.679150E0
-     -32.716400E0     10.710520E0
-     -32.699100E0     10.740920E0
-     -32.678799E0     10.770440E0
-     -32.661400E0     10.799100E0
-     -32.644001E0     10.826970E0
-     -32.626701E0     10.854080E0
-     -32.612202E0     10.880470E0
-     -32.597698E0     10.906190E0
-     -32.583199E0     10.931260E0
-     -32.568699E0     10.955720E0
-     -32.554298E0     10.979590E0
-     -32.539799E0     11.002910E0
-     -32.525299E0     11.025700E0
-     -32.510799E0     11.047980E0
-     -32.499199E0     11.069770E0
-     -32.487598E0     11.091100E0
-     -32.473202E0     11.111980E0
-     -32.461601E0     11.132440E0
-     -32.435501E0     11.152480E0
-     -32.435501E0     11.172130E0
-     -32.426800E0     11.191410E0
-     -32.412300E0     11.210310E0
-     -32.400799E0     11.228870E0
-     -32.392101E0     11.247090E0
-     -32.380501E0     11.264980E0
-     -32.366001E0     11.282560E0
-     -32.357300E0     11.299840E0
-     -32.348598E0     11.316820E0
-     -32.339901E0     11.333520E0
-     -32.328400E0     11.349940E0
-     -32.319698E0     11.366100E0
-     -32.311001E0     11.382000E0
-     -32.299400E0     11.397660E0
-     -32.290699E0     11.413070E0
-     -32.282001E0     11.428240E0
-     -32.273300E0     11.443200E0
-     -32.264599E0     11.457930E0
-     -32.256001E0     11.472440E0
-     -32.247299E0     11.486750E0
-     -32.238602E0     11.500860E0
-     -32.229900E0     11.514770E0
-     -32.224098E0     11.528490E0
-     -32.215401E0     11.542020E0
-     -32.203800E0     11.555380E0
-     -32.198002E0     11.568550E0
-     -32.189400E0     11.581560E0
-     -32.183601E0     11.594420E0
-     -32.174900E0     11.607121E0
-     -32.169102E0     11.619640E0
-     -32.163300E0     11.632000E0
-     -32.154598E0     11.644210E0
-     -32.145901E0     11.656280E0
-     -32.140099E0     11.668200E0
-     -32.131401E0     11.679980E0
-     -32.125599E0     11.691620E0
-     -32.119801E0     11.703130E0
-     -32.111198E0     11.714510E0
-     -32.105400E0     11.725760E0
-     -32.096699E0     11.736880E0
-     -32.090900E0     11.747890E0
-     -32.088001E0     11.758780E0
-     -32.079300E0     11.769550E0
-     -32.073502E0     11.780200E0
-     -32.067699E0     11.790730E0
-     -32.061901E0     11.801160E0
-     -32.056099E0     11.811480E0
-     -32.050301E0     11.821700E0
-     -32.044498E0     11.831810E0
-     -32.038799E0     11.841820E0
-     -32.033001E0     11.851730E0
-     -32.027199E0     11.861550E0
-     -32.024300E0     11.871270E0
-     -32.018501E0     11.880890E0
-     -32.012699E0     11.890420E0
-     -32.004002E0     11.899870E0
-     -32.001099E0     11.909220E0
-     -31.995300E0     11.918490E0
-     -31.989500E0     11.927680E0
-     -31.983700E0     11.936780E0
-     -31.977900E0     11.945790E0
-     -31.972099E0     11.954730E0
-     -31.969299E0     11.963590E0
-     -31.963501E0     11.972370E0
-     -31.957701E0     11.981070E0
-     -31.951900E0     11.989700E0
-     -31.946100E0     11.998260E0
-     -31.940300E0     12.006740E0
-     -31.937401E0     12.015150E0
-     -31.931601E0     12.023490E0
-     -31.925800E0     12.031760E0
-     -31.922899E0     12.039970E0
-     -31.917101E0     12.048100E0
-     -31.911301E0     12.056170E0
-     -31.908400E0     12.064180E0
-     -31.902599E0     12.072120E0
-     -31.896900E0     12.080010E0
-     -31.893999E0     12.087820E0
-     -31.888201E0     12.095580E0
-     -31.885300E0     12.103280E0
-     -31.882401E0     12.110920E0
-     -31.876600E0     12.118500E0
-     -31.873699E0     12.126030E0
-     -31.867901E0     12.133500E0
-     -31.862101E0     12.140910E0
-     -31.859200E0     12.148270E0
-     -31.856300E0     12.155570E0
-     -31.850500E0     12.162830E0
-     -31.844700E0     12.170030E0
-     -31.841801E0     12.177170E0
-     -31.838900E0     12.184270E0
-     -31.833099E0     12.191320E0
-     -31.830200E0     12.198320E0
-     -31.827299E0     12.205270E0
-     -31.821600E0     12.212170E0
-     -31.818701E0     12.219030E0
-     -31.812901E0     12.225840E0
-     -31.809999E0     12.232600E0
-     -31.807100E0     12.239320E0
-     -31.801300E0     12.245990E0
-     -31.798401E0     12.252620E0
-     -31.795500E0     12.259200E0
-     -31.789700E0     12.265750E0
-     -31.786800E0     12.272240E0
+NIST/ITL StRD
+Dataset Name:  Bennett5          (Bennett5.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  43)
+               Certified Values  (lines 41 to  48)
+               Data              (lines 61 to 214)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               superconductivity magnetization modeling.  The
+               response variable is magnetism, and the predictor
+               variable is the log of time in minutes.
+
+Reference:     Bennett, L., L. Swartzendruber, and H. Brown, 
+               NIST (1994).  
+               Superconductivity Magnetization Modeling.
+
+
+
+
+
+
+Data:          1 Response Variable  (y = magnetism)
+               1 Predictor Variable (x = log[time])
+               154 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               3 Parameters (b1 to b3)
+
+               y = b1 * (b2+x)**(-1/b3)  +  e
+
+ 
+ 
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   -2000       -1500        -2.5235058043E+03  2.9715175411E+02
+  b2 =      50          45         4.6736564644E+01  1.2448871856E+00
+  b3 =       0.8         0.85      9.3218483193E-01  2.0272299378E-02
+
+Residual Sum of Squares:                    5.2404744073E-04
+Residual Standard Deviation:                1.8629312528E-03
+Degrees of Freedom:                               151
+Number of Observations:                           154
+
+
+
+
+
+
+
+
+
+
+
+Data:   y               x
+     -34.834702E0      7.447168E0
+     -34.393200E0      8.102586E0
+     -34.152901E0      8.452547E0
+     -33.979099E0      8.711278E0
+     -33.845901E0      8.916774E0
+     -33.732899E0      9.087155E0
+     -33.640301E0      9.232590E0
+     -33.559200E0      9.359535E0
+     -33.486801E0      9.472166E0
+     -33.423100E0      9.573384E0
+     -33.365101E0      9.665293E0
+     -33.313000E0      9.749461E0
+     -33.260899E0      9.827092E0
+     -33.217400E0      9.899128E0
+     -33.176899E0      9.966321E0
+     -33.139198E0     10.029280E0
+     -33.101601E0     10.088510E0
+     -33.066799E0     10.144430E0
+     -33.035000E0     10.197380E0
+     -33.003101E0     10.247670E0
+     -32.971298E0     10.295560E0
+     -32.942299E0     10.341250E0
+     -32.916302E0     10.384950E0
+     -32.890202E0     10.426820E0
+     -32.864101E0     10.467000E0
+     -32.841000E0     10.505640E0
+     -32.817799E0     10.542830E0
+     -32.797501E0     10.578690E0
+     -32.774300E0     10.613310E0
+     -32.757000E0     10.646780E0
+     -32.733799E0     10.679150E0
+     -32.716400E0     10.710520E0
+     -32.699100E0     10.740920E0
+     -32.678799E0     10.770440E0
+     -32.661400E0     10.799100E0
+     -32.644001E0     10.826970E0
+     -32.626701E0     10.854080E0
+     -32.612202E0     10.880470E0
+     -32.597698E0     10.906190E0
+     -32.583199E0     10.931260E0
+     -32.568699E0     10.955720E0
+     -32.554298E0     10.979590E0
+     -32.539799E0     11.002910E0
+     -32.525299E0     11.025700E0
+     -32.510799E0     11.047980E0
+     -32.499199E0     11.069770E0
+     -32.487598E0     11.091100E0
+     -32.473202E0     11.111980E0
+     -32.461601E0     11.132440E0
+     -32.435501E0     11.152480E0
+     -32.435501E0     11.172130E0
+     -32.426800E0     11.191410E0
+     -32.412300E0     11.210310E0
+     -32.400799E0     11.228870E0
+     -32.392101E0     11.247090E0
+     -32.380501E0     11.264980E0
+     -32.366001E0     11.282560E0
+     -32.357300E0     11.299840E0
+     -32.348598E0     11.316820E0
+     -32.339901E0     11.333520E0
+     -32.328400E0     11.349940E0
+     -32.319698E0     11.366100E0
+     -32.311001E0     11.382000E0
+     -32.299400E0     11.397660E0
+     -32.290699E0     11.413070E0
+     -32.282001E0     11.428240E0
+     -32.273300E0     11.443200E0
+     -32.264599E0     11.457930E0
+     -32.256001E0     11.472440E0
+     -32.247299E0     11.486750E0
+     -32.238602E0     11.500860E0
+     -32.229900E0     11.514770E0
+     -32.224098E0     11.528490E0
+     -32.215401E0     11.542020E0
+     -32.203800E0     11.555380E0
+     -32.198002E0     11.568550E0
+     -32.189400E0     11.581560E0
+     -32.183601E0     11.594420E0
+     -32.174900E0     11.607121E0
+     -32.169102E0     11.619640E0
+     -32.163300E0     11.632000E0
+     -32.154598E0     11.644210E0
+     -32.145901E0     11.656280E0
+     -32.140099E0     11.668200E0
+     -32.131401E0     11.679980E0
+     -32.125599E0     11.691620E0
+     -32.119801E0     11.703130E0
+     -32.111198E0     11.714510E0
+     -32.105400E0     11.725760E0
+     -32.096699E0     11.736880E0
+     -32.090900E0     11.747890E0
+     -32.088001E0     11.758780E0
+     -32.079300E0     11.769550E0
+     -32.073502E0     11.780200E0
+     -32.067699E0     11.790730E0
+     -32.061901E0     11.801160E0
+     -32.056099E0     11.811480E0
+     -32.050301E0     11.821700E0
+     -32.044498E0     11.831810E0
+     -32.038799E0     11.841820E0
+     -32.033001E0     11.851730E0
+     -32.027199E0     11.861550E0
+     -32.024300E0     11.871270E0
+     -32.018501E0     11.880890E0
+     -32.012699E0     11.890420E0
+     -32.004002E0     11.899870E0
+     -32.001099E0     11.909220E0
+     -31.995300E0     11.918490E0
+     -31.989500E0     11.927680E0
+     -31.983700E0     11.936780E0
+     -31.977900E0     11.945790E0
+     -31.972099E0     11.954730E0
+     -31.969299E0     11.963590E0
+     -31.963501E0     11.972370E0
+     -31.957701E0     11.981070E0
+     -31.951900E0     11.989700E0
+     -31.946100E0     11.998260E0
+     -31.940300E0     12.006740E0
+     -31.937401E0     12.015150E0
+     -31.931601E0     12.023490E0
+     -31.925800E0     12.031760E0
+     -31.922899E0     12.039970E0
+     -31.917101E0     12.048100E0
+     -31.911301E0     12.056170E0
+     -31.908400E0     12.064180E0
+     -31.902599E0     12.072120E0
+     -31.896900E0     12.080010E0
+     -31.893999E0     12.087820E0
+     -31.888201E0     12.095580E0
+     -31.885300E0     12.103280E0
+     -31.882401E0     12.110920E0
+     -31.876600E0     12.118500E0
+     -31.873699E0     12.126030E0
+     -31.867901E0     12.133500E0
+     -31.862101E0     12.140910E0
+     -31.859200E0     12.148270E0
+     -31.856300E0     12.155570E0
+     -31.850500E0     12.162830E0
+     -31.844700E0     12.170030E0
+     -31.841801E0     12.177170E0
+     -31.838900E0     12.184270E0
+     -31.833099E0     12.191320E0
+     -31.830200E0     12.198320E0
+     -31.827299E0     12.205270E0
+     -31.821600E0     12.212170E0
+     -31.818701E0     12.219030E0
+     -31.812901E0     12.225840E0
+     -31.809999E0     12.232600E0
+     -31.807100E0     12.239320E0
+     -31.801300E0     12.245990E0
+     -31.798401E0     12.252620E0
+     -31.795500E0     12.259200E0
+     -31.789700E0     12.265750E0
+     -31.786800E0     12.272240E0
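
The Bennett5 header above gives everything needed to reproduce the fit with
lmfit's minimize interface: the model y = b1*(b2+x)**(-1/b3), the "Start 1"
values, and the location of the observations (lines 61 to 214, y then x). A
minimal sketch, assuming the path NIST_STRD/Bennett5.dat relative to the
repository root:

    import numpy as np
    from lmfit import Parameters, minimize, fit_report

    # Observations occupy lines 61-214; y is the first column, x the second.
    data = np.loadtxt('NIST_STRD/Bennett5.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    def residual(params, x, y):
        # Bennett5 model: y = b1*(b2+x)**(-1/b3)
        b1 = params['b1'].value
        b2 = params['b2'].value
        b3 = params['b3'].value
        return b1 * (b2 + x)**(-1.0 / b3) - y

    params = Parameters()
    params.add('b1', value=-2000.0)   # "Start 1" values from the header
    params.add('b2', value=50.0)
    params.add('b3', value=0.8)

    result = minimize(residual, params, args=(x, y))
    print(fit_report(result))

The fitted values can then be checked against the certified parameters and
standard deviations listed above; judging by their names, tests/NISTModels.py
and tests/test_NIST_Strd.py in this tree automate that comparison.
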
diff --git a/NIST_STRD/BoxBOD.dat b/NIST_STRD/BoxBOD.dat
index 6a742fd..49163c7 100644
--- a/NIST_STRD/BoxBOD.dat
+++ b/NIST_STRD/BoxBOD.dat
@@ -1,66 +1,66 @@
-NIST/ITL StRD
-Dataset Name:  BoxBOD            (BoxBOD.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 66)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are described in detail in Box, Hunter and
-               Hunter (1978).  The response variable is biochemical
-               oxygen demand (BOD) in mg/l, and the predictor
-               variable is incubation time in days.
-
-
-Reference:     Box, G. P., W. G. Hunter, and J. S. Hunter (1978).
-               Statistics for Experimenters.  
-               New York, NY: Wiley, pp. 483-487.
-
-
-
-
-
-Data:          1 Response  (y = biochemical oxygen demand)
-               1 Predictor (x = incubation time)
-               6 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               2 Parameters (b1 and b2)
-
-               y = b1*(1-exp[-b2*x])  +  e
-
-
- 
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1           100           2.1380940889E+02  1.2354515176E+01
-  b2 =   1             0.75        5.4723748542E-01  1.0455993237E-01
-
-Residual Sum of Squares:                    1.1680088766E+03
-Residual Standard Deviation:                1.7088072423E+01
-Degrees of Freedom:                                4
-Number of Observations:                            6  
-
-
-
-
-
-
-
-
-
-
-
-
-Data:   y             x
-      109             1
-      149             2
-      149             3
-      191             5
-      213             7
-      224            10
+NIST/ITL StRD
+Dataset Name:  BoxBOD            (BoxBOD.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 66)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are described in detail in Box, Hunter and
+               Hunter (1978).  The response variable is biochemical
+               oxygen demand (BOD) in mg/l, and the predictor
+               variable is incubation time in days.
+
+
+Reference:     Box, G. P., W. G. Hunter, and J. S. Hunter (1978).
+               Statistics for Experimenters.  
+               New York, NY: Wiley, pp. 483-487.
+
+
+
+
+
+Data:          1 Response  (y = biochemical oxygen demand)
+               1 Predictor (x = incubation time)
+               6 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               2 Parameters (b1 and b2)
+
+               y = b1*(1-exp[-b2*x])  +  e
+
+
+ 
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1           100           2.1380940889E+02  1.2354515176E+01
+  b2 =   1             0.75        5.4723748542E-01  1.0455993237E-01
+
+Residual Sum of Squares:                    1.1680088766E+03
+Residual Standard Deviation:                1.7088072423E+01
+Degrees of Freedom:                                4
+Number of Observations:                            6  
+
+
+
+
+
+
+
+
+
+
+
+
+Data:   y             x
+      109             1
+      149             2
+      149             3
+      191             5
+      213             7
+      224            10
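
BoxBOD is small enough to inline, which makes it a convenient demonstration of
lmfit's higher-level Model class instead of a hand-written residual. A minimal
sketch using the six observations and the "Start 2" values from the header
above:

    import numpy as np
    from lmfit import Model

    def boxbod(x, b1, b2):
        # BoxBOD model from the header: y = b1*(1 - exp(-b2*x))
        return b1 * (1.0 - np.exp(-b2 * x))

    x = np.array([1.0, 2.0, 3.0, 5.0, 7.0, 10.0])
    y = np.array([109.0, 149.0, 149.0, 191.0, 213.0, 224.0])

    result = Model(boxbod).fit(y, x=x, b1=100.0, b2=0.75)  # "Start 2"
    print(result.fit_report())

The file flags this dataset as "Higher Level of Difficulty": starting instead
from the "Start 1" values (b1=1, b2=1), a solver can have a much harder time
reaching the certified b1 = 2.1380940889E+02.
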
diff --git a/NIST_STRD/Chwirut1.dat b/NIST_STRD/Chwirut1.dat
index 4ad8aa5..5e72e4e 100644
--- a/NIST_STRD/Chwirut1.dat
+++ b/NIST_STRD/Chwirut1.dat
@@ -1,274 +1,274 @@
-NIST/ITL StRD
-Dataset Name:  Chwirut1          (Chwirut1.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  43)
-               Certified Values  (lines 41 to  48)
-               Data              (lines 61 to 274)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               ultrasonic calibration.  The response variable is
-               ultrasonic response, and the predictor variable is
-               metal distance.
-
-Reference:     Chwirut, D., NIST (197?).  
-               Ultrasonic Reference Block Study. 
-
-
-
-
-
-
-
-Data:          1 Response Variable  (y = ultrasonic response)
-               1 Predictor Variable (x = metal distance)
-               214 Observations
-               Lower Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
-
-               y = exp[-b1*x]/(b2+b3*x)  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   0.1         0.15          1.9027818370E-01  2.1938557035E-02
-  b2 =   0.01        0.008         6.1314004477E-03  3.4500025051E-04
-  b3 =   0.02        0.010         1.0530908399E-02  7.9281847748E-04
-
-Residual Sum of Squares:                    2.3844771393E+03
-Residual Standard Deviation:                3.3616721320E+00
-Degrees of Freedom:                               211
-Number of Observations:                           214
-
-
-
-
-
-
-
-
-
-
-
-Data:  y            x
-     92.9000E0     0.5000E0
-     78.7000E0     0.6250E0
-     64.2000E0     0.7500E0
-     64.9000E0     0.8750E0
-     57.1000E0     1.0000E0
-     43.3000E0     1.2500E0
-     31.1000E0     1.7500E0
-     23.6000E0     2.2500E0
-     31.0500E0     1.7500E0
-     23.7750E0     2.2500E0
-     17.7375E0     2.7500E0
-     13.8000E0     3.2500E0
-     11.5875E0     3.7500E0
-      9.4125E0     4.2500E0
-      7.7250E0     4.7500E0
-      7.3500E0     5.2500E0
-      8.0250E0     5.7500E0
-     90.6000E0     0.5000E0
-     76.9000E0     0.6250E0
-     71.6000E0     0.7500E0
-     63.6000E0     0.8750E0
-     54.0000E0     1.0000E0
-     39.2000E0     1.2500E0
-     29.3000E0     1.7500E0
-     21.4000E0     2.2500E0
-     29.1750E0     1.7500E0
-     22.1250E0     2.2500E0
-     17.5125E0     2.7500E0
-     14.2500E0     3.2500E0
-      9.4500E0     3.7500E0
-      9.1500E0     4.2500E0
-      7.9125E0     4.7500E0
-      8.4750E0     5.2500E0
-      6.1125E0     5.7500E0
-     80.0000E0     0.5000E0
-     79.0000E0     0.6250E0
-     63.8000E0     0.7500E0
-     57.2000E0     0.8750E0
-     53.2000E0     1.0000E0
-     42.5000E0     1.2500E0
-     26.8000E0     1.7500E0
-     20.4000E0     2.2500E0
-     26.8500E0     1.7500E0
-     21.0000E0     2.2500E0
-     16.4625E0     2.7500E0
-     12.5250E0     3.2500E0
-     10.5375E0     3.7500E0
-      8.5875E0     4.2500E0
-      7.1250E0     4.7500E0
-      6.1125E0     5.2500E0
-      5.9625E0     5.7500E0
-     74.1000E0     0.5000E0
-     67.3000E0     0.6250E0
-     60.8000E0     0.7500E0
-     55.5000E0     0.8750E0
-     50.3000E0     1.0000E0
-     41.0000E0     1.2500E0
-     29.4000E0     1.7500E0
-     20.4000E0     2.2500E0
-     29.3625E0     1.7500E0
-     21.1500E0     2.2500E0
-     16.7625E0     2.7500E0
-     13.2000E0     3.2500E0
-     10.8750E0     3.7500E0
-      8.1750E0     4.2500E0
-      7.3500E0     4.7500E0
-      5.9625E0     5.2500E0
-      5.6250E0     5.7500E0
-     81.5000E0      .5000E0
-     62.4000E0      .7500E0
-     32.5000E0     1.5000E0
-     12.4100E0     3.0000E0
-     13.1200E0     3.0000E0
-     15.5600E0     3.0000E0
-      5.6300E0     6.0000E0
-     78.0000E0      .5000E0
-     59.9000E0      .7500E0
-     33.2000E0     1.5000E0
-     13.8400E0     3.0000E0
-     12.7500E0     3.0000E0
-     14.6200E0     3.0000E0
-      3.9400E0     6.0000E0
-     76.8000E0      .5000E0
-     61.0000E0      .7500E0
-     32.9000E0     1.5000E0
-     13.8700E0     3.0000E0
-     11.8100E0     3.0000E0
-     13.3100E0     3.0000E0
-      5.4400E0     6.0000E0
-     78.0000E0      .5000E0
-     63.5000E0      .7500E0
-     33.8000E0     1.5000E0
-     12.5600E0     3.0000E0
-      5.6300E0     6.0000E0
-     12.7500E0     3.0000E0
-     13.1200E0     3.0000E0
-      5.4400E0     6.0000E0
-     76.8000E0      .5000E0
-     60.0000E0      .7500E0
-     47.8000E0     1.0000E0
-     32.0000E0     1.5000E0
-     22.2000E0     2.0000E0
-     22.5700E0     2.0000E0
-     18.8200E0     2.5000E0
-     13.9500E0     3.0000E0
-     11.2500E0     4.0000E0
-      9.0000E0     5.0000E0
-      6.6700E0     6.0000E0
-     75.8000E0      .5000E0
-     62.0000E0      .7500E0
-     48.8000E0     1.0000E0
-     35.2000E0     1.5000E0
-     20.0000E0     2.0000E0
-     20.3200E0     2.0000E0
-     19.3100E0     2.5000E0
-     12.7500E0     3.0000E0
-     10.4200E0     4.0000E0
-      7.3100E0     5.0000E0
-      7.4200E0     6.0000E0
-     70.5000E0      .5000E0
-     59.5000E0      .7500E0
-     48.5000E0     1.0000E0
-     35.8000E0     1.5000E0
-     21.0000E0     2.0000E0
-     21.6700E0     2.0000E0
-     21.0000E0     2.5000E0
-     15.6400E0     3.0000E0
-      8.1700E0     4.0000E0
-      8.5500E0     5.0000E0
-     10.1200E0     6.0000E0
-     78.0000E0      .5000E0
-     66.0000E0      .6250E0
-     62.0000E0      .7500E0
-     58.0000E0      .8750E0
-     47.7000E0     1.0000E0
-     37.8000E0     1.2500E0
-     20.2000E0     2.2500E0
-     21.0700E0     2.2500E0
-     13.8700E0     2.7500E0
-      9.6700E0     3.2500E0
-      7.7600E0     3.7500E0
-      5.4400E0     4.2500E0
-      4.8700E0     4.7500E0
-      4.0100E0     5.2500E0
-      3.7500E0     5.7500E0
-     24.1900E0     3.0000E0
-     25.7600E0     3.0000E0
-     18.0700E0     3.0000E0
-     11.8100E0     3.0000E0
-     12.0700E0     3.0000E0
-     16.1200E0     3.0000E0
-     70.8000E0      .5000E0
-     54.7000E0      .7500E0
-     48.0000E0     1.0000E0
-     39.8000E0     1.5000E0
-     29.8000E0     2.0000E0
-     23.7000E0     2.5000E0
-     29.6200E0     2.0000E0
-     23.8100E0     2.5000E0
-     17.7000E0     3.0000E0
-     11.5500E0     4.0000E0
-     12.0700E0     5.0000E0
-      8.7400E0     6.0000E0
-     80.7000E0      .5000E0
-     61.3000E0      .7500E0
-     47.5000E0     1.0000E0
-     29.0000E0     1.5000E0
-     24.0000E0     2.0000E0
-     17.7000E0     2.5000E0
-     24.5600E0     2.0000E0
-     18.6700E0     2.5000E0
-     16.2400E0     3.0000E0
-      8.7400E0     4.0000E0
-      7.8700E0     5.0000E0
-      8.5100E0     6.0000E0
-     66.7000E0      .5000E0
-     59.2000E0      .7500E0
-     40.8000E0     1.0000E0
-     30.7000E0     1.5000E0
-     25.7000E0     2.0000E0
-     16.3000E0     2.5000E0
-     25.9900E0     2.0000E0
-     16.9500E0     2.5000E0
-     13.3500E0     3.0000E0
-      8.6200E0     4.0000E0
-      7.2000E0     5.0000E0
-      6.6400E0     6.0000E0
-     13.6900E0     3.0000E0
-     81.0000E0      .5000E0
-     64.5000E0      .7500E0
-     35.5000E0     1.5000E0
-     13.3100E0     3.0000E0
-      4.8700E0     6.0000E0
-     12.9400E0     3.0000E0
-      5.0600E0     6.0000E0
-     15.1900E0     3.0000E0
-     14.6200E0     3.0000E0
-     15.6400E0     3.0000E0
-     25.5000E0     1.7500E0
-     25.9500E0     1.7500E0
-     81.7000E0      .5000E0
-     61.6000E0      .7500E0
-     29.8000E0     1.7500E0
-     29.8100E0     1.7500E0
-     17.1700E0     2.7500E0
-     10.3900E0     3.7500E0
-     28.4000E0     1.7500E0
-     28.6900E0     1.7500E0
-     81.3000E0      .5000E0
-     60.9000E0      .7500E0
-     16.6500E0     2.7500E0
-     10.0500E0     3.7500E0
-     28.9000E0     1.7500E0
-     28.9500E0     1.7500E0
+NIST/ITL StRD
+Dataset Name:  Chwirut1          (Chwirut1.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  43)
+               Certified Values  (lines 41 to  48)
+               Data              (lines 61 to 274)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               ultrasonic calibration.  The response variable is
+               ultrasonic response, and the predictor variable is
+               metal distance.
+
+Reference:     Chwirut, D., NIST (197?).  
+               Ultrasonic Reference Block Study. 
+
+
+
+
+
+
+
+Data:          1 Response Variable  (y = ultrasonic response)
+               1 Predictor Variable (x = metal distance)
+               214 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+
+               y = exp[-b1*x]/(b2+b3*x)  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   0.1         0.15          1.9027818370E-01  2.1938557035E-02
+  b2 =   0.01        0.008         6.1314004477E-03  3.4500025051E-04
+  b3 =   0.02        0.010         1.0530908399E-02  7.9281847748E-04
+
+Residual Sum of Squares:                    2.3844771393E+03
+Residual Standard Deviation:                3.3616721320E+00
+Degrees of Freedom:                               211
+Number of Observations:                           214
+
+
+
+
+
+
+
+
+
+
+
+Data:  y            x
+     92.9000E0     0.5000E0
+     78.7000E0     0.6250E0
+     64.2000E0     0.7500E0
+     64.9000E0     0.8750E0
+     57.1000E0     1.0000E0
+     43.3000E0     1.2500E0
+     31.1000E0     1.7500E0
+     23.6000E0     2.2500E0
+     31.0500E0     1.7500E0
+     23.7750E0     2.2500E0
+     17.7375E0     2.7500E0
+     13.8000E0     3.2500E0
+     11.5875E0     3.7500E0
+      9.4125E0     4.2500E0
+      7.7250E0     4.7500E0
+      7.3500E0     5.2500E0
+      8.0250E0     5.7500E0
+     90.6000E0     0.5000E0
+     76.9000E0     0.6250E0
+     71.6000E0     0.7500E0
+     63.6000E0     0.8750E0
+     54.0000E0     1.0000E0
+     39.2000E0     1.2500E0
+     29.3000E0     1.7500E0
+     21.4000E0     2.2500E0
+     29.1750E0     1.7500E0
+     22.1250E0     2.2500E0
+     17.5125E0     2.7500E0
+     14.2500E0     3.2500E0
+      9.4500E0     3.7500E0
+      9.1500E0     4.2500E0
+      7.9125E0     4.7500E0
+      8.4750E0     5.2500E0
+      6.1125E0     5.7500E0
+     80.0000E0     0.5000E0
+     79.0000E0     0.6250E0
+     63.8000E0     0.7500E0
+     57.2000E0     0.8750E0
+     53.2000E0     1.0000E0
+     42.5000E0     1.2500E0
+     26.8000E0     1.7500E0
+     20.4000E0     2.2500E0
+     26.8500E0     1.7500E0
+     21.0000E0     2.2500E0
+     16.4625E0     2.7500E0
+     12.5250E0     3.2500E0
+     10.5375E0     3.7500E0
+      8.5875E0     4.2500E0
+      7.1250E0     4.7500E0
+      6.1125E0     5.2500E0
+      5.9625E0     5.7500E0
+     74.1000E0     0.5000E0
+     67.3000E0     0.6250E0
+     60.8000E0     0.7500E0
+     55.5000E0     0.8750E0
+     50.3000E0     1.0000E0
+     41.0000E0     1.2500E0
+     29.4000E0     1.7500E0
+     20.4000E0     2.2500E0
+     29.3625E0     1.7500E0
+     21.1500E0     2.2500E0
+     16.7625E0     2.7500E0
+     13.2000E0     3.2500E0
+     10.8750E0     3.7500E0
+      8.1750E0     4.2500E0
+      7.3500E0     4.7500E0
+      5.9625E0     5.2500E0
+      5.6250E0     5.7500E0
+     81.5000E0      .5000E0
+     62.4000E0      .7500E0
+     32.5000E0     1.5000E0
+     12.4100E0     3.0000E0
+     13.1200E0     3.0000E0
+     15.5600E0     3.0000E0
+      5.6300E0     6.0000E0
+     78.0000E0      .5000E0
+     59.9000E0      .7500E0
+     33.2000E0     1.5000E0
+     13.8400E0     3.0000E0
+     12.7500E0     3.0000E0
+     14.6200E0     3.0000E0
+      3.9400E0     6.0000E0
+     76.8000E0      .5000E0
+     61.0000E0      .7500E0
+     32.9000E0     1.5000E0
+     13.8700E0     3.0000E0
+     11.8100E0     3.0000E0
+     13.3100E0     3.0000E0
+      5.4400E0     6.0000E0
+     78.0000E0      .5000E0
+     63.5000E0      .7500E0
+     33.8000E0     1.5000E0
+     12.5600E0     3.0000E0
+      5.6300E0     6.0000E0
+     12.7500E0     3.0000E0
+     13.1200E0     3.0000E0
+      5.4400E0     6.0000E0
+     76.8000E0      .5000E0
+     60.0000E0      .7500E0
+     47.8000E0     1.0000E0
+     32.0000E0     1.5000E0
+     22.2000E0     2.0000E0
+     22.5700E0     2.0000E0
+     18.8200E0     2.5000E0
+     13.9500E0     3.0000E0
+     11.2500E0     4.0000E0
+      9.0000E0     5.0000E0
+      6.6700E0     6.0000E0
+     75.8000E0      .5000E0
+     62.0000E0      .7500E0
+     48.8000E0     1.0000E0
+     35.2000E0     1.5000E0
+     20.0000E0     2.0000E0
+     20.3200E0     2.0000E0
+     19.3100E0     2.5000E0
+     12.7500E0     3.0000E0
+     10.4200E0     4.0000E0
+      7.3100E0     5.0000E0
+      7.4200E0     6.0000E0
+     70.5000E0      .5000E0
+     59.5000E0      .7500E0
+     48.5000E0     1.0000E0
+     35.8000E0     1.5000E0
+     21.0000E0     2.0000E0
+     21.6700E0     2.0000E0
+     21.0000E0     2.5000E0
+     15.6400E0     3.0000E0
+      8.1700E0     4.0000E0
+      8.5500E0     5.0000E0
+     10.1200E0     6.0000E0
+     78.0000E0      .5000E0
+     66.0000E0      .6250E0
+     62.0000E0      .7500E0
+     58.0000E0      .8750E0
+     47.7000E0     1.0000E0
+     37.8000E0     1.2500E0
+     20.2000E0     2.2500E0
+     21.0700E0     2.2500E0
+     13.8700E0     2.7500E0
+      9.6700E0     3.2500E0
+      7.7600E0     3.7500E0
+      5.4400E0     4.2500E0
+      4.8700E0     4.7500E0
+      4.0100E0     5.2500E0
+      3.7500E0     5.7500E0
+     24.1900E0     3.0000E0
+     25.7600E0     3.0000E0
+     18.0700E0     3.0000E0
+     11.8100E0     3.0000E0
+     12.0700E0     3.0000E0
+     16.1200E0     3.0000E0
+     70.8000E0      .5000E0
+     54.7000E0      .7500E0
+     48.0000E0     1.0000E0
+     39.8000E0     1.5000E0
+     29.8000E0     2.0000E0
+     23.7000E0     2.5000E0
+     29.6200E0     2.0000E0
+     23.8100E0     2.5000E0
+     17.7000E0     3.0000E0
+     11.5500E0     4.0000E0
+     12.0700E0     5.0000E0
+      8.7400E0     6.0000E0
+     80.7000E0      .5000E0
+     61.3000E0      .7500E0
+     47.5000E0     1.0000E0
+     29.0000E0     1.5000E0
+     24.0000E0     2.0000E0
+     17.7000E0     2.5000E0
+     24.5600E0     2.0000E0
+     18.6700E0     2.5000E0
+     16.2400E0     3.0000E0
+      8.7400E0     4.0000E0
+      7.8700E0     5.0000E0
+      8.5100E0     6.0000E0
+     66.7000E0      .5000E0
+     59.2000E0      .7500E0
+     40.8000E0     1.0000E0
+     30.7000E0     1.5000E0
+     25.7000E0     2.0000E0
+     16.3000E0     2.5000E0
+     25.9900E0     2.0000E0
+     16.9500E0     2.5000E0
+     13.3500E0     3.0000E0
+      8.6200E0     4.0000E0
+      7.2000E0     5.0000E0
+      6.6400E0     6.0000E0
+     13.6900E0     3.0000E0
+     81.0000E0      .5000E0
+     64.5000E0      .7500E0
+     35.5000E0     1.5000E0
+     13.3100E0     3.0000E0
+      4.8700E0     6.0000E0
+     12.9400E0     3.0000E0
+      5.0600E0     6.0000E0
+     15.1900E0     3.0000E0
+     14.6200E0     3.0000E0
+     15.6400E0     3.0000E0
+     25.5000E0     1.7500E0
+     25.9500E0     1.7500E0
+     81.7000E0      .5000E0
+     61.6000E0      .7500E0
+     29.8000E0     1.7500E0
+     29.8100E0     1.7500E0
+     17.1700E0     2.7500E0
+     10.3900E0     3.7500E0
+     28.4000E0     1.7500E0
+     28.6900E0     1.7500E0
+     81.3000E0      .5000E0
+     60.9000E0      .7500E0
+     16.6500E0     2.7500E0
+     10.0500E0     3.7500E0
+     28.9000E0     1.7500E0
+     28.9500E0     1.7500E0
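
Chwirut1's model, y = exp(-b1*x)/(b2+b3*x), wraps just as easily. A minimal
sketch along the same lines, again assuming the NIST_STRD/ path and reading
the observation block that the header places at lines 61-274:

    import numpy as np
    from lmfit import Model

    data = np.loadtxt('NIST_STRD/Chwirut1.dat', skiprows=60)
    y, x = data[:, 0], data[:, 1]

    def chwirut(x, b1, b2, b3):
        # Chwirut model: y = exp(-b1*x)/(b2 + b3*x)
        return np.exp(-b1 * x) / (b2 + b3 * x)

    result = Model(chwirut).fit(y, x=x, b1=0.1, b2=0.01, b3=0.02)  # "Start 1"
    print(result.fit_report())
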
diff --git a/NIST_STRD/Chwirut2.dat b/NIST_STRD/Chwirut2.dat
index 03703de..0651faa 100644
--- a/NIST_STRD/Chwirut2.dat
+++ b/NIST_STRD/Chwirut2.dat
@@ -1,114 +1,114 @@
-NIST/ITL StRD
-Dataset Name:  Chwirut2          (Chwirut2.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  43)
-               Certified Values  (lines 41 to  48)
-               Data              (lines 61 to 114)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               ultrasonic calibration.  The response variable is
-               ultrasonic response, and the predictor variable is
-               metal distance.
-
-
-
-Reference:     Chwirut, D., NIST (197?).  
-               Ultrasonic Reference Block Study. 
-
-
-
-
-
-Data:          1 Response  (y = ultrasonic response)
-               1 Predictor (x = metal distance)
-               54 Observations
-               Lower Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
-
-               y = exp(-b1*x)/(b2+b3*x)  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   0.1         0.15          1.6657666537E-01  3.8303286810E-02
-  b2 =   0.01        0.008         5.1653291286E-03  6.6621605126E-04
-  b3 =   0.02        0.010         1.2150007096E-02  1.5304234767E-03
-
-Residual Sum of Squares:                    5.1304802941E+02
-Residual Standard Deviation:                3.1717133040E+00
-Degrees of Freedom:                                51
-Number of Observations:                            54
-
-
-
-
-
-
-
-
-
- 
-
-Data:  y             x
-      92.9000E0     0.500E0
-      57.1000E0     1.000E0
-      31.0500E0     1.750E0
-      11.5875E0     3.750E0
-       8.0250E0     5.750E0
-      63.6000E0     0.875E0
-      21.4000E0     2.250E0
-      14.2500E0     3.250E0
-       8.4750E0     5.250E0
-      63.8000E0     0.750E0
-      26.8000E0     1.750E0
-      16.4625E0     2.750E0
-       7.1250E0     4.750E0
-      67.3000E0     0.625E0
-      41.0000E0     1.250E0
-      21.1500E0     2.250E0
-       8.1750E0     4.250E0
-      81.5000E0      .500E0
-      13.1200E0     3.000E0
-      59.9000E0      .750E0
-      14.6200E0     3.000E0
-      32.9000E0     1.500E0
-       5.4400E0     6.000E0
-      12.5600E0     3.000E0
-       5.4400E0     6.000E0
-      32.0000E0     1.500E0
-      13.9500E0     3.000E0
-      75.8000E0      .500E0
-      20.0000E0     2.000E0
-      10.4200E0     4.000E0
-      59.5000E0      .750E0
-      21.6700E0     2.000E0
-       8.5500E0     5.000E0
-      62.0000E0      .750E0
-      20.2000E0     2.250E0
-       7.7600E0     3.750E0
-       3.7500E0     5.750E0
-      11.8100E0     3.000E0
-      54.7000E0      .750E0
-      23.7000E0     2.500E0
-      11.5500E0     4.000E0
-      61.3000E0      .750E0
-      17.7000E0     2.500E0
-       8.7400E0     4.000E0
-      59.2000E0      .750E0
-      16.3000E0     2.500E0
-       8.6200E0     4.000E0
-      81.0000E0      .500E0
-       4.8700E0     6.000E0
-      14.6200E0     3.000E0
-      81.7000E0      .500E0
-      17.1700E0     2.750E0
-      81.3000E0      .500E0
-      28.9000E0     1.750E0
+NIST/ITL StRD
+Dataset Name:  Chwirut2          (Chwirut2.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  43)
+               Certified Values  (lines 41 to  48)
+               Data              (lines 61 to 114)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               ultrasonic calibration.  The response variable is
+               ultrasonic response, and the predictor variable is
+               metal distance.
+
+
+
+Reference:     Chwirut, D., NIST (197?).  
+               Ultrasonic Reference Block Study. 
+
+
+
+
+
+Data:          1 Response  (y = ultrasonic response)
+               1 Predictor (x = metal distance)
+               54 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+
+               y = exp(-b1*x)/(b2+b3*x)  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   0.1         0.15          1.6657666537E-01  3.8303286810E-02
+  b2 =   0.01        0.008         5.1653291286E-03  6.6621605126E-04
+  b3 =   0.02        0.010         1.2150007096E-02  1.5304234767E-03
+
+Residual Sum of Squares:                    5.1304802941E+02
+Residual Standard Deviation:                3.1717133040E+00
+Degrees of Freedom:                                51
+Number of Observations:                            54
+
+
+
+
+
+
+
+
+
+ 
+
+Data:  y             x
+      92.9000E0     0.500E0
+      57.1000E0     1.000E0
+      31.0500E0     1.750E0
+      11.5875E0     3.750E0
+       8.0250E0     5.750E0
+      63.6000E0     0.875E0
+      21.4000E0     2.250E0
+      14.2500E0     3.250E0
+       8.4750E0     5.250E0
+      63.8000E0     0.750E0
+      26.8000E0     1.750E0
+      16.4625E0     2.750E0
+       7.1250E0     4.750E0
+      67.3000E0     0.625E0
+      41.0000E0     1.250E0
+      21.1500E0     2.250E0
+       8.1750E0     4.250E0
+      81.5000E0      .500E0
+      13.1200E0     3.000E0
+      59.9000E0      .750E0
+      14.6200E0     3.000E0
+      32.9000E0     1.500E0
+       5.4400E0     6.000E0
+      12.5600E0     3.000E0
+       5.4400E0     6.000E0
+      32.0000E0     1.500E0
+      13.9500E0     3.000E0
+      75.8000E0      .500E0
+      20.0000E0     2.000E0
+      10.4200E0     4.000E0
+      59.5000E0      .750E0
+      21.6700E0     2.000E0
+       8.5500E0     5.000E0
+      62.0000E0      .750E0
+      20.2000E0     2.250E0
+       7.7600E0     3.750E0
+       3.7500E0     5.750E0
+      11.8100E0     3.000E0
+      54.7000E0      .750E0
+      23.7000E0     2.500E0
+      11.5500E0     4.000E0
+      61.3000E0      .750E0
+      17.7000E0     2.500E0
+       8.7400E0     4.000E0
+      59.2000E0      .750E0
+      16.3000E0     2.500E0
+       8.6200E0     4.000E0
+      81.0000E0      .500E0
+       4.8700E0     6.000E0
+      14.6200E0     3.000E0
+      81.7000E0      .500E0
+      17.1700E0     2.750E0
+      81.3000E0      .500E0
+      28.9000E0     1.750E0
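
Chwirut2 shares the model function above; only the observation count (54) and
the certified values differ. Since every file in NIST_STRD/ follows the same
layout advertised in its own header, a tiny reader covers all of them. A
sketch (read_nist_strd is a hypothetical helper, not part of this tree):

    import numpy as np

    def read_nist_strd(path, data_start=61):
        # Each NIST StRD file announces its layout in its header, e.g.
        # 'Data (lines 61 to 114)' for Chwirut2.  The observations are
        # whitespace-separated Fortran-style floats such as 92.9000E0,
        # which numpy parses directly.
        obs = np.loadtxt(path, skiprows=data_start - 1)
        return obs[:, 0], obs[:, 1]   # (y, x)

    y, x = read_nist_strd('NIST_STRD/Chwirut2.dat')
    print(len(y))   # 54, matching 'Number of Observations' above
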
diff --git a/NIST_STRD/DanWood.dat b/NIST_STRD/DanWood.dat
index 479a9bd..317f6a7 100644
--- a/NIST_STRD/DanWood.dat
+++ b/NIST_STRD/DanWood.dat
@@ -1,66 +1,66 @@
-NIST/ITL StRD
-Dataset Name:  DanWood           (DanWood.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 66)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data and model are described in Daniel and Wood
-               (1980), and originally published in E.S.Keeping, 
-               "Introduction to Statistical Inference," Van Nostrand
-               Company, Princeton, NJ, 1962, p. 354.  The response
-               variable is energy radiated from a carbon filament
-               lamp per cm**2 per second, and the predictor variable
-               is the absolute temperature of the filament in 1000
-               degrees Kelvin.
-
-Reference:     Daniel, C. and F. S. Wood (1980).
-               Fitting Equations to Data, Second Edition. 
-               New York, NY:  John Wiley and Sons, pp. 428-431.
-
-
-Data:          1 Response Variable  (y = energy)
-               1 Predictor Variable (x = temperature)
-               6 Observations
-               Lower Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               2 Parameters (b1 and b2)
-
-               y  = b1*x**b2  +  e
-
-
- 
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1           0.7           7.6886226176E-01  1.8281973860E-02
-  b2 =   5           4             3.8604055871E+00  5.1726610913E-02
- 
-Residual Sum of Squares:                    4.3173084083E-03
-Residual Standard Deviation:                3.2853114039E-02
-Degrees of Freedom:                                4
-Number of Observations:                            6 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
-Data:  y              x
-      2.138E0        1.309E0
-      3.421E0        1.471E0
-      3.597E0        1.490E0
-      4.340E0        1.565E0
-      4.882E0        1.611E0
-      5.660E0        1.680E0
+NIST/ITL StRD
+Dataset Name:  DanWood           (DanWood.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 66)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data and model are described in Daniel and Wood
+               (1980), and originally published in E.S.Keeping, 
+               "Introduction to Statistical Inference," Van Nostrand
+               Company, Princeton, NJ, 1962, p. 354.  The response
+               variable is energy radiated from a carbon filament
+               lamp per cm**2 per second, and the predictor variable
+               is the absolute temperature of the filament in 1000
+               degrees Kelvin.
+
+Reference:     Daniel, C. and F. S. Wood (1980).
+               Fitting Equations to Data, Second Edition. 
+               New York, NY:  John Wiley and Sons, pp. 428-431.
+
+
+Data:          1 Response Variable  (y = energy)
+               1 Predictor Variable (x = temperature)
+               6 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               2 Parameters (b1 and b2)
+
+               y  = b1*x**b2  +  e
+
+
+ 
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1           0.7           7.6886226176E-01  1.8281973860E-02
+  b2 =   5           4             3.8604055871E+00  5.1726610913E-02
+ 
+Residual Sum of Squares:                    4.3173084083E-03
+Residual Standard Deviation:                3.2853114039E-02
+Degrees of Freedom:                                4
+Number of Observations:                            6 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+Data:  y              x
+      2.138E0        1.309E0
+      3.421E0        1.471E0
+      3.597E0        1.490E0
+      4.340E0        1.565E0
+      4.882E0        1.611E0
+      5.660E0        1.680E0
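
DanWood's y = b1*x**b2 matches the power-law lineshape that ships with
lmfit.models, so no user model function is needed at all. A minimal sketch
(PowerLawModel parametrizes amplitude*x**exponent, so amplitude plays the role
of b1 and exponent of b2):

    import numpy as np
    from lmfit.models import PowerLawModel

    x = np.array([1.309, 1.471, 1.490, 1.565, 1.611, 1.680])
    y = np.array([2.138, 3.421, 3.597, 4.340, 4.882, 5.660])

    result = PowerLawModel().fit(y, x=x, amplitude=1.0, exponent=5.0)  # "Start 1"
    print(result.fit_report())
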
diff --git a/NIST_STRD/ENSO.dat b/NIST_STRD/ENSO.dat
index f374db2..efe5cd8 100644
--- a/NIST_STRD/ENSO.dat
+++ b/NIST_STRD/ENSO.dat
@@ -1,228 +1,228 @@
-NIST/ITL StRD
-Dataset Name:  ENSO              (ENSO.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  49)
-               Certified Values  (lines 41 to  54)
-               Data              (lines 61 to 228)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   The data are monthly averaged atmospheric pressure 
-               differences between Easter Island and Darwin, 
-               Australia.  This difference drives the trade winds in 
-               the southern hemisphere.  Fourier analysis of the data
-               reveals 3 significant cycles.  The annual cycle is the
-               strongest, but cycles with periods of approximately 44
-               and 26 months are also present.  These cycles
-               correspond to the El Nino and the Southern Oscillation.
-               Arguments to the SIN and COS functions are in radians.
-
-Reference:     Kahaner, D., C. Moler, and S. Nash, (1989). 
-               Numerical Methods and Software.  
-               Englewood Cliffs, NJ: Prentice Hall, pp. 441-445.
-
-Data:          1 Response  (y = atmospheric pressure)
-               1 Predictor (x = time)
-               168 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               9 Parameters (b1 to b9)
-
-               y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 ) 
-                      + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
-                      + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 )  + e
- 
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   11.0        10.0          1.0510749193E+01  1.7488832467E-01
-  b2 =    3.0         3.0          3.0762128085E+00  2.4310052139E-01
-  b3 =    0.5         0.5          5.3280138227E-01  2.4354686618E-01
-  b4 =   40.0        44.0          4.4311088700E+01  9.4408025976E-01
-  b5 =   -0.7        -1.5         -1.6231428586E+00  2.8078369611E-01
-  b6 =   -1.3         0.5          5.2554493756E-01  4.8073701119E-01
-  b7 =   25.0        26.0          2.6887614440E+01  4.1612939130E-01
-  b8 =   -0.3        -0.1          2.1232288488E-01  5.1460022911E-01
-  b9 =    1.4         1.5          1.4966870418E+00  2.5434468893E-01
-
-Residual Sum of Squares:                    7.8853978668E+02
-Residual Standard Deviation:                2.2269642403E+00
-Degrees of Freedom:                               159
-Number of Observations:                           168
-
-
-
-
-
-Data:   y          x
-    12.90000    1.000000
-    11.30000    2.000000
-    10.60000    3.000000
-    11.20000    4.000000
-    10.90000    5.000000
-    7.500000    6.000000
-    7.700000    7.000000
-    11.70000    8.000000
-    12.90000    9.000000
-    14.30000   10.000000
-    10.90000    11.00000
-    13.70000    12.00000
-    17.10000    13.00000
-    14.00000    14.00000
-    15.30000    15.00000
-    8.500000    16.00000
-    5.700000    17.00000
-    5.500000    18.00000
-    7.600000    19.00000
-    8.600000    20.00000
-    7.300000    21.00000
-    7.600000    22.00000
-    12.70000    23.00000
-    11.00000    24.00000
-    12.70000    25.00000
-    12.90000    26.00000
-    13.00000    27.00000
-    10.90000    28.00000
-   10.400000    29.00000
-   10.200000    30.00000
-    8.000000    31.00000
-    10.90000    32.00000
-    13.60000    33.00000
-   10.500000    34.00000
-    9.200000    35.00000
-    12.40000    36.00000
-    12.70000    37.00000
-    13.30000    38.00000
-   10.100000    39.00000
-    7.800000    40.00000
-    4.800000    41.00000
-    3.000000    42.00000
-    2.500000    43.00000
-    6.300000    44.00000
-    9.700000    45.00000
-    11.60000    46.00000
-    8.600000    47.00000
-    12.40000    48.00000
-   10.500000    49.00000
-    13.30000    50.00000
-   10.400000    51.00000
-    8.100000    52.00000
-    3.700000    53.00000
-    10.70000    54.00000
-    5.100000    55.00000
-   10.400000    56.00000
-    10.90000    57.00000
-    11.70000    58.00000
-    11.40000    59.00000
-    13.70000    60.00000
-    14.10000    61.00000
-    14.00000    62.00000
-    12.50000    63.00000
-    6.300000    64.00000
-    9.600000    65.00000
-    11.70000    66.00000
-    5.000000    67.00000
-    10.80000    68.00000
-    12.70000    69.00000
-    10.80000    70.00000
-    11.80000    71.00000
-    12.60000    72.00000
-    15.70000    73.00000
-    12.60000    74.00000
-    14.80000    75.00000
-    7.800000    76.00000
-    7.100000    77.00000
-    11.20000    78.00000
-    8.100000    79.00000
-    6.400000    80.00000
-    5.200000    81.00000
-    12.00000    82.00000
-   10.200000    83.00000
-    12.70000    84.00000
-   10.200000    85.00000
-    14.70000    86.00000
-    12.20000    87.00000
-    7.100000    88.00000
-    5.700000    89.00000
-    6.700000    90.00000
-    3.900000    91.00000
-    8.500000    92.00000
-    8.300000    93.00000
-    10.80000    94.00000
-    16.70000    95.00000
-    12.60000    96.00000
-    12.50000    97.00000
-    12.50000    98.00000
-    9.800000    99.00000
-    7.200000   100.00000
-    4.100000   101.00000
-    10.60000   102.00000
-   10.100000   103.00000
-   10.100000   104.00000
-    11.90000   105.00000
-    13.60000    106.0000
-    16.30000    107.0000
-    17.60000    108.0000
-    15.50000    109.0000
-    16.00000    110.0000
-    15.20000    111.0000
-    11.20000    112.0000
-    14.30000    113.0000
-    14.50000    114.0000
-    8.500000    115.0000
-    12.00000    116.0000
-    12.70000    117.0000
-    11.30000    118.0000
-    14.50000    119.0000
-    15.10000    120.0000
-   10.400000    121.0000
-    11.50000    122.0000
-    13.40000    123.0000
-    7.500000    124.0000
-   0.6000000    125.0000
-   0.3000000    126.0000
-    5.500000    127.0000
-    5.000000    128.0000
-    4.600000    129.0000
-    8.200000    130.0000
-    9.900000    131.0000
-    9.200000    132.0000
-    12.50000    133.0000
-    10.90000    134.0000
-    9.900000    135.0000
-    8.900000    136.0000
-    7.600000    137.0000
-    9.500000    138.0000
-    8.400000    139.0000
-    10.70000    140.0000
-    13.60000    141.0000
-    13.70000    142.0000
-    13.70000    143.0000
-    16.50000    144.0000
-    16.80000    145.0000
-    17.10000    146.0000
-    15.40000    147.0000
-    9.500000    148.0000
-    6.100000    149.0000
-   10.100000    150.0000
-    9.300000    151.0000
-    5.300000    152.0000
-    11.20000    153.0000
-    16.60000    154.0000
-    15.60000    155.0000
-    12.00000    156.0000
-    11.50000    157.0000
-    8.600000    158.0000
-    13.80000    159.0000
-    8.700000    160.0000
-    8.600000    161.0000
-    8.600000    162.0000
-    8.700000    163.0000
-    12.80000    164.0000
-    13.20000    165.0000
-    14.00000    166.0000
-    13.40000    167.0000
-    14.80000    168.0000
+NIST/ITL StRD
+Dataset Name:  ENSO              (ENSO.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  49)
+               Certified Values  (lines 41 to  54)
+               Data              (lines 61 to 228)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   The data are monthly averaged atmospheric pressure 
+               differences between Easter Island and Darwin, 
+               Australia.  This difference drives the trade winds in 
+               the southern hemisphere.  Fourier analysis of the data
+               reveals 3 significant cycles.  The annual cycle is the
+               strongest, but cycles with periods of approximately 44
+               and 26 months are also present.  These cycles
+               correspond to the El Nino and the Southern Oscillation.
+               Arguments to the SIN and COS functions are in radians.
+
+Reference:     Kahaner, D., C. Moler, and S. Nash, (1989). 
+               Numerical Methods and Software.  
+               Englewood Cliffs, NJ: Prentice Hall, pp. 441-445.
+
+Data:          1 Response  (y = atmospheric pressure)
+               1 Predictor (x = time)
+               168 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               9 Parameters (b1 to b9)
+
+               y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 ) 
+                      + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
+                      + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 )  + e
+ 
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   11.0        10.0          1.0510749193E+01  1.7488832467E-01
+  b2 =    3.0         3.0          3.0762128085E+00  2.4310052139E-01
+  b3 =    0.5         0.5          5.3280138227E-01  2.4354686618E-01
+  b4 =   40.0        44.0          4.4311088700E+01  9.4408025976E-01
+  b5 =   -0.7        -1.5         -1.6231428586E+00  2.8078369611E-01
+  b6 =   -1.3         0.5          5.2554493756E-01  4.8073701119E-01
+  b7 =   25.0        26.0          2.6887614440E+01  4.1612939130E-01
+  b8 =   -0.3        -0.1          2.1232288488E-01  5.1460022911E-01
+  b9 =    1.4         1.5          1.4966870418E+00  2.5434468893E-01
+
+Residual Sum of Squares:                    7.8853978668E+02
+Residual Standard Deviation:                2.2269642403E+00
+Degrees of Freedom:                               159
+Number of Observations:                           168
+
+
+
+
+
+Data:   y          x
+    12.90000    1.000000
+    11.30000    2.000000
+    10.60000    3.000000
+    11.20000    4.000000
+    10.90000    5.000000
+    7.500000    6.000000
+    7.700000    7.000000
+    11.70000    8.000000
+    12.90000    9.000000
+    14.30000   10.000000
+    10.90000    11.00000
+    13.70000    12.00000
+    17.10000    13.00000
+    14.00000    14.00000
+    15.30000    15.00000
+    8.500000    16.00000
+    5.700000    17.00000
+    5.500000    18.00000
+    7.600000    19.00000
+    8.600000    20.00000
+    7.300000    21.00000
+    7.600000    22.00000
+    12.70000    23.00000
+    11.00000    24.00000
+    12.70000    25.00000
+    12.90000    26.00000
+    13.00000    27.00000
+    10.90000    28.00000
+   10.400000    29.00000
+   10.200000    30.00000
+    8.000000    31.00000
+    10.90000    32.00000
+    13.60000    33.00000
+   10.500000    34.00000
+    9.200000    35.00000
+    12.40000    36.00000
+    12.70000    37.00000
+    13.30000    38.00000
+   10.100000    39.00000
+    7.800000    40.00000
+    4.800000    41.00000
+    3.000000    42.00000
+    2.500000    43.00000
+    6.300000    44.00000
+    9.700000    45.00000
+    11.60000    46.00000
+    8.600000    47.00000
+    12.40000    48.00000
+   10.500000    49.00000
+    13.30000    50.00000
+   10.400000    51.00000
+    8.100000    52.00000
+    3.700000    53.00000
+    10.70000    54.00000
+    5.100000    55.00000
+   10.400000    56.00000
+    10.90000    57.00000
+    11.70000    58.00000
+    11.40000    59.00000
+    13.70000    60.00000
+    14.10000    61.00000
+    14.00000    62.00000
+    12.50000    63.00000
+    6.300000    64.00000
+    9.600000    65.00000
+    11.70000    66.00000
+    5.000000    67.00000
+    10.80000    68.00000
+    12.70000    69.00000
+    10.80000    70.00000
+    11.80000    71.00000
+    12.60000    72.00000
+    15.70000    73.00000
+    12.60000    74.00000
+    14.80000    75.00000
+    7.800000    76.00000
+    7.100000    77.00000
+    11.20000    78.00000
+    8.100000    79.00000
+    6.400000    80.00000
+    5.200000    81.00000
+    12.00000    82.00000
+   10.200000    83.00000
+    12.70000    84.00000
+   10.200000    85.00000
+    14.70000    86.00000
+    12.20000    87.00000
+    7.100000    88.00000
+    5.700000    89.00000
+    6.700000    90.00000
+    3.900000    91.00000
+    8.500000    92.00000
+    8.300000    93.00000
+    10.80000    94.00000
+    16.70000    95.00000
+    12.60000    96.00000
+    12.50000    97.00000
+    12.50000    98.00000
+    9.800000    99.00000
+    7.200000   100.00000
+    4.100000   101.00000
+    10.60000   102.00000
+   10.100000   103.00000
+   10.100000   104.00000
+    11.90000   105.00000
+    13.60000    106.0000
+    16.30000    107.0000
+    17.60000    108.0000
+    15.50000    109.0000
+    16.00000    110.0000
+    15.20000    111.0000
+    11.20000    112.0000
+    14.30000    113.0000
+    14.50000    114.0000
+    8.500000    115.0000
+    12.00000    116.0000
+    12.70000    117.0000
+    11.30000    118.0000
+    14.50000    119.0000
+    15.10000    120.0000
+   10.400000    121.0000
+    11.50000    122.0000
+    13.40000    123.0000
+    7.500000    124.0000
+   0.6000000    125.0000
+   0.3000000    126.0000
+    5.500000    127.0000
+    5.000000    128.0000
+    4.600000    129.0000
+    8.200000    130.0000
+    9.900000    131.0000
+    9.200000    132.0000
+    12.50000    133.0000
+    10.90000    134.0000
+    9.900000    135.0000
+    8.900000    136.0000
+    7.600000    137.0000
+    9.500000    138.0000
+    8.400000    139.0000
+    10.70000    140.0000
+    13.60000    141.0000
+    13.70000    142.0000
+    13.70000    143.0000
+    16.50000    144.0000
+    16.80000    145.0000
+    17.10000    146.0000
+    15.40000    147.0000
+    9.500000    148.0000
+    6.100000    149.0000
+   10.100000    150.0000
+    9.300000    151.0000
+    5.300000    152.0000
+    11.20000    153.0000
+    16.60000    154.0000
+    15.60000    155.0000
+    12.00000    156.0000
+    11.50000    157.0000
+    8.600000    158.0000
+    13.80000    159.0000
+    8.700000    160.0000
+    8.600000    161.0000
+    8.600000    162.0000
+    8.700000    163.0000
+    12.80000    164.0000
+    13.20000    165.0000
+    14.00000    166.0000
+    13.40000    167.0000
+    14.80000    168.0000
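
The ENSO file above carries the 9-parameter Fourier benchmark. A sketch of
the same fit through lmfit.minimize, assuming the script runs in a directory
containing ENSO.dat (per the file header, observations occupy lines 61 to
228, hence skiprows=60):

    import numpy as np
    import lmfit

    # y and x columns of ENSO.dat; the header says data start at line 61.
    y, x = np.loadtxt('ENSO.dat', skiprows=60, unpack=True)

    def residual(params, x, y):
        v = params.valuesdict()
        pi = np.pi
        model = (v['b1']
                 + v['b2'] * np.cos(2*pi*x/12) + v['b3'] * np.sin(2*pi*x/12)
                 + v['b5'] * np.cos(2*pi*x/v['b4'])
                 + v['b6'] * np.sin(2*pi*x/v['b4'])
                 + v['b8'] * np.cos(2*pi*x/v['b7'])
                 + v['b9'] * np.sin(2*pi*x/v['b7']))
        return model - y

    params = lmfit.Parameters()
    start1 = [11.0, 3.0, 0.5, 40.0, -0.7, -1.3, 25.0, -0.3, 1.4]
    for i, value in enumerate(start1, start=1):
        params.add('b%d' % i, value=value)

    result = lmfit.minimize(residual, params, args=(x, y))
    print(result.params['b4'].value)  # certified: 4.4311088700E+01
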
diff --git a/NIST_STRD/Eckerle4.dat b/NIST_STRD/Eckerle4.dat
index 2d0d8bf..dd54f5a 100644
--- a/NIST_STRD/Eckerle4.dat
+++ b/NIST_STRD/Eckerle4.dat
@@ -1,95 +1,95 @@
-NIST/ITL StRD
-Dataset Name:  Eckerle4          (Eckerle4.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 43)
-               Certified Values  (lines 41 to 48)
-               Data              (lines 61 to 95)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               circular interference transmittance.  The response
-               variable is transmittance, and the predictor variable
-               is wavelength.
-
-
-Reference:     Eckerle, K., NIST (197?).  
-               Circular Interference Transmittance Study.
-
-
-
-
-
-
-Data:          1 Response Variable  (y = transmittance)
-               1 Predictor Variable (x = wavelength)
-               35 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
-
-               y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2]  +  e
-
-
-
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =     1           1.5         1.5543827178E+00  1.5408051163E-02
-  b2 =    10           5           4.0888321754E+00  4.6803020753E-02
-  b3 =   500         450           4.5154121844E+02  4.6800518816E-02
-
-Residual Sum of Squares:                    1.4635887487E-03
-Residual Standard Deviation:                6.7629245447E-03
-Degrees of Freedom:                                32
-Number of Observations:                            35
-
-
-
-
-
-
-
-
-
-
-
-Data:  y                x
-      0.0001575E0    400.000000E0
-      0.0001699E0    405.000000E0
-      0.0002350E0    410.000000E0
-      0.0003102E0    415.000000E0
-      0.0004917E0    420.000000E0
-      0.0008710E0    425.000000E0
-      0.0017418E0    430.000000E0
-      0.0046400E0    435.000000E0
-      0.0065895E0    436.500000E0
-      0.0097302E0    438.000000E0
-      0.0149002E0    439.500000E0
-      0.0237310E0    441.000000E0
-      0.0401683E0    442.500000E0
-      0.0712559E0    444.000000E0
-      0.1264458E0    445.500000E0
-      0.2073413E0    447.000000E0
-      0.2902366E0    448.500000E0
-      0.3445623E0    450.000000E0
-      0.3698049E0    451.500000E0
-      0.3668534E0    453.000000E0
-      0.3106727E0    454.500000E0
-      0.2078154E0    456.000000E0
-      0.1164354E0    457.500000E0
-      0.0616764E0    459.000000E0
-      0.0337200E0    460.500000E0
-      0.0194023E0    462.000000E0
-      0.0117831E0    463.500000E0
-      0.0074357E0    465.000000E0
-      0.0022732E0    470.000000E0
-      0.0008800E0    475.000000E0
-      0.0004579E0    480.000000E0
-      0.0002345E0    485.000000E0
-      0.0001586E0    490.000000E0
-      0.0001143E0    495.000000E0
-      0.0000710E0    500.000000E0
+NIST/ITL StRD
+Dataset Name:  Eckerle4          (Eckerle4.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 43)
+               Certified Values  (lines 41 to 48)
+               Data              (lines 61 to 95)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               circular interference transmittance.  The response
+               variable is transmittance, and the predictor variable
+               is wavelength.
+
+
+Reference:     Eckerle, K., NIST (197?).  
+               Circular Interference Transmittance Study.
+
+
+
+
+
+
+Data:          1 Response Variable  (y = transmittance)
+               1 Predictor Variable (x = wavelength)
+               35 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+
+               y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2]  +  e
+
+
+
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =     1           1.5         1.5543827178E+00  1.5408051163E-02
+  b2 =    10           5           4.0888321754E+00  4.6803020753E-02
+  b3 =   500         450           4.5154121844E+02  4.6800518816E-02
+
+Residual Sum of Squares:                    1.4635887487E-03
+Residual Standard Deviation:                6.7629245447E-03
+Degrees of Freedom:                                32
+Number of Observations:                            35
+
+
+
+
+
+
+
+
+
+
+
+Data:  y                x
+      0.0001575E0    400.000000E0
+      0.0001699E0    405.000000E0
+      0.0002350E0    410.000000E0
+      0.0003102E0    415.000000E0
+      0.0004917E0    420.000000E0
+      0.0008710E0    425.000000E0
+      0.0017418E0    430.000000E0
+      0.0046400E0    435.000000E0
+      0.0065895E0    436.500000E0
+      0.0097302E0    438.000000E0
+      0.0149002E0    439.500000E0
+      0.0237310E0    441.000000E0
+      0.0401683E0    442.500000E0
+      0.0712559E0    444.000000E0
+      0.1264458E0    445.500000E0
+      0.2073413E0    447.000000E0
+      0.2902366E0    448.500000E0
+      0.3445623E0    450.000000E0
+      0.3698049E0    451.500000E0
+      0.3668534E0    453.000000E0
+      0.3106727E0    454.500000E0
+      0.2078154E0    456.000000E0
+      0.1164354E0    457.500000E0
+      0.0616764E0    459.000000E0
+      0.0337200E0    460.500000E0
+      0.0194023E0    462.000000E0
+      0.0117831E0    463.500000E0
+      0.0074357E0    465.000000E0
+      0.0022732E0    470.000000E0
+      0.0008800E0    475.000000E0
+      0.0004579E0    480.000000E0
+      0.0002345E0    485.000000E0
+      0.0001586E0    490.000000E0
+      0.0001143E0    495.000000E0
+      0.0000710E0    500.000000E0
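
Eckerle4 is flagged above as a higher-difficulty case. A sketch using the
lmfit.Model wrapper and the gentler "Start 2" values, again assuming
Eckerle4.dat sits in the working directory; whether a plain
Levenberg-Marquardt run also converges from Start 1 (1, 10, 500) is part of
what the benchmark probes:

    import numpy as np
    import lmfit

    def eckerle4(x, b1, b2, b3):
        # y = (b1/b2) * exp(-0.5*((x - b3)/b2)**2), the model stated above.
        return (b1 / b2) * np.exp(-0.5 * ((x - b3) / b2) ** 2)

    # Data lines 61 to 95 of Eckerle4.dat (y column first, then x).
    y, x = np.loadtxt('Eckerle4.dat', skiprows=60, unpack=True)

    model = lmfit.Model(eckerle4)
    result = model.fit(y, x=x, b1=1.5, b2=5.0, b3=450.0)  # "Start 2"
    print(result.fit_report())  # certified: b1=1.5544, b2=4.0888, b3=451.54
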
diff --git a/NIST_STRD/Gauss1.dat b/NIST_STRD/Gauss1.dat
index df8dfac..89c389e 100644
--- a/NIST_STRD/Gauss1.dat
+++ b/NIST_STRD/Gauss1.dat
@@ -1,310 +1,310 @@
-NIST/ITL StRD
-Dataset Name:  Gauss1            (Gauss1.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  48)
-               Certified Values  (lines 41 to  53)
-               Data              (lines 61 to 310)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   The data are two well-separated Gaussians on a 
-               decaying exponential baseline plus normally 
-               distributed zero-mean noise with variance = 6.25.
-
-Reference:     Rust, B., NIST (1996).
-
-
-
-
-
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               250 Observations
-               Lower Level of Difficulty
-               Generated Data
- 
-Model:         Exponential Class
-               8 Parameters (b1 to b8) 
- 
-               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
-                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
- 
- 
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    97.0        94.0         9.8778210871E+01  5.7527312730E-01
-  b2 =     0.009       0.0105      1.0497276517E-02  1.1406289017E-04
-  b3 =   100.0        99.0         1.0048990633E+02  5.8831775752E-01
-  b4 =    65.0        63.0         6.7481111276E+01  1.0460593412E-01
-  b5 =    20.0        25.0         2.3129773360E+01  1.7439951146E-01
-  b6 =    70.0        71.0         7.1994503004E+01  6.2622793913E-01
-  b7 =   178.0       180.0         1.7899805021E+02  1.2436988217E-01
-  b8 =    16.5        20.0         1.8389389025E+01  2.0134312832E-01
-
-Residual Sum of Squares:                    1.3158222432E+03
-Residual Standard Deviation:                2.3317980180E+00
-Degrees of Freedom:                               242
-Number of Observations:                           250
-
-
-
-
- 
-
-Data:   y          x
-    97.62227    1.000000
-    97.80724    2.000000
-    96.62247    3.000000
-    92.59022    4.000000
-    91.23869    5.000000
-    95.32704    6.000000
-    90.35040    7.000000
-    89.46235    8.000000
-    91.72520    9.000000
-    89.86916   10.000000
-    86.88076    11.00000
-    85.94360    12.00000
-    87.60686    13.00000
-    86.25839    14.00000
-    80.74976    15.00000
-    83.03551    16.00000
-    88.25837    17.00000
-    82.01316    18.00000
-    82.74098    19.00000
-    83.30034    20.00000
-    81.27850    21.00000
-    81.85506    22.00000
-    80.75195    23.00000
-    80.09573    24.00000
-    81.07633    25.00000
-    78.81542    26.00000
-    78.38596    27.00000
-    79.93386    28.00000
-    79.48474    29.00000
-    79.95942    30.00000
-    76.10691    31.00000
-    78.39830    32.00000
-    81.43060    33.00000
-    82.48867    34.00000
-    81.65462    35.00000
-    80.84323    36.00000
-    88.68663    37.00000
-    84.74438    38.00000
-    86.83934    39.00000
-    85.97739    40.00000
-    91.28509    41.00000
-    97.22411    42.00000
-    93.51733    43.00000
-    94.10159    44.00000
-   101.91760    45.00000
-    98.43134    46.00000
-    110.4214    47.00000
-    107.6628    48.00000
-    111.7288    49.00000
-    116.5115    50.00000
-    120.7609    51.00000
-    123.9553    52.00000
-    124.2437    53.00000
-    130.7996    54.00000
-    133.2960    55.00000
-    130.7788    56.00000
-    132.0565    57.00000
-    138.6584    58.00000
-    142.9252    59.00000
-    142.7215    60.00000
-    144.1249    61.00000
-    147.4377    62.00000
-    148.2647    63.00000
-    152.0519    64.00000
-    147.3863    65.00000
-    149.2074    66.00000
-    148.9537    67.00000
-    144.5876    68.00000
-    148.1226    69.00000
-    148.0144    70.00000
-    143.8893    71.00000
-    140.9088    72.00000
-    143.4434    73.00000
-    139.3938    74.00000
-    135.9878    75.00000
-    136.3927    76.00000
-    126.7262    77.00000
-    124.4487    78.00000
-    122.8647    79.00000
-    113.8557    80.00000
-    113.7037    81.00000
-    106.8407    82.00000
-    107.0034    83.00000
-   102.46290    84.00000
-    96.09296    85.00000
-    94.57555    86.00000
-    86.98824    87.00000
-    84.90154    88.00000
-    81.18023    89.00000
-    76.40117    90.00000
-    67.09200    91.00000
-    72.67155    92.00000
-    68.10848    93.00000
-    67.99088    94.00000
-    63.34094    95.00000
-    60.55253    96.00000
-    56.18687    97.00000
-    53.64482    98.00000
-    53.70307    99.00000
-    48.07893   100.00000
-    42.21258   101.00000
-    45.65181   102.00000
-    41.69728   103.00000
-    41.24946   104.00000
-    39.21349   105.00000
-    37.71696    106.0000
-    36.68395    107.0000
-    37.30393    108.0000
-    37.43277    109.0000
-    37.45012    110.0000
-    32.64648    111.0000
-    31.84347    112.0000
-    31.39951    113.0000
-    26.68912    114.0000
-    32.25323    115.0000
-    27.61008    116.0000
-    33.58649    117.0000
-    28.10714    118.0000
-    30.26428    119.0000
-    28.01648    120.0000
-    29.11021    121.0000
-    23.02099    122.0000
-    25.65091    123.0000
-    28.50295    124.0000
-    25.23701    125.0000
-    26.13828    126.0000
-    33.53260    127.0000
-    29.25195    128.0000
-    27.09847    129.0000
-    26.52999    130.0000
-    25.52401    131.0000
-    26.69218    132.0000
-    24.55269    133.0000
-    27.71763    134.0000
-    25.20297    135.0000
-    25.61483    136.0000
-    25.06893    137.0000
-    27.63930    138.0000
-    24.94851    139.0000
-    25.86806    140.0000
-    22.48183    141.0000
-    26.90045    142.0000
-    25.39919    143.0000
-    17.90614    144.0000
-    23.76039    145.0000
-    25.89689    146.0000
-    27.64231    147.0000
-    22.86101    148.0000
-    26.47003    149.0000
-    23.72888    150.0000
-    27.54334    151.0000
-    30.52683    152.0000
-    28.07261    153.0000
-    34.92815    154.0000
-    28.29194    155.0000
-    34.19161    156.0000
-    35.41207    157.0000
-    37.09336    158.0000
-    40.98330    159.0000
-    39.53923    160.0000
-    47.80123    161.0000
-    47.46305    162.0000
-    51.04166    163.0000
-    54.58065    164.0000
-    57.53001    165.0000
-    61.42089    166.0000
-    62.79032    167.0000
-    68.51455    168.0000
-    70.23053    169.0000
-    74.42776    170.0000
-    76.59911    171.0000
-    81.62053    172.0000
-    83.42208    173.0000
-    79.17451    174.0000
-    88.56985    175.0000
-    85.66525    176.0000
-    86.55502    177.0000
-    90.65907    178.0000
-    84.27290    179.0000
-    85.72220    180.0000
-    83.10702    181.0000
-    82.16884    182.0000
-    80.42568    183.0000
-    78.15692    184.0000
-    79.79691    185.0000
-    77.84378    186.0000
-    74.50327    187.0000
-    71.57289    188.0000
-    65.88031    189.0000
-    65.01385    190.0000
-    60.19582    191.0000
-    59.66726    192.0000
-    52.95478    193.0000
-    53.87792    194.0000
-    44.91274    195.0000
-    41.09909    196.0000
-    41.68018    197.0000
-    34.53379    198.0000
-    34.86419    199.0000
-    33.14787    200.0000
-    29.58864    201.0000
-    27.29462    202.0000
-    21.91439    203.0000
-    19.08159    204.0000
-    24.90290    205.0000
-    19.82341    206.0000
-    16.75551    207.0000
-    18.24558    208.0000
-    17.23549    209.0000
-    16.34934    210.0000
-    13.71285    211.0000
-    14.75676    212.0000
-    13.97169    213.0000
-    12.42867    214.0000
-    14.35519    215.0000
-    7.703309    216.0000
-   10.234410    217.0000
-    11.78315    218.0000
-    13.87768    219.0000
-    4.535700    220.0000
-   10.059280    221.0000
-    8.424824    222.0000
-   10.533120    223.0000
-    9.602255    224.0000
-    7.877514    225.0000
-    6.258121    226.0000
-    8.899865    227.0000
-    7.877754    228.0000
-    12.51191    229.0000
-    10.66205    230.0000
-    6.035400    231.0000
-    6.790655    232.0000
-    8.783535    233.0000
-    4.600288    234.0000
-    8.400915    235.0000
-    7.216561    236.0000
-   10.017410    237.0000
-    7.331278    238.0000
-    6.527863    239.0000
-    2.842001    240.0000
-   10.325070    241.0000
-    4.790995    242.0000
-    8.377101    243.0000
-    6.264445    244.0000
-    2.706213    245.0000
-    8.362329    246.0000
-    8.983658    247.0000
-    3.362571    248.0000
-    1.182746    249.0000
-    4.875359    250.0000
+NIST/ITL StRD
+Dataset Name:  Gauss1            (Gauss1.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  48)
+               Certified Values  (lines 41 to  53)
+               Data              (lines 61 to 310)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   The data are two well-separated Gaussians on a 
+               decaying exponential baseline plus normally 
+               distributed zero-mean noise with variance = 6.25.
+
+Reference:     Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               250 Observations
+               Lower Level of Difficulty
+               Generated Data
+ 
+Model:         Exponential Class
+               8 Parameters (b1 to b8) 
+ 
+               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
+ 
+ 
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =    97.0        94.0         9.8778210871E+01  5.7527312730E-01
+  b2 =     0.009       0.0105      1.0497276517E-02  1.1406289017E-04
+  b3 =   100.0        99.0         1.0048990633E+02  5.8831775752E-01
+  b4 =    65.0        63.0         6.7481111276E+01  1.0460593412E-01
+  b5 =    20.0        25.0         2.3129773360E+01  1.7439951146E-01
+  b6 =    70.0        71.0         7.1994503004E+01  6.2622793913E-01
+  b7 =   178.0       180.0         1.7899805021E+02  1.2436988217E-01
+  b8 =    16.5        20.0         1.8389389025E+01  2.0134312832E-01
+
+Residual Sum of Squares:                    1.3158222432E+03
+Residual Standard Deviation:                2.3317980180E+00
+Degrees of Freedom:                               242
+Number of Observations:                           250
+
+
+
+
+ 
+
+Data:   y          x
+    97.62227    1.000000
+    97.80724    2.000000
+    96.62247    3.000000
+    92.59022    4.000000
+    91.23869    5.000000
+    95.32704    6.000000
+    90.35040    7.000000
+    89.46235    8.000000
+    91.72520    9.000000
+    89.86916   10.000000
+    86.88076    11.00000
+    85.94360    12.00000
+    87.60686    13.00000
+    86.25839    14.00000
+    80.74976    15.00000
+    83.03551    16.00000
+    88.25837    17.00000
+    82.01316    18.00000
+    82.74098    19.00000
+    83.30034    20.00000
+    81.27850    21.00000
+    81.85506    22.00000
+    80.75195    23.00000
+    80.09573    24.00000
+    81.07633    25.00000
+    78.81542    26.00000
+    78.38596    27.00000
+    79.93386    28.00000
+    79.48474    29.00000
+    79.95942    30.00000
+    76.10691    31.00000
+    78.39830    32.00000
+    81.43060    33.00000
+    82.48867    34.00000
+    81.65462    35.00000
+    80.84323    36.00000
+    88.68663    37.00000
+    84.74438    38.00000
+    86.83934    39.00000
+    85.97739    40.00000
+    91.28509    41.00000
+    97.22411    42.00000
+    93.51733    43.00000
+    94.10159    44.00000
+   101.91760    45.00000
+    98.43134    46.00000
+    110.4214    47.00000
+    107.6628    48.00000
+    111.7288    49.00000
+    116.5115    50.00000
+    120.7609    51.00000
+    123.9553    52.00000
+    124.2437    53.00000
+    130.7996    54.00000
+    133.2960    55.00000
+    130.7788    56.00000
+    132.0565    57.00000
+    138.6584    58.00000
+    142.9252    59.00000
+    142.7215    60.00000
+    144.1249    61.00000
+    147.4377    62.00000
+    148.2647    63.00000
+    152.0519    64.00000
+    147.3863    65.00000
+    149.2074    66.00000
+    148.9537    67.00000
+    144.5876    68.00000
+    148.1226    69.00000
+    148.0144    70.00000
+    143.8893    71.00000
+    140.9088    72.00000
+    143.4434    73.00000
+    139.3938    74.00000
+    135.9878    75.00000
+    136.3927    76.00000
+    126.7262    77.00000
+    124.4487    78.00000
+    122.8647    79.00000
+    113.8557    80.00000
+    113.7037    81.00000
+    106.8407    82.00000
+    107.0034    83.00000
+   102.46290    84.00000
+    96.09296    85.00000
+    94.57555    86.00000
+    86.98824    87.00000
+    84.90154    88.00000
+    81.18023    89.00000
+    76.40117    90.00000
+    67.09200    91.00000
+    72.67155    92.00000
+    68.10848    93.00000
+    67.99088    94.00000
+    63.34094    95.00000
+    60.55253    96.00000
+    56.18687    97.00000
+    53.64482    98.00000
+    53.70307    99.00000
+    48.07893   100.00000
+    42.21258   101.00000
+    45.65181   102.00000
+    41.69728   103.00000
+    41.24946   104.00000
+    39.21349   105.00000
+    37.71696    106.0000
+    36.68395    107.0000
+    37.30393    108.0000
+    37.43277    109.0000
+    37.45012    110.0000
+    32.64648    111.0000
+    31.84347    112.0000
+    31.39951    113.0000
+    26.68912    114.0000
+    32.25323    115.0000
+    27.61008    116.0000
+    33.58649    117.0000
+    28.10714    118.0000
+    30.26428    119.0000
+    28.01648    120.0000
+    29.11021    121.0000
+    23.02099    122.0000
+    25.65091    123.0000
+    28.50295    124.0000
+    25.23701    125.0000
+    26.13828    126.0000
+    33.53260    127.0000
+    29.25195    128.0000
+    27.09847    129.0000
+    26.52999    130.0000
+    25.52401    131.0000
+    26.69218    132.0000
+    24.55269    133.0000
+    27.71763    134.0000
+    25.20297    135.0000
+    25.61483    136.0000
+    25.06893    137.0000
+    27.63930    138.0000
+    24.94851    139.0000
+    25.86806    140.0000
+    22.48183    141.0000
+    26.90045    142.0000
+    25.39919    143.0000
+    17.90614    144.0000
+    23.76039    145.0000
+    25.89689    146.0000
+    27.64231    147.0000
+    22.86101    148.0000
+    26.47003    149.0000
+    23.72888    150.0000
+    27.54334    151.0000
+    30.52683    152.0000
+    28.07261    153.0000
+    34.92815    154.0000
+    28.29194    155.0000
+    34.19161    156.0000
+    35.41207    157.0000
+    37.09336    158.0000
+    40.98330    159.0000
+    39.53923    160.0000
+    47.80123    161.0000
+    47.46305    162.0000
+    51.04166    163.0000
+    54.58065    164.0000
+    57.53001    165.0000
+    61.42089    166.0000
+    62.79032    167.0000
+    68.51455    168.0000
+    70.23053    169.0000
+    74.42776    170.0000
+    76.59911    171.0000
+    81.62053    172.0000
+    83.42208    173.0000
+    79.17451    174.0000
+    88.56985    175.0000
+    85.66525    176.0000
+    86.55502    177.0000
+    90.65907    178.0000
+    84.27290    179.0000
+    85.72220    180.0000
+    83.10702    181.0000
+    82.16884    182.0000
+    80.42568    183.0000
+    78.15692    184.0000
+    79.79691    185.0000
+    77.84378    186.0000
+    74.50327    187.0000
+    71.57289    188.0000
+    65.88031    189.0000
+    65.01385    190.0000
+    60.19582    191.0000
+    59.66726    192.0000
+    52.95478    193.0000
+    53.87792    194.0000
+    44.91274    195.0000
+    41.09909    196.0000
+    41.68018    197.0000
+    34.53379    198.0000
+    34.86419    199.0000
+    33.14787    200.0000
+    29.58864    201.0000
+    27.29462    202.0000
+    21.91439    203.0000
+    19.08159    204.0000
+    24.90290    205.0000
+    19.82341    206.0000
+    16.75551    207.0000
+    18.24558    208.0000
+    17.23549    209.0000
+    16.34934    210.0000
+    13.71285    211.0000
+    14.75676    212.0000
+    13.97169    213.0000
+    12.42867    214.0000
+    14.35519    215.0000
+    7.703309    216.0000
+   10.234410    217.0000
+    11.78315    218.0000
+    13.87768    219.0000
+    4.535700    220.0000
+   10.059280    221.0000
+    8.424824    222.0000
+   10.533120    223.0000
+    9.602255    224.0000
+    7.877514    225.0000
+    6.258121    226.0000
+    8.899865    227.0000
+    7.877754    228.0000
+    12.51191    229.0000
+    10.66205    230.0000
+    6.035400    231.0000
+    6.790655    232.0000
+    8.783535    233.0000
+    4.600288    234.0000
+    8.400915    235.0000
+    7.216561    236.0000
+   10.017410    237.0000
+    7.331278    238.0000
+    6.527863    239.0000
+    2.842001    240.0000
+   10.325070    241.0000
+    4.790995    242.0000
+    8.377101    243.0000
+    6.264445    244.0000
+    2.706213    245.0000
+    8.362329    246.0000
+    8.983658    247.0000
+    3.362571    248.0000
+    1.182746    249.0000
+    4.875359    250.0000
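
Gauss1 is the easiest of the three generated two-Gaussians-on-a-decaying-
exponential sets. A sketch of the 8-parameter fit from "Start 1", assuming
Gauss1.dat is in the working directory:

    import numpy as np
    import lmfit

    # Data lines 61 to 310 of Gauss1.dat.
    y, x = np.loadtxt('Gauss1.dat', skiprows=60, unpack=True)

    def residual(params, x, y):
        # Exponential baseline plus two Gaussian peaks, as stated above.
        v = params.valuesdict()
        model = (v['b1'] * np.exp(-v['b2'] * x)
                 + v['b3'] * np.exp(-(x - v['b4']) ** 2 / v['b5'] ** 2)
                 + v['b6'] * np.exp(-(x - v['b7']) ** 2 / v['b8'] ** 2))
        return model - y

    params = lmfit.Parameters()
    for name, value in [('b1', 97.0), ('b2', 0.009), ('b3', 100.0),
                        ('b4', 65.0), ('b5', 20.0), ('b6', 70.0),
                        ('b7', 178.0), ('b8', 16.5)]:
        params.add(name, value=value)

    result = lmfit.minimize(residual, params, args=(x, y))
    print((result.residual ** 2).sum())  # certified SSR: 1.3158222432E+03
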
diff --git a/NIST_STRD/Gauss2.dat b/NIST_STRD/Gauss2.dat
index 38222eb..ff185d1 100644
--- a/NIST_STRD/Gauss2.dat
+++ b/NIST_STRD/Gauss2.dat
@@ -1,310 +1,310 @@
-NIST/ITL StRD
-Dataset Name:  Gauss2            (Gauss2.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  48)
-               Certified Values  (lines 41 to  53)
-               Data              (lines 61 to 310)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   The data are two slightly-blended Gaussians on a 
-               decaying exponential baseline plus normally 
-               distributed zero-mean noise with variance = 6.25. 
-
-Reference:     Rust, B., NIST (1996). 
-
-
-
-
-
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               250 Observations
-               Lower Level of Difficulty
-               Generated Data
-
-Model:         Exponential Class
-               8 Parameters (b1 to b8)
-
-               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) 
-                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    96.0        98.0         9.9018328406E+01  5.3748766879E-01
-  b2 =     0.009       0.0105      1.0994945399E-02  1.3335306766E-04
-  b3 =   103.0       103.0         1.0188022528E+02  5.9217315772E-01
-  b4 =   106.0       105.0         1.0703095519E+02  1.5006798316E-01
-  b5 =    18.0        20.0         2.3578584029E+01  2.2695595067E-01
-  b6 =    72.0        73.0         7.2045589471E+01  6.1721965884E-01
-  b7 =   151.0       150.0         1.5327010194E+02  1.9466674341E-01
-  b8 =    18.0        20.0         1.9525972636E+01  2.6416549393E-01
-
-Residual Sum of Squares:                    1.2475282092E+03
-Residual Standard Deviation:                2.2704790782E+00
-Degrees of Freedom:                               242
-Number of Observations:                           250
-
-
-
-
-
- 
-Data:   y          x
-    97.58776    1.000000
-    97.76344    2.000000
-    96.56705    3.000000
-    92.52037    4.000000
-    91.15097    5.000000
-    95.21728    6.000000
-    90.21355    7.000000
-    89.29235    8.000000
-    91.51479    9.000000
-    89.60966   10.000000
-    86.56187    11.00000
-    85.55316    12.00000
-    87.13054    13.00000
-    85.67940    14.00000
-    80.04851    15.00000
-    82.18925    16.00000
-    87.24081    17.00000
-    80.79407    18.00000
-    81.28570    19.00000
-    81.56940    20.00000
-    79.22715    21.00000
-    79.43275    22.00000
-    77.90195    23.00000
-    76.75468    24.00000
-    77.17377    25.00000
-    74.27348    26.00000
-    73.11900    27.00000
-    73.84826    28.00000
-    72.47870    29.00000
-    71.92292    30.00000
-    66.92176    31.00000
-    67.93835    32.00000
-    69.56207    33.00000
-    69.07066    34.00000
-    66.53983    35.00000
-    63.87883    36.00000
-    69.71537    37.00000
-    63.60588    38.00000
-    63.37154    39.00000
-    60.01835    40.00000
-    62.67481    41.00000
-    65.80666    42.00000
-    59.14304    43.00000
-    56.62951    44.00000
-    61.21785    45.00000
-    54.38790    46.00000
-    62.93443    47.00000
-    56.65144    48.00000
-    57.13362    49.00000
-    58.29689    50.00000
-    58.91744    51.00000
-    58.50172    52.00000
-    55.22885    53.00000
-    58.30375    54.00000
-    57.43237    55.00000
-    51.69407    56.00000
-    49.93132    57.00000
-    53.70760    58.00000
-    55.39712    59.00000
-    52.89709    60.00000
-    52.31649    61.00000
-    53.98720    62.00000
-    53.54158    63.00000
-    56.45046    64.00000
-    51.32276    65.00000
-    53.11676    66.00000
-    53.28631    67.00000
-    49.80555    68.00000
-    54.69564    69.00000
-    56.41627    70.00000
-    54.59362    71.00000
-    54.38520    72.00000
-    60.15354    73.00000
-    59.78773    74.00000
-    60.49995    75.00000
-    65.43885    76.00000
-    60.70001    77.00000
-    63.71865    78.00000
-    67.77139    79.00000
-    64.70934    80.00000
-    70.78193    81.00000
-    70.38651    82.00000
-    77.22359    83.00000
-    79.52665    84.00000
-    80.13077    85.00000
-    85.67823    86.00000
-    85.20647    87.00000
-    90.24548    88.00000
-    93.61953    89.00000
-    95.86509    90.00000
-    93.46992    91.00000
-    105.8137    92.00000
-    107.8269    93.00000
-    114.0607    94.00000
-    115.5019    95.00000
-    118.5110    96.00000
-    119.6177    97.00000
-    122.1940    98.00000
-    126.9903    99.00000
-    125.7005   100.00000
-    123.7447   101.00000
-    130.6543   102.00000
-    129.7168   103.00000
-    131.8240   104.00000
-    131.8759   105.00000
-    131.9994    106.0000
-    132.1221    107.0000
-    133.4414    108.0000
-    133.8252    109.0000
-    133.6695    110.0000
-    128.2851    111.0000
-    126.5182    112.0000
-    124.7550    113.0000
-    118.4016    114.0000
-    122.0334    115.0000
-    115.2059    116.0000
-    118.7856    117.0000
-    110.7387    118.0000
-    110.2003    119.0000
-   105.17290    120.0000
-   103.44720    121.0000
-    94.54280    122.0000
-    94.40526    123.0000
-    94.57964    124.0000
-    88.76605    125.0000
-    87.28747    126.0000
-    92.50443    127.0000
-    86.27997    128.0000
-    82.44307    129.0000
-    80.47367    130.0000
-    78.36608    131.0000
-    78.74307    132.0000
-    76.12786    133.0000
-    79.13108    134.0000
-    76.76062    135.0000
-    77.60769    136.0000
-    77.76633    137.0000
-    81.28220    138.0000
-    79.74307    139.0000
-    81.97964    140.0000
-    80.02952    141.0000
-    85.95232    142.0000
-    85.96838    143.0000
-    79.94789    144.0000
-    87.17023    145.0000
-    90.50992    146.0000
-    93.23373    147.0000
-    89.14803    148.0000
-    93.11492    149.0000
-    90.34337    150.0000
-    93.69421    151.0000
-    95.74256    152.0000
-    91.85105    153.0000
-    96.74503    154.0000
-    87.60996    155.0000
-    90.47012    156.0000
-    88.11690    157.0000
-    85.70673    158.0000
-    85.01361    159.0000
-    78.53040    160.0000
-    81.34148    161.0000
-    75.19295    162.0000
-    72.66115    163.0000
-    69.85504    164.0000
-    66.29476    165.0000
-    63.58502    166.0000
-    58.33847    167.0000
-    57.50766    168.0000
-    52.80498    169.0000
-    50.79319    170.0000
-    47.03490    171.0000
-    46.47090    172.0000
-    43.09016    173.0000
-    34.11531    174.0000
-    39.28235    175.0000
-    32.68386    176.0000
-    30.44056    177.0000
-    31.98932    178.0000
-    23.63330    179.0000
-    23.69643    180.0000
-    20.26812    181.0000
-    19.07074    182.0000
-    17.59544    183.0000
-    16.08785    184.0000
-    18.94267    185.0000
-    18.61354    186.0000
-    17.25800    187.0000
-    16.62285    188.0000
-    13.48367    189.0000
-    15.37647    190.0000
-    13.47208    191.0000
-    15.96188    192.0000
-    12.32547    193.0000
-    16.33880    194.0000
-   10.438330    195.0000
-    9.628715    196.0000
-    13.12268    197.0000
-    8.772417    198.0000
-    11.76143    199.0000
-    12.55020    200.0000
-    11.33108    201.0000
-    11.20493    202.0000
-    7.816916    203.0000
-    6.800675    204.0000
-    14.26581    205.0000
-    10.66285    206.0000
-    8.911574    207.0000
-    11.56733    208.0000
-    11.58207    209.0000
-    11.59071    210.0000
-    9.730134    211.0000
-    11.44237    212.0000
-    11.22912    213.0000
-   10.172130    214.0000
-    12.50905    215.0000
-    6.201493    216.0000
-    9.019605    217.0000
-    10.80607    218.0000
-    13.09625    219.0000
-    3.914271    220.0000
-    9.567886    221.0000
-    8.038448    222.0000
-   10.231040    223.0000
-    9.367410    224.0000
-    7.695971    225.0000
-    6.118575    226.0000
-    8.793207    227.0000
-    7.796692    228.0000
-    12.45065    229.0000
-    10.61601    230.0000
-    6.001003    231.0000
-    6.765098    232.0000
-    8.764653    233.0000
-    4.586418    234.0000
-    8.390783    235.0000
-    7.209202    236.0000
-   10.012090    237.0000
-    7.327461    238.0000
-    6.525136    239.0000
-    2.840065    240.0000
-   10.323710    241.0000
-    4.790035    242.0000
-    8.376431    243.0000
-    6.263980    244.0000
-    2.705892    245.0000
-    8.362109    246.0000
-    8.983507    247.0000
-    3.362469    248.0000
-    1.182678    249.0000
-    4.875312    250.0000
+NIST/ITL StRD
+Dataset Name:  Gauss2            (Gauss2.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  48)
+               Certified Values  (lines 41 to  53)
+               Data              (lines 61 to 310)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   The data are two slightly-blended Gaussians on a 
+               decaying exponential baseline plus normally 
+               distributed zero-mean noise with variance = 6.25. 
+
+Reference:     Rust, B., NIST (1996). 
+
+
+
+
+
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               250 Observations
+               Lower Level of Difficulty
+               Generated Data
+
+Model:         Exponential Class
+               8 Parameters (b1 to b8)
+
+               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 ) 
+                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =    96.0        98.0         9.9018328406E+01  5.3748766879E-01
+  b2 =     0.009       0.0105      1.0994945399E-02  1.3335306766E-04
+  b3 =   103.0       103.0         1.0188022528E+02  5.9217315772E-01
+  b4 =   106.0       105.0         1.0703095519E+02  1.5006798316E-01
+  b5 =    18.0        20.0         2.3578584029E+01  2.2695595067E-01
+  b6 =    72.0        73.0         7.2045589471E+01  6.1721965884E-01
+  b7 =   151.0       150.0         1.5327010194E+02  1.9466674341E-01
+  b8 =    18.0        20.0         1.9525972636E+01  2.6416549393E-01
+
+Residual Sum of Squares:                    1.2475282092E+03
+Residual Standard Deviation:                2.2704790782E+00
+Degrees of Freedom:                               242
+Number of Observations:                           250
+
+
+
+
+
+ 
+Data:   y          x
+    97.58776    1.000000
+    97.76344    2.000000
+    96.56705    3.000000
+    92.52037    4.000000
+    91.15097    5.000000
+    95.21728    6.000000
+    90.21355    7.000000
+    89.29235    8.000000
+    91.51479    9.000000
+    89.60966   10.000000
+    86.56187    11.00000
+    85.55316    12.00000
+    87.13054    13.00000
+    85.67940    14.00000
+    80.04851    15.00000
+    82.18925    16.00000
+    87.24081    17.00000
+    80.79407    18.00000
+    81.28570    19.00000
+    81.56940    20.00000
+    79.22715    21.00000
+    79.43275    22.00000
+    77.90195    23.00000
+    76.75468    24.00000
+    77.17377    25.00000
+    74.27348    26.00000
+    73.11900    27.00000
+    73.84826    28.00000
+    72.47870    29.00000
+    71.92292    30.00000
+    66.92176    31.00000
+    67.93835    32.00000
+    69.56207    33.00000
+    69.07066    34.00000
+    66.53983    35.00000
+    63.87883    36.00000
+    69.71537    37.00000
+    63.60588    38.00000
+    63.37154    39.00000
+    60.01835    40.00000
+    62.67481    41.00000
+    65.80666    42.00000
+    59.14304    43.00000
+    56.62951    44.00000
+    61.21785    45.00000
+    54.38790    46.00000
+    62.93443    47.00000
+    56.65144    48.00000
+    57.13362    49.00000
+    58.29689    50.00000
+    58.91744    51.00000
+    58.50172    52.00000
+    55.22885    53.00000
+    58.30375    54.00000
+    57.43237    55.00000
+    51.69407    56.00000
+    49.93132    57.00000
+    53.70760    58.00000
+    55.39712    59.00000
+    52.89709    60.00000
+    52.31649    61.00000
+    53.98720    62.00000
+    53.54158    63.00000
+    56.45046    64.00000
+    51.32276    65.00000
+    53.11676    66.00000
+    53.28631    67.00000
+    49.80555    68.00000
+    54.69564    69.00000
+    56.41627    70.00000
+    54.59362    71.00000
+    54.38520    72.00000
+    60.15354    73.00000
+    59.78773    74.00000
+    60.49995    75.00000
+    65.43885    76.00000
+    60.70001    77.00000
+    63.71865    78.00000
+    67.77139    79.00000
+    64.70934    80.00000
+    70.78193    81.00000
+    70.38651    82.00000
+    77.22359    83.00000
+    79.52665    84.00000
+    80.13077    85.00000
+    85.67823    86.00000
+    85.20647    87.00000
+    90.24548    88.00000
+    93.61953    89.00000
+    95.86509    90.00000
+    93.46992    91.00000
+    105.8137    92.00000
+    107.8269    93.00000
+    114.0607    94.00000
+    115.5019    95.00000
+    118.5110    96.00000
+    119.6177    97.00000
+    122.1940    98.00000
+    126.9903    99.00000
+    125.7005   100.00000
+    123.7447   101.00000
+    130.6543   102.00000
+    129.7168   103.00000
+    131.8240   104.00000
+    131.8759   105.00000
+    131.9994    106.0000
+    132.1221    107.0000
+    133.4414    108.0000
+    133.8252    109.0000
+    133.6695    110.0000
+    128.2851    111.0000
+    126.5182    112.0000
+    124.7550    113.0000
+    118.4016    114.0000
+    122.0334    115.0000
+    115.2059    116.0000
+    118.7856    117.0000
+    110.7387    118.0000
+    110.2003    119.0000
+   105.17290    120.0000
+   103.44720    121.0000
+    94.54280    122.0000
+    94.40526    123.0000
+    94.57964    124.0000
+    88.76605    125.0000
+    87.28747    126.0000
+    92.50443    127.0000
+    86.27997    128.0000
+    82.44307    129.0000
+    80.47367    130.0000
+    78.36608    131.0000
+    78.74307    132.0000
+    76.12786    133.0000
+    79.13108    134.0000
+    76.76062    135.0000
+    77.60769    136.0000
+    77.76633    137.0000
+    81.28220    138.0000
+    79.74307    139.0000
+    81.97964    140.0000
+    80.02952    141.0000
+    85.95232    142.0000
+    85.96838    143.0000
+    79.94789    144.0000
+    87.17023    145.0000
+    90.50992    146.0000
+    93.23373    147.0000
+    89.14803    148.0000
+    93.11492    149.0000
+    90.34337    150.0000
+    93.69421    151.0000
+    95.74256    152.0000
+    91.85105    153.0000
+    96.74503    154.0000
+    87.60996    155.0000
+    90.47012    156.0000
+    88.11690    157.0000
+    85.70673    158.0000
+    85.01361    159.0000
+    78.53040    160.0000
+    81.34148    161.0000
+    75.19295    162.0000
+    72.66115    163.0000
+    69.85504    164.0000
+    66.29476    165.0000
+    63.58502    166.0000
+    58.33847    167.0000
+    57.50766    168.0000
+    52.80498    169.0000
+    50.79319    170.0000
+    47.03490    171.0000
+    46.47090    172.0000
+    43.09016    173.0000
+    34.11531    174.0000
+    39.28235    175.0000
+    32.68386    176.0000
+    30.44056    177.0000
+    31.98932    178.0000
+    23.63330    179.0000
+    23.69643    180.0000
+    20.26812    181.0000
+    19.07074    182.0000
+    17.59544    183.0000
+    16.08785    184.0000
+    18.94267    185.0000
+    18.61354    186.0000
+    17.25800    187.0000
+    16.62285    188.0000
+    13.48367    189.0000
+    15.37647    190.0000
+    13.47208    191.0000
+    15.96188    192.0000
+    12.32547    193.0000
+    16.33880    194.0000
+   10.438330    195.0000
+    9.628715    196.0000
+    13.12268    197.0000
+    8.772417    198.0000
+    11.76143    199.0000
+    12.55020    200.0000
+    11.33108    201.0000
+    11.20493    202.0000
+    7.816916    203.0000
+    6.800675    204.0000
+    14.26581    205.0000
+    10.66285    206.0000
+    8.911574    207.0000
+    11.56733    208.0000
+    11.58207    209.0000
+    11.59071    210.0000
+    9.730134    211.0000
+    11.44237    212.0000
+    11.22912    213.0000
+   10.172130    214.0000
+    12.50905    215.0000
+    6.201493    216.0000
+    9.019605    217.0000
+    10.80607    218.0000
+    13.09625    219.0000
+    3.914271    220.0000
+    9.567886    221.0000
+    8.038448    222.0000
+   10.231040    223.0000
+    9.367410    224.0000
+    7.695971    225.0000
+    6.118575    226.0000
+    8.793207    227.0000
+    7.796692    228.0000
+    12.45065    229.0000
+    10.61601    230.0000
+    6.001003    231.0000
+    6.765098    232.0000
+    8.764653    233.0000
+    4.586418    234.0000
+    8.390783    235.0000
+    7.209202    236.0000
+   10.012090    237.0000
+    7.327461    238.0000
+    6.525136    239.0000
+    2.840065    240.0000
+   10.323710    241.0000
+    4.790035    242.0000
+    8.376431    243.0000
+    6.263980    244.0000
+    2.705892    245.0000
+    8.362109    246.0000
+    8.983507    247.0000
+    3.362469    248.0000
+    1.182678    249.0000
+    4.875312    250.0000
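
Gauss2 shares Gauss1's functional form; only the blending of the two peaks
and the starting points differ, so one model function serves both starting
vectors from the table above. A sketch via lmfit.Model (Gauss2.dat assumed
local):

    import numpy as np
    import lmfit

    def two_gaussians(x, b1, b2, b3, b4, b5, b6, b7, b8):
        # Same exponential-plus-two-Gaussians form as Gauss1 above.
        return (b1 * np.exp(-b2 * x)
                + b3 * np.exp(-(x - b4) ** 2 / b5 ** 2)
                + b6 * np.exp(-(x - b7) ** 2 / b8 ** 2))

    y, x = np.loadtxt('Gauss2.dat', skiprows=60, unpack=True)
    model = lmfit.Model(two_gaussians)

    starts = {
        'Start 1': dict(b1=96.0, b2=0.009, b3=103.0, b4=106.0,
                        b5=18.0, b6=72.0, b7=151.0, b8=18.0),
        'Start 2': dict(b1=98.0, b2=0.0105, b3=103.0, b4=105.0,
                        b5=20.0, b6=73.0, b7=150.0, b8=20.0),
    }
    for label, start in starts.items():
        result = model.fit(y, x=x, **start)
        # Both starts should land on the certified b4 = 1.0703095519E+02.
        print('%s: b4 = %.6f' % (label, result.params['b4'].value))
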
diff --git a/NIST_STRD/Gauss3.dat b/NIST_STRD/Gauss3.dat
index e5eb56d..0f880b0 100644
--- a/NIST_STRD/Gauss3.dat
+++ b/NIST_STRD/Gauss3.dat
@@ -1,310 +1,310 @@
-NIST/ITL StRD
-Dataset Name:  Gauss3            (Gauss3.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  48)
-               Certified Values  (lines 41 to  53)
-               Data              (lines 61 to 310)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   The data are two strongly-blended Gaussians on a 
-               decaying exponential baseline plus normally 
-               distributed zero-mean noise with variance = 6.25.
-
-Reference:     Rust, B., NIST (1996).
-
-
-
-
-
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               250 Observations
-               Average Level of Difficulty
-               Generated Data
-
-Model:         Exponential Class
-               8 Parameters (b1 to b8)
-
-               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
-                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
- 
- 
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    94.9        96.0         9.8940368970E+01  5.3005192833E-01
-  b2 =     0.009       0.0096      1.0945879335E-02  1.2554058911E-04
-  b3 =    90.1        80.0         1.0069553078E+02  8.1256587317E-01
-  b4 =   113.0       110.0         1.1163619459E+02  3.5317859757E-01
-  b5 =    20.0        25.0         2.3300500029E+01  3.6584783023E-01
-  b6 =    73.8        74.0         7.3705031418E+01  1.2091239082E+00
-  b7 =   140.0       139.0         1.4776164251E+02  4.0488183351E-01
-  b8 =    20.0        25.0         1.9668221230E+01  3.7806634336E-01
-
-Residual Sum of Squares:                    1.2444846360E+03  
-Residual Standard Deviation:                2.2677077625E+00
-Degrees of Freedom:                               242
-Number of Observations:                           250
-
-
-
-
-
-
-Data:   y          x
-    97.58776    1.000000
-    97.76344    2.000000
-    96.56705    3.000000
-    92.52037    4.000000
-    91.15097    5.000000
-    95.21728    6.000000
-    90.21355    7.000000
-    89.29235    8.000000
-    91.51479    9.000000
-    89.60965   10.000000
-    86.56187    11.00000
-    85.55315    12.00000
-    87.13053    13.00000
-    85.67938    14.00000
-    80.04849    15.00000
-    82.18922    16.00000
-    87.24078    17.00000
-    80.79401    18.00000
-    81.28564    19.00000
-    81.56932    20.00000
-    79.22703    21.00000
-    79.43259    22.00000
-    77.90174    23.00000
-    76.75438    24.00000
-    77.17338    25.00000
-    74.27296    26.00000
-    73.11830    27.00000
-    73.84732    28.00000
-    72.47746    29.00000
-    71.92128    30.00000
-    66.91962    31.00000
-    67.93554    32.00000
-    69.55841    33.00000
-    69.06592    34.00000
-    66.53371    35.00000
-    63.87094    36.00000
-    69.70526    37.00000
-    63.59295    38.00000
-    63.35509    39.00000
-    59.99747    40.00000
-    62.64843    41.00000
-    65.77345    42.00000
-    59.10141    43.00000
-    56.57750    44.00000
-    61.15313    45.00000
-    54.30767    46.00000
-    62.83535    47.00000
-    56.52957    48.00000
-    56.98427    49.00000
-    58.11459    50.00000
-    58.69576    51.00000
-    58.23322    52.00000
-    54.90490    53.00000
-    57.91442    54.00000
-    56.96629    55.00000
-    51.13831    56.00000
-    49.27123    57.00000
-    52.92668    58.00000
-    54.47693    59.00000
-    51.81710    60.00000
-    51.05401    61.00000
-    52.51731    62.00000
-    51.83710    63.00000
-    54.48196    64.00000
-    49.05859    65.00000
-    50.52315    66.00000
-    50.32755    67.00000
-    46.44419    68.00000
-    50.89281    69.00000
-    52.13203    70.00000
-    49.78741    71.00000
-    49.01637    72.00000
-    54.18198    73.00000
-    53.17456    74.00000
-    53.20827    75.00000
-    57.43459    76.00000
-    51.95282    77.00000
-    54.20282    78.00000
-    57.46687    79.00000
-    53.60268    80.00000
-    58.86728    81.00000
-    57.66652    82.00000
-    63.71034    83.00000
-    65.24244    84.00000
-    65.10878    85.00000
-    69.96313    86.00000
-    68.85475    87.00000
-    73.32574    88.00000
-    76.21241    89.00000
-    78.06311    90.00000
-    75.37701    91.00000
-    87.54449    92.00000
-    89.50588    93.00000
-    95.82098    94.00000
-    97.48390    95.00000
-   100.86070    96.00000
-   102.48510    97.00000
-    105.7311    98.00000
-    111.3489    99.00000
-    111.0305   100.00000
-    110.1920   101.00000
-    118.3581   102.00000
-    118.8086   103.00000
-    122.4249   104.00000
-    124.0953   105.00000
-    125.9337    106.0000
-    127.8533    107.0000
-    131.0361    108.0000
-    133.3343    109.0000
-    135.1278    110.0000
-    131.7113    111.0000
-    131.9151    112.0000
-    132.1107    113.0000
-    127.6898    114.0000
-    133.2148    115.0000
-    128.2296    116.0000
-    133.5902    117.0000
-    127.2539    118.0000
-    128.3482    119.0000
-    124.8694    120.0000
-    124.6031    121.0000
-    117.0648    122.0000
-    118.1966    123.0000
-    119.5408    124.0000
-    114.7946    125.0000
-    114.2780    126.0000
-    120.3484    127.0000
-    114.8647    128.0000
-    111.6514    129.0000
-    110.1826    130.0000
-    108.4461    131.0000
-    109.0571    132.0000
-    106.5308    133.0000
-    109.4691    134.0000
-    106.8709    135.0000
-    107.3192    136.0000
-    106.9000    137.0000
-    109.6526    138.0000
-    107.1602    139.0000
-    108.2509    140.0000
-   104.96310    141.0000
-    109.3601    142.0000
-    107.6696    143.0000
-    99.77286    144.0000
-   104.96440    145.0000
-    106.1376    146.0000
-    106.5816    147.0000
-   100.12860    148.0000
-   101.66910    149.0000
-    96.44254    150.0000
-    97.34169    151.0000
-    96.97412    152.0000
-    90.73460    153.0000
-    93.37949    154.0000
-    82.12331    155.0000
-    83.01657    156.0000
-    78.87360    157.0000
-    74.86971    158.0000
-    72.79341    159.0000
-    65.14744    160.0000
-    67.02127    161.0000
-    60.16136    162.0000
-    57.13996    163.0000
-    54.05769    164.0000
-    50.42265    165.0000
-    47.82430    166.0000
-    42.85748    167.0000
-    42.45495    168.0000
-    38.30808    169.0000
-    36.95794    170.0000
-    33.94543    171.0000
-    34.19017    172.0000
-    31.66097    173.0000
-    23.56172    174.0000
-    29.61143    175.0000
-    23.88765    176.0000
-    22.49812    177.0000
-    24.86901    178.0000
-    17.29481    179.0000
-    18.09291    180.0000
-    15.34813    181.0000
-    14.77997    182.0000
-    13.87832    183.0000
-    12.88891    184.0000
-    16.20763    185.0000
-    16.29024    186.0000
-    15.29712    187.0000
-    14.97839    188.0000
-    12.11330    189.0000
-    14.24168    190.0000
-    12.53824    191.0000
-    15.19818    192.0000
-    11.70478    193.0000
-    15.83745    194.0000
-   10.035850    195.0000
-    9.307574    196.0000
-    12.86800    197.0000
-    8.571671    198.0000
-    11.60415    199.0000
-    12.42772    200.0000
-    11.23627    201.0000
-    11.13198    202.0000
-    7.761117    203.0000
-    6.758250    204.0000
-    14.23375    205.0000
-    10.63876    206.0000
-    8.893581    207.0000
-    11.55398    208.0000
-    11.57221    209.0000
-    11.58347    210.0000
-    9.724857    211.0000
-    11.43854    212.0000
-    11.22636    213.0000
-   10.170150    214.0000
-    12.50765    215.0000
-    6.200494    216.0000
-    9.018902    217.0000
-    10.80557    218.0000
-    13.09591    219.0000
-    3.914033    220.0000
-    9.567723    221.0000
-    8.038338    222.0000
-   10.230960    223.0000
-    9.367358    224.0000
-    7.695937    225.0000
-    6.118552    226.0000
-    8.793192    227.0000
-    7.796682    228.0000
-    12.45064    229.0000
-    10.61601    230.0000
-    6.001000    231.0000
-    6.765096    232.0000
-    8.764652    233.0000
-    4.586417    234.0000
-    8.390782    235.0000
-    7.209201    236.0000
-   10.012090    237.0000
-    7.327461    238.0000
-    6.525136    239.0000
-    2.840065    240.0000
-   10.323710    241.0000
-    4.790035    242.0000
-    8.376431    243.0000
-    6.263980    244.0000
-    2.705892    245.0000
-    8.362109    246.0000
-    8.983507    247.0000
-    3.362469    248.0000
-    1.182678    249.0000
-    4.875312    250.0000
+NIST/ITL StRD
+Dataset Name:  Gauss3            (Gauss3.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  48)
+               Certified Values  (lines 41 to  53)
+               Data              (lines 61 to 310)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   The data are two strongly-blended Gaussians on a 
+               decaying exponential baseline plus normally 
+               distributed zero-mean noise with variance = 6.25.
+
+Reference:     Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               250 Observations
+               Average Level of Difficulty
+               Generated Data
+
+Model:         Exponential Class
+               8 Parameters (b1 to b8)
+
+               y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+                                   + b6*exp( -(x-b7)**2 / b8**2 ) + e
+ 
+ 
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =    94.9        96.0         9.8940368970E+01  5.3005192833E-01
+  b2 =     0.009       0.0096      1.0945879335E-02  1.2554058911E-04
+  b3 =    90.1        80.0         1.0069553078E+02  8.1256587317E-01
+  b4 =   113.0       110.0         1.1163619459E+02  3.5317859757E-01
+  b5 =    20.0        25.0         2.3300500029E+01  3.6584783023E-01
+  b6 =    73.8        74.0         7.3705031418E+01  1.2091239082E+00
+  b7 =   140.0       139.0         1.4776164251E+02  4.0488183351E-01
+  b8 =    20.0        25.0         1.9668221230E+01  3.7806634336E-01
+
+Residual Sum of Squares:                    1.2444846360E+03  
+Residual Standard Deviation:                2.2677077625E+00
+Degrees of Freedom:                               242
+Number of Observations:                           250
+
+
+
+
+
+
+Data:   y          x
+    97.58776    1.000000
+    97.76344    2.000000
+    96.56705    3.000000
+    92.52037    4.000000
+    91.15097    5.000000
+    95.21728    6.000000
+    90.21355    7.000000
+    89.29235    8.000000
+    91.51479    9.000000
+    89.60965   10.000000
+    86.56187    11.00000
+    85.55315    12.00000
+    87.13053    13.00000
+    85.67938    14.00000
+    80.04849    15.00000
+    82.18922    16.00000
+    87.24078    17.00000
+    80.79401    18.00000
+    81.28564    19.00000
+    81.56932    20.00000
+    79.22703    21.00000
+    79.43259    22.00000
+    77.90174    23.00000
+    76.75438    24.00000
+    77.17338    25.00000
+    74.27296    26.00000
+    73.11830    27.00000
+    73.84732    28.00000
+    72.47746    29.00000
+    71.92128    30.00000
+    66.91962    31.00000
+    67.93554    32.00000
+    69.55841    33.00000
+    69.06592    34.00000
+    66.53371    35.00000
+    63.87094    36.00000
+    69.70526    37.00000
+    63.59295    38.00000
+    63.35509    39.00000
+    59.99747    40.00000
+    62.64843    41.00000
+    65.77345    42.00000
+    59.10141    43.00000
+    56.57750    44.00000
+    61.15313    45.00000
+    54.30767    46.00000
+    62.83535    47.00000
+    56.52957    48.00000
+    56.98427    49.00000
+    58.11459    50.00000
+    58.69576    51.00000
+    58.23322    52.00000
+    54.90490    53.00000
+    57.91442    54.00000
+    56.96629    55.00000
+    51.13831    56.00000
+    49.27123    57.00000
+    52.92668    58.00000
+    54.47693    59.00000
+    51.81710    60.00000
+    51.05401    61.00000
+    52.51731    62.00000
+    51.83710    63.00000
+    54.48196    64.00000
+    49.05859    65.00000
+    50.52315    66.00000
+    50.32755    67.00000
+    46.44419    68.00000
+    50.89281    69.00000
+    52.13203    70.00000
+    49.78741    71.00000
+    49.01637    72.00000
+    54.18198    73.00000
+    53.17456    74.00000
+    53.20827    75.00000
+    57.43459    76.00000
+    51.95282    77.00000
+    54.20282    78.00000
+    57.46687    79.00000
+    53.60268    80.00000
+    58.86728    81.00000
+    57.66652    82.00000
+    63.71034    83.00000
+    65.24244    84.00000
+    65.10878    85.00000
+    69.96313    86.00000
+    68.85475    87.00000
+    73.32574    88.00000
+    76.21241    89.00000
+    78.06311    90.00000
+    75.37701    91.00000
+    87.54449    92.00000
+    89.50588    93.00000
+    95.82098    94.00000
+    97.48390    95.00000
+   100.86070    96.00000
+   102.48510    97.00000
+    105.7311    98.00000
+    111.3489    99.00000
+    111.0305   100.00000
+    110.1920   101.00000
+    118.3581   102.00000
+    118.8086   103.00000
+    122.4249   104.00000
+    124.0953   105.00000
+    125.9337    106.0000
+    127.8533    107.0000
+    131.0361    108.0000
+    133.3343    109.0000
+    135.1278    110.0000
+    131.7113    111.0000
+    131.9151    112.0000
+    132.1107    113.0000
+    127.6898    114.0000
+    133.2148    115.0000
+    128.2296    116.0000
+    133.5902    117.0000
+    127.2539    118.0000
+    128.3482    119.0000
+    124.8694    120.0000
+    124.6031    121.0000
+    117.0648    122.0000
+    118.1966    123.0000
+    119.5408    124.0000
+    114.7946    125.0000
+    114.2780    126.0000
+    120.3484    127.0000
+    114.8647    128.0000
+    111.6514    129.0000
+    110.1826    130.0000
+    108.4461    131.0000
+    109.0571    132.0000
+    106.5308    133.0000
+    109.4691    134.0000
+    106.8709    135.0000
+    107.3192    136.0000
+    106.9000    137.0000
+    109.6526    138.0000
+    107.1602    139.0000
+    108.2509    140.0000
+   104.96310    141.0000
+    109.3601    142.0000
+    107.6696    143.0000
+    99.77286    144.0000
+   104.96440    145.0000
+    106.1376    146.0000
+    106.5816    147.0000
+   100.12860    148.0000
+   101.66910    149.0000
+    96.44254    150.0000
+    97.34169    151.0000
+    96.97412    152.0000
+    90.73460    153.0000
+    93.37949    154.0000
+    82.12331    155.0000
+    83.01657    156.0000
+    78.87360    157.0000
+    74.86971    158.0000
+    72.79341    159.0000
+    65.14744    160.0000
+    67.02127    161.0000
+    60.16136    162.0000
+    57.13996    163.0000
+    54.05769    164.0000
+    50.42265    165.0000
+    47.82430    166.0000
+    42.85748    167.0000
+    42.45495    168.0000
+    38.30808    169.0000
+    36.95794    170.0000
+    33.94543    171.0000
+    34.19017    172.0000
+    31.66097    173.0000
+    23.56172    174.0000
+    29.61143    175.0000
+    23.88765    176.0000
+    22.49812    177.0000
+    24.86901    178.0000
+    17.29481    179.0000
+    18.09291    180.0000
+    15.34813    181.0000
+    14.77997    182.0000
+    13.87832    183.0000
+    12.88891    184.0000
+    16.20763    185.0000
+    16.29024    186.0000
+    15.29712    187.0000
+    14.97839    188.0000
+    12.11330    189.0000
+    14.24168    190.0000
+    12.53824    191.0000
+    15.19818    192.0000
+    11.70478    193.0000
+    15.83745    194.0000
+   10.035850    195.0000
+    9.307574    196.0000
+    12.86800    197.0000
+    8.571671    198.0000
+    11.60415    199.0000
+    12.42772    200.0000
+    11.23627    201.0000
+    11.13198    202.0000
+    7.761117    203.0000
+    6.758250    204.0000
+    14.23375    205.0000
+    10.63876    206.0000
+    8.893581    207.0000
+    11.55398    208.0000
+    11.57221    209.0000
+    11.58347    210.0000
+    9.724857    211.0000
+    11.43854    212.0000
+    11.22636    213.0000
+   10.170150    214.0000
+    12.50765    215.0000
+    6.200494    216.0000
+    9.018902    217.0000
+    10.80557    218.0000
+    13.09591    219.0000
+    3.914033    220.0000
+    9.567723    221.0000
+    8.038338    222.0000
+   10.230960    223.0000
+    9.367358    224.0000
+    7.695937    225.0000
+    6.118552    226.0000
+    8.793192    227.0000
+    7.796682    228.0000
+    12.45064    229.0000
+    10.61601    230.0000
+    6.001000    231.0000
+    6.765096    232.0000
+    8.764652    233.0000
+    4.586417    234.0000
+    8.390782    235.0000
+    7.209201    236.0000
+   10.012090    237.0000
+    7.327461    238.0000
+    6.525136    239.0000
+    2.840065    240.0000
+   10.323710    241.0000
+    4.790035    242.0000
+    8.376431    243.0000
+    6.263980    244.0000
+    2.705892    245.0000
+    8.362109    246.0000
+    8.983507    247.0000
+    3.362469    248.0000
+    1.182678    249.0000
+    4.875312    250.0000
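
[Note: not part of the upstream patch.] As a minimal sketch of how the Gauss3 model stated in the header above could be fit with lmfit: the function mirrors the file's formula (two Gaussians on a decaying exponential), the starting values are the "Start 1" column, and skiprows=60 follows the header's note that the data occupy lines 61 to 310. Paths assume the repository root.

    import numpy as np
    from lmfit import Model

    def gauss3(x, b1, b2, b3, b4, b5, b6, b7, b8):
        """Two Gaussians on a decaying exponential baseline (NIST Gauss3)."""
        return (b1 * np.exp(-b2 * x)
                + b3 * np.exp(-(x - b4)**2 / b5**2)
                + b6 * np.exp(-(x - b7)**2 / b8**2))

    # columns are y then x; data begin on line 61 of the file
    y, x = np.loadtxt('NIST_STRD/Gauss3.dat', skiprows=60, unpack=True)

    model = Model(gauss3)
    # "Start 1" values from the header above
    result = model.fit(y, x=x, b1=94.9, b2=0.009, b3=90.1, b4=113.0,
                       b5=20.0, b6=73.8, b7=140.0, b8=20.0)
    print(result.fit_report())

The reported best-fit values can be compared against the certified parameters and standard deviations in the header.
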
diff --git a/NIST_STRD/Hahn1.dat b/NIST_STRD/Hahn1.dat
index f3069d7..0e493a4 100644
--- a/NIST_STRD/Hahn1.dat
+++ b/NIST_STRD/Hahn1.dat
@@ -1,296 +1,296 @@
-NIST/ITL StRD
-Dataset Name:  Hahn1             (Hahn1.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  47)
-               Certified Values  (lines 41 to  52)
-               Data              (lines 61 to 296)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               the thermal expansion of copper.  The response 
-               variable is the coefficient of thermal expansion, and
-               the predictor variable is temperature in degrees 
-               kelvin.
-
-
-Reference:     Hahn, T., NIST (197?). 
-               Copper Thermal Expansion Study.
-
-
-
-
-
-Data:          1 Response  (y = coefficient of thermal expansion)
-               1 Predictor (x = temperature, degrees kelvin)
-               236 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Rational Class (cubic/cubic)
-               7 Parameters (b1 to b7)
-
-               y = (b1+b2*x+b3*x**2+b4*x**3) /
-                   (1+b5*x+b6*x**2+b7*x**3)  +  e
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   10           1            1.0776351733E+00  1.7070154742E-01
-  b2 =   -1          -0.1         -1.2269296921E-01  1.2000289189E-02
-  b3 =    0.05        0.005        4.0863750610E-03  2.2508314937E-04
-  b4 =   -0.00001    -0.000001    -1.4262662514E-06  2.7578037666E-07
-  b5 =   -0.05       -0.005       -5.7609940901E-03  2.4712888219E-04
-  b6 =    0.001       0.0001       2.4053735503E-04  1.0449373768E-05
-  b7 =   -0.000001   -0.0000001   -1.2314450199E-07  1.3027335327E-08
-
-Residual Sum of Squares:                    1.5324382854E+00 
-Residual Standard Deviation:                8.1803852243E-02
-Degrees of Freedom:                               229
-Number of Observations:                           236
-
-
-
-
-
-
-  
-Data:   y              x
-        .591E0         24.41E0  
-       1.547E0         34.82E0  
-       2.902E0         44.09E0  
-       2.894E0         45.07E0  
-       4.703E0         54.98E0  
-       6.307E0         65.51E0  
-       7.03E0          70.53E0  
-       7.898E0         75.70E0  
-       9.470E0         89.57E0  
-       9.484E0         91.14E0  
-      10.072E0         96.40E0  
-      10.163E0         97.19E0  
-      11.615E0        114.26E0  
-      12.005E0        120.25E0  
-      12.478E0        127.08E0  
-      12.982E0        133.55E0  
-      12.970E0        133.61E0  
-      13.926E0        158.67E0  
-      14.452E0        172.74E0  
-      14.404E0        171.31E0  
-      15.190E0        202.14E0  
-      15.550E0        220.55E0  
-      15.528E0        221.05E0  
-      15.499E0        221.39E0  
-      16.131E0        250.99E0  
-      16.438E0        268.99E0  
-      16.387E0        271.80E0  
-      16.549E0        271.97E0  
-      16.872E0        321.31E0  
-      16.830E0        321.69E0  
-      16.926E0        330.14E0  
-      16.907E0        333.03E0  
-      16.966E0        333.47E0  
-      17.060E0        340.77E0  
-      17.122E0        345.65E0  
-      17.311E0        373.11E0  
-      17.355E0        373.79E0  
-      17.668E0        411.82E0  
-      17.767E0        419.51E0  
-      17.803E0        421.59E0  
-      17.765E0        422.02E0  
-      17.768E0        422.47E0  
-      17.736E0        422.61E0  
-      17.858E0        441.75E0  
-      17.877E0        447.41E0  
-      17.912E0        448.7E0   
-      18.046E0        472.89E0  
-      18.085E0        476.69E0  
-      18.291E0        522.47E0  
-      18.357E0        522.62E0  
-      18.426E0        524.43E0  
-      18.584E0        546.75E0  
-      18.610E0        549.53E0  
-      18.870E0        575.29E0  
-      18.795E0        576.00E0  
-      19.111E0        625.55E0  
-        .367E0         20.15E0  
-        .796E0         28.78E0  
-       0.892E0         29.57E0  
-       1.903E0         37.41E0  
-       2.150E0         39.12E0  
-       3.697E0         50.24E0  
-       5.870E0         61.38E0  
-       6.421E0         66.25E0  
-       7.422E0         73.42E0  
-       9.944E0         95.52E0  
-      11.023E0        107.32E0  
-      11.87E0         122.04E0  
-      12.786E0        134.03E0  
-      14.067E0        163.19E0  
-      13.974E0        163.48E0  
-      14.462E0        175.70E0  
-      14.464E0        179.86E0  
-      15.381E0        211.27E0  
-      15.483E0        217.78E0  
-      15.59E0         219.14E0  
-      16.075E0        262.52E0  
-      16.347E0        268.01E0  
-      16.181E0        268.62E0  
-      16.915E0        336.25E0  
-      17.003E0        337.23E0  
-      16.978E0        339.33E0  
-      17.756E0        427.38E0  
-      17.808E0        428.58E0  
-      17.868E0        432.68E0  
-      18.481E0        528.99E0  
-      18.486E0        531.08E0  
-      19.090E0        628.34E0  
-      16.062E0        253.24E0  
-      16.337E0        273.13E0  
-      16.345E0        273.66E0  
-      16.388E0        282.10E0  
-      17.159E0        346.62E0  
-      17.116E0        347.19E0  
-      17.164E0        348.78E0  
-      17.123E0        351.18E0  
-      17.979E0        450.10E0  
-      17.974E0        450.35E0  
-      18.007E0        451.92E0  
-      17.993E0        455.56E0  
-      18.523E0        552.22E0  
-      18.669E0        553.56E0  
-      18.617E0        555.74E0  
-      19.371E0        652.59E0  
-      19.330E0        656.20E0  
-       0.080E0         14.13E0  
-       0.248E0         20.41E0  
-       1.089E0         31.30E0  
-       1.418E0         33.84E0  
-       2.278E0         39.70E0  
-       3.624E0         48.83E0  
-       4.574E0         54.50E0  
-       5.556E0         60.41E0  
-       7.267E0         72.77E0  
-       7.695E0         75.25E0  
-       9.136E0         86.84E0  
-       9.959E0         94.88E0  
-       9.957E0         96.40E0  
-      11.600E0        117.37E0  
-      13.138E0        139.08E0  
-      13.564E0        147.73E0  
-      13.871E0        158.63E0  
-      13.994E0        161.84E0  
-      14.947E0        192.11E0  
-      15.473E0        206.76E0  
-      15.379E0        209.07E0  
-      15.455E0        213.32E0  
-      15.908E0        226.44E0  
-      16.114E0        237.12E0  
-      17.071E0        330.90E0  
-      17.135E0        358.72E0  
-      17.282E0        370.77E0  
-      17.368E0        372.72E0  
-      17.483E0        396.24E0  
-      17.764E0        416.59E0  
-      18.185E0        484.02E0  
-      18.271E0        495.47E0  
-      18.236E0        514.78E0  
-      18.237E0        515.65E0  
-      18.523E0        519.47E0  
-      18.627E0        544.47E0  
-      18.665E0        560.11E0  
-      19.086E0        620.77E0  
-       0.214E0         18.97E0  
-       0.943E0         28.93E0  
-       1.429E0         33.91E0  
-       2.241E0         40.03E0  
-       2.951E0         44.66E0  
-       3.782E0         49.87E0  
-       4.757E0         55.16E0  
-       5.602E0         60.90E0  
-       7.169E0         72.08E0  
-       8.920E0         85.15E0  
-      10.055E0         97.06E0  
-      12.035E0        119.63E0  
-      12.861E0        133.27E0  
-      13.436E0        143.84E0  
-      14.167E0        161.91E0  
-      14.755E0        180.67E0  
-      15.168E0        198.44E0  
-      15.651E0        226.86E0  
-      15.746E0        229.65E0  
-      16.216E0        258.27E0  
-      16.445E0        273.77E0  
-      16.965E0        339.15E0  
-      17.121E0        350.13E0  
-      17.206E0        362.75E0  
-      17.250E0        371.03E0  
-      17.339E0        393.32E0  
-      17.793E0        448.53E0  
-      18.123E0        473.78E0  
-      18.49E0         511.12E0  
-      18.566E0        524.70E0  
-      18.645E0        548.75E0  
-      18.706E0        551.64E0  
-      18.924E0        574.02E0  
-      19.1E0          623.86E0  
-       0.375E0         21.46E0  
-       0.471E0         24.33E0  
-       1.504E0         33.43E0  
-       2.204E0         39.22E0  
-       2.813E0         44.18E0  
-       4.765E0         55.02E0  
-       9.835E0         94.33E0  
-      10.040E0         96.44E0  
-      11.946E0        118.82E0  
-      12.596E0        128.48E0  
-      13.303E0        141.94E0  
-      13.922E0        156.92E0  
-      14.440E0        171.65E0  
-      14.951E0        190.00E0  
-      15.627E0        223.26E0  
-      15.639E0        223.88E0  
-      15.814E0        231.50E0  
-      16.315E0        265.05E0  
-      16.334E0        269.44E0  
-      16.430E0        271.78E0  
-      16.423E0        273.46E0  
-      17.024E0        334.61E0  
-      17.009E0        339.79E0  
-      17.165E0        349.52E0  
-      17.134E0        358.18E0  
-      17.349E0        377.98E0  
-      17.576E0        394.77E0  
-      17.848E0        429.66E0  
-      18.090E0        468.22E0  
-      18.276E0        487.27E0  
-      18.404E0        519.54E0  
-      18.519E0        523.03E0  
-      19.133E0        612.99E0  
-      19.074E0        638.59E0  
-      19.239E0        641.36E0  
-      19.280E0        622.05E0  
-      19.101E0        631.50E0  
-      19.398E0        663.97E0  
-      19.252E0        646.9E0   
-      19.89E0         748.29E0  
-      20.007E0        749.21E0  
-      19.929E0        750.14E0  
-      19.268E0        647.04E0  
-      19.324E0        646.89E0  
-      20.049E0        746.9E0   
-      20.107E0        748.43E0  
-      20.062E0        747.35E0  
-      20.065E0        749.27E0  
-      19.286E0        647.61E0  
-      19.972E0        747.78E0  
-      20.088E0        750.51E0  
-      20.743E0        851.37E0  
-      20.83E0         845.97E0  
-      20.935E0        847.54E0  
-      21.035E0        849.93E0  
-      20.93E0         851.61E0  
-      21.074E0        849.75E0  
-      21.085E0        850.98E0  
-      20.935E0        848.23E0  
+NIST/ITL StRD
+Dataset Name:  Hahn1             (Hahn1.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  47)
+               Certified Values  (lines 41 to  52)
+               Data              (lines 61 to 296)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               the thermal expansion of copper.  The response 
+               variable is the coefficient of thermal expansion, and
+               the predictor variable is temperature in degrees 
+               kelvin.
+
+
+Reference:     Hahn, T., NIST (197?). 
+               Copper Thermal Expansion Study.
+
+
+
+
+
+Data:          1 Response  (y = coefficient of thermal expansion)
+               1 Predictor (x = temperature, degrees kelvin)
+               236 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Rational Class (cubic/cubic)
+               7 Parameters (b1 to b7)
+
+               y = (b1+b2*x+b3*x**2+b4*x**3) /
+                   (1+b5*x+b6*x**2+b7*x**3)  +  e
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   10           1            1.0776351733E+00  1.7070154742E-01
+  b2 =   -1          -0.1         -1.2269296921E-01  1.2000289189E-02
+  b3 =    0.05        0.005        4.0863750610E-03  2.2508314937E-04
+  b4 =   -0.00001    -0.000001    -1.4262662514E-06  2.7578037666E-07
+  b5 =   -0.05       -0.005       -5.7609940901E-03  2.4712888219E-04
+  b6 =    0.001       0.0001       2.4053735503E-04  1.0449373768E-05
+  b7 =   -0.000001   -0.0000001   -1.2314450199E-07  1.3027335327E-08
+
+Residual Sum of Squares:                    1.5324382854E+00 
+Residual Standard Deviation:                8.1803852243E-02
+Degrees of Freedom:                               229
+Number of Observations:                           236
+
+
+
+
+
+
+  
+Data:   y              x
+        .591E0         24.41E0  
+       1.547E0         34.82E0  
+       2.902E0         44.09E0  
+       2.894E0         45.07E0  
+       4.703E0         54.98E0  
+       6.307E0         65.51E0  
+       7.03E0          70.53E0  
+       7.898E0         75.70E0  
+       9.470E0         89.57E0  
+       9.484E0         91.14E0  
+      10.072E0         96.40E0  
+      10.163E0         97.19E0  
+      11.615E0        114.26E0  
+      12.005E0        120.25E0  
+      12.478E0        127.08E0  
+      12.982E0        133.55E0  
+      12.970E0        133.61E0  
+      13.926E0        158.67E0  
+      14.452E0        172.74E0  
+      14.404E0        171.31E0  
+      15.190E0        202.14E0  
+      15.550E0        220.55E0  
+      15.528E0        221.05E0  
+      15.499E0        221.39E0  
+      16.131E0        250.99E0  
+      16.438E0        268.99E0  
+      16.387E0        271.80E0  
+      16.549E0        271.97E0  
+      16.872E0        321.31E0  
+      16.830E0        321.69E0  
+      16.926E0        330.14E0  
+      16.907E0        333.03E0  
+      16.966E0        333.47E0  
+      17.060E0        340.77E0  
+      17.122E0        345.65E0  
+      17.311E0        373.11E0  
+      17.355E0        373.79E0  
+      17.668E0        411.82E0  
+      17.767E0        419.51E0  
+      17.803E0        421.59E0  
+      17.765E0        422.02E0  
+      17.768E0        422.47E0  
+      17.736E0        422.61E0  
+      17.858E0        441.75E0  
+      17.877E0        447.41E0  
+      17.912E0        448.7E0   
+      18.046E0        472.89E0  
+      18.085E0        476.69E0  
+      18.291E0        522.47E0  
+      18.357E0        522.62E0  
+      18.426E0        524.43E0  
+      18.584E0        546.75E0  
+      18.610E0        549.53E0  
+      18.870E0        575.29E0  
+      18.795E0        576.00E0  
+      19.111E0        625.55E0  
+        .367E0         20.15E0  
+        .796E0         28.78E0  
+       0.892E0         29.57E0  
+       1.903E0         37.41E0  
+       2.150E0         39.12E0  
+       3.697E0         50.24E0  
+       5.870E0         61.38E0  
+       6.421E0         66.25E0  
+       7.422E0         73.42E0  
+       9.944E0         95.52E0  
+      11.023E0        107.32E0  
+      11.87E0         122.04E0  
+      12.786E0        134.03E0  
+      14.067E0        163.19E0  
+      13.974E0        163.48E0  
+      14.462E0        175.70E0  
+      14.464E0        179.86E0  
+      15.381E0        211.27E0  
+      15.483E0        217.78E0  
+      15.59E0         219.14E0  
+      16.075E0        262.52E0  
+      16.347E0        268.01E0  
+      16.181E0        268.62E0  
+      16.915E0        336.25E0  
+      17.003E0        337.23E0  
+      16.978E0        339.33E0  
+      17.756E0        427.38E0  
+      17.808E0        428.58E0  
+      17.868E0        432.68E0  
+      18.481E0        528.99E0  
+      18.486E0        531.08E0  
+      19.090E0        628.34E0  
+      16.062E0        253.24E0  
+      16.337E0        273.13E0  
+      16.345E0        273.66E0  
+      16.388E0        282.10E0  
+      17.159E0        346.62E0  
+      17.116E0        347.19E0  
+      17.164E0        348.78E0  
+      17.123E0        351.18E0  
+      17.979E0        450.10E0  
+      17.974E0        450.35E0  
+      18.007E0        451.92E0  
+      17.993E0        455.56E0  
+      18.523E0        552.22E0  
+      18.669E0        553.56E0  
+      18.617E0        555.74E0  
+      19.371E0        652.59E0  
+      19.330E0        656.20E0  
+       0.080E0         14.13E0  
+       0.248E0         20.41E0  
+       1.089E0         31.30E0  
+       1.418E0         33.84E0  
+       2.278E0         39.70E0  
+       3.624E0         48.83E0  
+       4.574E0         54.50E0  
+       5.556E0         60.41E0  
+       7.267E0         72.77E0  
+       7.695E0         75.25E0  
+       9.136E0         86.84E0  
+       9.959E0         94.88E0  
+       9.957E0         96.40E0  
+      11.600E0        117.37E0  
+      13.138E0        139.08E0  
+      13.564E0        147.73E0  
+      13.871E0        158.63E0  
+      13.994E0        161.84E0  
+      14.947E0        192.11E0  
+      15.473E0        206.76E0  
+      15.379E0        209.07E0  
+      15.455E0        213.32E0  
+      15.908E0        226.44E0  
+      16.114E0        237.12E0  
+      17.071E0        330.90E0  
+      17.135E0        358.72E0  
+      17.282E0        370.77E0  
+      17.368E0        372.72E0  
+      17.483E0        396.24E0  
+      17.764E0        416.59E0  
+      18.185E0        484.02E0  
+      18.271E0        495.47E0  
+      18.236E0        514.78E0  
+      18.237E0        515.65E0  
+      18.523E0        519.47E0  
+      18.627E0        544.47E0  
+      18.665E0        560.11E0  
+      19.086E0        620.77E0  
+       0.214E0         18.97E0  
+       0.943E0         28.93E0  
+       1.429E0         33.91E0  
+       2.241E0         40.03E0  
+       2.951E0         44.66E0  
+       3.782E0         49.87E0  
+       4.757E0         55.16E0  
+       5.602E0         60.90E0  
+       7.169E0         72.08E0  
+       8.920E0         85.15E0  
+      10.055E0         97.06E0  
+      12.035E0        119.63E0  
+      12.861E0        133.27E0  
+      13.436E0        143.84E0  
+      14.167E0        161.91E0  
+      14.755E0        180.67E0  
+      15.168E0        198.44E0  
+      15.651E0        226.86E0  
+      15.746E0        229.65E0  
+      16.216E0        258.27E0  
+      16.445E0        273.77E0  
+      16.965E0        339.15E0  
+      17.121E0        350.13E0  
+      17.206E0        362.75E0  
+      17.250E0        371.03E0  
+      17.339E0        393.32E0  
+      17.793E0        448.53E0  
+      18.123E0        473.78E0  
+      18.49E0         511.12E0  
+      18.566E0        524.70E0  
+      18.645E0        548.75E0  
+      18.706E0        551.64E0  
+      18.924E0        574.02E0  
+      19.1E0          623.86E0  
+       0.375E0         21.46E0  
+       0.471E0         24.33E0  
+       1.504E0         33.43E0  
+       2.204E0         39.22E0  
+       2.813E0         44.18E0  
+       4.765E0         55.02E0  
+       9.835E0         94.33E0  
+      10.040E0         96.44E0  
+      11.946E0        118.82E0  
+      12.596E0        128.48E0  
+      13.303E0        141.94E0  
+      13.922E0        156.92E0  
+      14.440E0        171.65E0  
+      14.951E0        190.00E0  
+      15.627E0        223.26E0  
+      15.639E0        223.88E0  
+      15.814E0        231.50E0  
+      16.315E0        265.05E0  
+      16.334E0        269.44E0  
+      16.430E0        271.78E0  
+      16.423E0        273.46E0  
+      17.024E0        334.61E0  
+      17.009E0        339.79E0  
+      17.165E0        349.52E0  
+      17.134E0        358.18E0  
+      17.349E0        377.98E0  
+      17.576E0        394.77E0  
+      17.848E0        429.66E0  
+      18.090E0        468.22E0  
+      18.276E0        487.27E0  
+      18.404E0        519.54E0  
+      18.519E0        523.03E0  
+      19.133E0        612.99E0  
+      19.074E0        638.59E0  
+      19.239E0        641.36E0  
+      19.280E0        622.05E0  
+      19.101E0        631.50E0  
+      19.398E0        663.97E0  
+      19.252E0        646.9E0   
+      19.89E0         748.29E0  
+      20.007E0        749.21E0  
+      19.929E0        750.14E0  
+      19.268E0        647.04E0  
+      19.324E0        646.89E0  
+      20.049E0        746.9E0   
+      20.107E0        748.43E0  
+      20.062E0        747.35E0  
+      20.065E0        749.27E0  
+      19.286E0        647.61E0  
+      19.972E0        747.78E0  
+      20.088E0        750.51E0  
+      20.743E0        851.37E0  
+      20.83E0         845.97E0  
+      20.935E0        847.54E0  
+      21.035E0        849.93E0  
+      20.93E0         851.61E0  
+      21.074E0        849.75E0  
+      21.085E0        850.98E0  
+      20.935E0        848.23E0  
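
[Note: not part of the upstream patch.] A similar sketch for the Hahn1 rational (cubic/cubic) model above; again the starting values are the header's "Start 1" column and the data occupy lines 61 to 296, so skiprows=60. The E0-style exponents in the file parse directly with numpy.loadtxt.

    import numpy as np
    from lmfit import Model

    def hahn1(x, b1, b2, b3, b4, b5, b6, b7):
        """Rational cubic/cubic model for copper thermal expansion (NIST Hahn1)."""
        num = b1 + b2*x + b3*x**2 + b4*x**3
        den = 1.0 + b5*x + b6*x**2 + b7*x**3
        return num / den

    y, x = np.loadtxt('NIST_STRD/Hahn1.dat', skiprows=60, unpack=True)

    result = Model(hahn1).fit(y, x=x, b1=10, b2=-1, b3=0.05, b4=-1e-5,
                              b5=-0.05, b6=0.001, b7=-1e-6)
    print(result.fit_report())
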
diff --git a/NIST_STRD/Kirby2.dat b/NIST_STRD/Kirby2.dat
index 04df176..75cd80f 100644
--- a/NIST_STRD/Kirby2.dat
+++ b/NIST_STRD/Kirby2.dat
@@ -1,211 +1,211 @@
-NIST/ITL StRD
-Dataset Name:  Kirby2            (Kirby2.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to  45)
-               Certified Values  (lines 41 to  50)
-               Data              (lines 61 to 211)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               scanning electron microscope line with standards.
-
-
-Reference:     Kirby, R., NIST (197?).  
-               Scanning electron microscope line width standards.
-
-
-
-
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               151 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Rational Class (quadratic/quadratic)
-               5 Parameters (b1 to b5)
-
-               y = (b1 + b2*x + b3*x**2) /
-                   (1 + b4*x + b5*x**2)  +  e
-
- 
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    2           1.5          1.6745063063E+00  8.7989634338E-02
-  b2 =   -0.1        -0.15        -1.3927397867E-01  4.1182041386E-03
-  b3 =    0.003       0.0025       2.5961181191E-03  4.1856520458E-05
-  b4 =   -0.001      -0.0015      -1.7241811870E-03  5.8931897355E-05
-  b5 =    0.00001     0.00002      2.1664802578E-05  2.0129761919E-07
-
-Residual Sum of Squares:                    3.9050739624E+00
-Residual Standard Deviation:                1.6354535131E-01
-Degrees of Freedom:                               146
-Number of Observations:                           151
-
-
-
-
-
-
-
-
-
-Data:   y             x
-       0.0082E0      9.65E0
-       0.0112E0     10.74E0
-       0.0149E0     11.81E0
-       0.0198E0     12.88E0
-       0.0248E0     14.06E0
-       0.0324E0     15.28E0
-       0.0420E0     16.63E0
-       0.0549E0     18.19E0
-       0.0719E0     19.88E0
-       0.0963E0     21.84E0
-       0.1291E0     24.00E0
-       0.1710E0     26.25E0
-       0.2314E0     28.86E0
-       0.3227E0     31.85E0
-       0.4809E0     35.79E0
-       0.7084E0     40.18E0
-       1.0220E0     44.74E0
-       1.4580E0     49.53E0
-       1.9520E0     53.94E0
-       2.5410E0     58.29E0
-       3.2230E0     62.63E0
-       3.9990E0     67.03E0
-       4.8520E0     71.25E0
-       5.7320E0     75.22E0
-       6.7270E0     79.33E0
-       7.8350E0     83.56E0
-       9.0250E0     87.75E0
-      10.2670E0     91.93E0
-      11.5780E0     96.10E0
-      12.9440E0    100.28E0
-      14.3770E0    104.46E0
-      15.8560E0    108.66E0
-      17.3310E0    112.71E0
-      18.8850E0    116.88E0
-      20.5750E0    121.33E0
-      22.3200E0    125.79E0
-      22.3030E0    125.79E0
-      23.4600E0    128.74E0
-      24.0600E0    130.27E0
-      25.2720E0    133.33E0
-      25.8530E0    134.79E0
-      27.1100E0    137.93E0
-      27.6580E0    139.33E0
-      28.9240E0    142.46E0
-      29.5110E0    143.90E0
-      30.7100E0    146.91E0
-      31.3500E0    148.51E0
-      32.5200E0    151.41E0
-      33.2300E0    153.17E0
-      34.3300E0    155.97E0
-      35.0600E0    157.76E0
-      36.1700E0    160.56E0
-      36.8400E0    162.30E0
-      38.0100E0    165.21E0
-      38.6700E0    166.90E0
-      39.8700E0    169.92E0
-      40.0300E0    170.32E0
-      40.5000E0    171.54E0
-      41.3700E0    173.79E0
-      41.6700E0    174.57E0
-      42.3100E0    176.25E0
-      42.7300E0    177.34E0
-      43.4600E0    179.19E0
-      44.1400E0    181.02E0
-      44.5500E0    182.08E0
-      45.2200E0    183.88E0
-      45.9200E0    185.75E0
-      46.3000E0    186.80E0
-      47.0000E0    188.63E0
-      47.6800E0    190.45E0
-      48.0600E0    191.48E0
-      48.7400E0    193.35E0
-      49.4100E0    195.22E0
-      49.7600E0    196.23E0
-      50.4300E0    198.05E0
-      51.1100E0    199.97E0
-      51.5000E0    201.06E0
-      52.1200E0    202.83E0
-      52.7600E0    204.69E0
-      53.1800E0    205.86E0
-      53.7800E0    207.58E0
-      54.4600E0    209.50E0
-      54.8300E0    210.65E0
-      55.4000E0    212.33E0
-      56.4300E0    215.43E0
-      57.0300E0    217.16E0
-      58.0000E0    220.21E0
-      58.6100E0    221.98E0
-      59.5800E0    225.06E0
-      60.1100E0    226.79E0
-      61.1000E0    229.92E0
-      61.6500E0    231.69E0
-      62.5900E0    234.77E0
-      63.1200E0    236.60E0
-      64.0300E0    239.63E0
-      64.6200E0    241.50E0
-      65.4900E0    244.48E0
-      66.0300E0    246.40E0
-      66.8900E0    249.35E0
-      67.4200E0    251.32E0
-      68.2300E0    254.22E0
-      68.7700E0    256.24E0
-      69.5900E0    259.11E0
-      70.1100E0    261.18E0
-      70.8600E0    264.02E0
-      71.4300E0    266.13E0
-      72.1600E0    268.94E0
-      72.7000E0    271.09E0
-      73.4000E0    273.87E0
-      73.9300E0    276.08E0
-      74.6000E0    278.83E0
-      75.1600E0    281.08E0
-      75.8200E0    283.81E0
-      76.3400E0    286.11E0
-      76.9800E0    288.81E0
-      77.4800E0    291.08E0
-      78.0800E0    293.75E0
-      78.6000E0    295.99E0
-      79.1700E0    298.64E0
-      79.6200E0    300.84E0
-      79.8800E0    302.02E0
-      80.1900E0    303.48E0
-      80.6600E0    305.65E0
-      81.2200E0    308.27E0
-      81.6600E0    310.41E0
-      82.1600E0    313.01E0
-      82.5900E0    315.12E0
-      83.1400E0    317.71E0
-      83.5000E0    319.79E0
-      84.0000E0    322.36E0
-      84.4000E0    324.42E0
-      84.8900E0    326.98E0
-      85.2600E0    329.01E0
-      85.7400E0    331.56E0
-      86.0700E0    333.56E0
-      86.5400E0    336.10E0
-      86.8900E0    338.08E0
-      87.3200E0    340.60E0
-      87.6500E0    342.57E0
-      88.1000E0    345.08E0
-      88.4300E0    347.02E0
-      88.8300E0    349.52E0
-      89.1200E0    351.44E0
-      89.5400E0    353.93E0
-      89.8500E0    355.83E0
-      90.2500E0    358.32E0
-      90.5500E0    360.20E0
-      90.9300E0    362.67E0
-      91.2000E0    364.53E0
-      91.5500E0    367.00E0
-      92.2000E0    371.30E0
+NIST/ITL StRD
+Dataset Name:  Kirby2            (Kirby2.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to  45)
+               Certified Values  (lines 41 to  50)
+               Data              (lines 61 to 211)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               scanning electron microscope line with standards.
+
+
+Reference:     Kirby, R., NIST (197?).  
+               Scanning electron microscope line width standards.
+
+
+
+
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               151 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Rational Class (quadratic/quadratic)
+               5 Parameters (b1 to b5)
+
+               y = (b1 + b2*x + b3*x**2) /
+                   (1 + b4*x + b5*x**2)  +  e
+
+ 
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =    2           1.5          1.6745063063E+00  8.7989634338E-02
+  b2 =   -0.1        -0.15        -1.3927397867E-01  4.1182041386E-03
+  b3 =    0.003       0.0025       2.5961181191E-03  4.1856520458E-05
+  b4 =   -0.001      -0.0015      -1.7241811870E-03  5.8931897355E-05
+  b5 =    0.00001     0.00002      2.1664802578E-05  2.0129761919E-07
+
+Residual Sum of Squares:                    3.9050739624E+00
+Residual Standard Deviation:                1.6354535131E-01
+Degrees of Freedom:                               146
+Number of Observations:                           151
+
+
+
+
+
+
+
+
+
+Data:   y             x
+       0.0082E0      9.65E0
+       0.0112E0     10.74E0
+       0.0149E0     11.81E0
+       0.0198E0     12.88E0
+       0.0248E0     14.06E0
+       0.0324E0     15.28E0
+       0.0420E0     16.63E0
+       0.0549E0     18.19E0
+       0.0719E0     19.88E0
+       0.0963E0     21.84E0
+       0.1291E0     24.00E0
+       0.1710E0     26.25E0
+       0.2314E0     28.86E0
+       0.3227E0     31.85E0
+       0.4809E0     35.79E0
+       0.7084E0     40.18E0
+       1.0220E0     44.74E0
+       1.4580E0     49.53E0
+       1.9520E0     53.94E0
+       2.5410E0     58.29E0
+       3.2230E0     62.63E0
+       3.9990E0     67.03E0
+       4.8520E0     71.25E0
+       5.7320E0     75.22E0
+       6.7270E0     79.33E0
+       7.8350E0     83.56E0
+       9.0250E0     87.75E0
+      10.2670E0     91.93E0
+      11.5780E0     96.10E0
+      12.9440E0    100.28E0
+      14.3770E0    104.46E0
+      15.8560E0    108.66E0
+      17.3310E0    112.71E0
+      18.8850E0    116.88E0
+      20.5750E0    121.33E0
+      22.3200E0    125.79E0
+      22.3030E0    125.79E0
+      23.4600E0    128.74E0
+      24.0600E0    130.27E0
+      25.2720E0    133.33E0
+      25.8530E0    134.79E0
+      27.1100E0    137.93E0
+      27.6580E0    139.33E0
+      28.9240E0    142.46E0
+      29.5110E0    143.90E0
+      30.7100E0    146.91E0
+      31.3500E0    148.51E0
+      32.5200E0    151.41E0
+      33.2300E0    153.17E0
+      34.3300E0    155.97E0
+      35.0600E0    157.76E0
+      36.1700E0    160.56E0
+      36.8400E0    162.30E0
+      38.0100E0    165.21E0
+      38.6700E0    166.90E0
+      39.8700E0    169.92E0
+      40.0300E0    170.32E0
+      40.5000E0    171.54E0
+      41.3700E0    173.79E0
+      41.6700E0    174.57E0
+      42.3100E0    176.25E0
+      42.7300E0    177.34E0
+      43.4600E0    179.19E0
+      44.1400E0    181.02E0
+      44.5500E0    182.08E0
+      45.2200E0    183.88E0
+      45.9200E0    185.75E0
+      46.3000E0    186.80E0
+      47.0000E0    188.63E0
+      47.6800E0    190.45E0
+      48.0600E0    191.48E0
+      48.7400E0    193.35E0
+      49.4100E0    195.22E0
+      49.7600E0    196.23E0
+      50.4300E0    198.05E0
+      51.1100E0    199.97E0
+      51.5000E0    201.06E0
+      52.1200E0    202.83E0
+      52.7600E0    204.69E0
+      53.1800E0    205.86E0
+      53.7800E0    207.58E0
+      54.4600E0    209.50E0
+      54.8300E0    210.65E0
+      55.4000E0    212.33E0
+      56.4300E0    215.43E0
+      57.0300E0    217.16E0
+      58.0000E0    220.21E0
+      58.6100E0    221.98E0
+      59.5800E0    225.06E0
+      60.1100E0    226.79E0
+      61.1000E0    229.92E0
+      61.6500E0    231.69E0
+      62.5900E0    234.77E0
+      63.1200E0    236.60E0
+      64.0300E0    239.63E0
+      64.6200E0    241.50E0
+      65.4900E0    244.48E0
+      66.0300E0    246.40E0
+      66.8900E0    249.35E0
+      67.4200E0    251.32E0
+      68.2300E0    254.22E0
+      68.7700E0    256.24E0
+      69.5900E0    259.11E0
+      70.1100E0    261.18E0
+      70.8600E0    264.02E0
+      71.4300E0    266.13E0
+      72.1600E0    268.94E0
+      72.7000E0    271.09E0
+      73.4000E0    273.87E0
+      73.9300E0    276.08E0
+      74.6000E0    278.83E0
+      75.1600E0    281.08E0
+      75.8200E0    283.81E0
+      76.3400E0    286.11E0
+      76.9800E0    288.81E0
+      77.4800E0    291.08E0
+      78.0800E0    293.75E0
+      78.6000E0    295.99E0
+      79.1700E0    298.64E0
+      79.6200E0    300.84E0
+      79.8800E0    302.02E0
+      80.1900E0    303.48E0
+      80.6600E0    305.65E0
+      81.2200E0    308.27E0
+      81.6600E0    310.41E0
+      82.1600E0    313.01E0
+      82.5900E0    315.12E0
+      83.1400E0    317.71E0
+      83.5000E0    319.79E0
+      84.0000E0    322.36E0
+      84.4000E0    324.42E0
+      84.8900E0    326.98E0
+      85.2600E0    329.01E0
+      85.7400E0    331.56E0
+      86.0700E0    333.56E0
+      86.5400E0    336.10E0
+      86.8900E0    338.08E0
+      87.3200E0    340.60E0
+      87.6500E0    342.57E0
+      88.1000E0    345.08E0
+      88.4300E0    347.02E0
+      88.8300E0    349.52E0
+      89.1200E0    351.44E0
+      89.5400E0    353.93E0
+      89.8500E0    355.83E0
+      90.2500E0    358.32E0
+      90.5500E0    360.20E0
+      90.9300E0    362.67E0
+      91.2000E0    364.53E0
+      91.5500E0    367.00E0
+      92.2000E0    371.30E0
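
[Note: not part of the upstream patch.] The Kirby2 quadratic/quadratic model above is analogous to Hahn1; for variety this sketch uses lmfit's lower-level minimize/Parameters interface instead of Model, with the header's "Start 1" values and data from lines 61 to 211.

    import numpy as np
    from lmfit import Parameters, minimize, fit_report

    def residual(params, x, y):
        p = params.valuesdict()
        model = ((p['b1'] + p['b2']*x + p['b3']*x**2)
                 / (1.0 + p['b4']*x + p['b5']*x**2))
        return model - y

    y, x = np.loadtxt('NIST_STRD/Kirby2.dat', skiprows=60, unpack=True)

    params = Parameters()
    for name, value in [('b1', 2), ('b2', -0.1), ('b3', 0.003),
                        ('b4', -0.001), ('b5', 1e-5)]:
        params.add(name, value=value)

    out = minimize(residual, params, args=(x, y))
    print(fit_report(out))
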
diff --git a/NIST_STRD/Lanczos1.dat b/NIST_STRD/Lanczos1.dat
index 8107320..d23d5e4 100644
--- a/NIST_STRD/Lanczos1.dat
+++ b/NIST_STRD/Lanczos1.dat
@@ -1,84 +1,84 @@
-NIST/ITL StRD
-Dataset Name:  Lanczos1          (Lanczos1.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 46)
-               Certified Values  (lines 41 to 51)
-               Data              (lines 61 to 84)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are taken from an example discussed in
-               Lanczos (1956).  The data were generated to 14-digits
-               of accuracy using
-               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
-                                     + 1.5576*exp(-5*x).
-
-
-Reference:     Lanczos, C. (1956).
-               Applied Analysis.
-               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               24 Observations
-               Average Level of Difficulty
-               Generated Data
-
-Model:         Exponential Class
-               6 Parameters (b1 to b6)
-
-               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
-
-
- 
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1.2         0.5           9.5100000027E-02  5.3347304234E-11
-  b2 =   0.3         0.7           1.0000000001E+00  2.7473038179E-10
-  b3 =   5.6         3.6           8.6070000013E-01  1.3576062225E-10
-  b4 =   5.5         4.2           3.0000000002E+00  3.3308253069E-10
-  b5 =   6.5         4             1.5575999998E+00  1.8815731448E-10
-  b6 =   7.6         6.3           5.0000000001E+00  1.1057500538E-10
-
-Residual Sum of Squares:                    1.4307867721E-25
-Residual Standard Deviation:                8.9156129349E-14
-Degrees of Freedom:                                18
-Number of Observations:                            24
-
-
-
-
-
-
-
-
-Data:   y                   x
-       2.513400000000E+00  0.000000000000E+00
-       2.044333373291E+00  5.000000000000E-02
-       1.668404436564E+00  1.000000000000E-01
-       1.366418021208E+00  1.500000000000E-01
-       1.123232487372E+00  2.000000000000E-01
-       9.268897180037E-01  2.500000000000E-01
-       7.679338563728E-01  3.000000000000E-01
-       6.388775523106E-01  3.500000000000E-01
-       5.337835317402E-01  4.000000000000E-01
-       4.479363617347E-01  4.500000000000E-01
-       3.775847884350E-01  5.000000000000E-01
-       3.197393199326E-01  5.500000000000E-01
-       2.720130773746E-01  6.000000000000E-01
-       2.324965529032E-01  6.500000000000E-01
-       1.996589546065E-01  7.000000000000E-01
-       1.722704126914E-01  7.500000000000E-01
-       1.493405660168E-01  8.000000000000E-01
-       1.300700206922E-01  8.500000000000E-01
-       1.138119324644E-01  9.000000000000E-01
-       1.000415587559E-01  9.500000000000E-01
-       8.833209084540E-02  1.000000000000E+00
-       7.833544019350E-02  1.050000000000E+00
-       6.976693743449E-02  1.100000000000E+00
-       6.239312536719E-02  1.150000000000E+00
+NIST/ITL StRD
+Dataset Name:  Lanczos1          (Lanczos1.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 46)
+               Certified Values  (lines 41 to 51)
+               Data              (lines 61 to 84)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are taken from an example discussed in
+               Lanczos (1956).  The data were generated to 14-digits
+               of accuracy using
+               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
+                                     + 1.5576*exp(-5*x).
+
+
+Reference:     Lanczos, C. (1956).
+               Applied Analysis.
+               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               24 Observations
+               Average Level of Difficulty
+               Generated Data
+
+Model:         Exponential Class
+               6 Parameters (b1 to b6)
+
+               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
+
+
+ 
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1.2         0.5           9.5100000027E-02  5.3347304234E-11
+  b2 =   0.3         0.7           1.0000000001E+00  2.7473038179E-10
+  b3 =   5.6         3.6           8.6070000013E-01  1.3576062225E-10
+  b4 =   5.5         4.2           3.0000000002E+00  3.3308253069E-10
+  b5 =   6.5         4             1.5575999998E+00  1.8815731448E-10
+  b6 =   7.6         6.3           5.0000000001E+00  1.1057500538E-10
+
+Residual Sum of Squares:                    1.4307867721E-25
+Residual Standard Deviation:                8.9156129349E-14
+Degrees of Freedom:                                18
+Number of Observations:                            24
+
+
+
+
+
+
+
+
+Data:   y                   x
+       2.513400000000E+00  0.000000000000E+00
+       2.044333373291E+00  5.000000000000E-02
+       1.668404436564E+00  1.000000000000E-01
+       1.366418021208E+00  1.500000000000E-01
+       1.123232487372E+00  2.000000000000E-01
+       9.268897180037E-01  2.500000000000E-01
+       7.679338563728E-01  3.000000000000E-01
+       6.388775523106E-01  3.500000000000E-01
+       5.337835317402E-01  4.000000000000E-01
+       4.479363617347E-01  4.500000000000E-01
+       3.775847884350E-01  5.000000000000E-01
+       3.197393199326E-01  5.500000000000E-01
+       2.720130773746E-01  6.000000000000E-01
+       2.324965529032E-01  6.500000000000E-01
+       1.996589546065E-01  7.000000000000E-01
+       1.722704126914E-01  7.500000000000E-01
+       1.493405660168E-01  8.000000000000E-01
+       1.300700206922E-01  8.500000000000E-01
+       1.138119324644E-01  9.000000000000E-01
+       1.000415587559E-01  9.500000000000E-01
+       8.833209084540E-02  1.000000000000E+00
+       7.833544019350E-02  1.050000000000E+00
+       6.976693743449E-02  1.100000000000E+00
+       6.239312536719E-02  1.150000000000E+00
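
[Note: not part of the upstream patch.] One sketch covers the whole Lanczos family: Lanczos1, Lanczos2, and Lanczos3 (below) share the same three-exponential model and starting values, differing only in how many digits of the generated data were retained, which is why Lanczos1's residual sum of squares is nearly zero. Data occupy lines 61 to 84 in each file.

    import numpy as np
    from lmfit import Model

    def lanczos(x, b1, b2, b3, b4, b5, b6):
        """Sum of three decaying exponentials (NIST Lanczos1-3)."""
        return b1*np.exp(-b2*x) + b3*np.exp(-b4*x) + b5*np.exp(-b6*x)

    y, x = np.loadtxt('NIST_STRD/Lanczos1.dat', skiprows=60, unpack=True)

    # "Start 1" values; swap in Lanczos2.dat or Lanczos3.dat unchanged
    result = Model(lanczos).fit(y, x=x, b1=1.2, b2=0.3, b3=5.6,
                                b4=5.5, b5=6.5, b6=7.6)
    print(result.fit_report())
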
diff --git a/NIST_STRD/Lanczos2.dat b/NIST_STRD/Lanczos2.dat
index fc98e69..f9f2b4b 100644
--- a/NIST_STRD/Lanczos2.dat
+++ b/NIST_STRD/Lanczos2.dat
@@ -1,84 +1,84 @@
-NIST/ITL StRD
-Dataset Name:  Lanczos2          (Lanczos2.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 46)
-               Certified Values  (lines 41 to 51)
-               Data              (lines 61 to 84)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are taken from an example discussed in
-               Lanczos (1956).  The data were generated to 6-digits
-               of accuracy using
-               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
-                                     + 1.5576*exp(-5*x).
-
-
-Reference:     Lanczos, C. (1956).
-               Applied Analysis.
-               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               24 Observations
-               Average Level of Difficulty
-               Generated Data
- 
-Model:         Exponential Class
-               6 Parameters (b1 to b6)
- 
-               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
- 
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1.2         0.5           9.6251029939E-02  6.6770575477E-04
-  b2 =   0.3         0.7           1.0057332849E+00  3.3989646176E-03
-  b3 =   5.6         3.6           8.6424689056E-01  1.7185846685E-03
-  b4 =   5.5         4.2           3.0078283915E+00  4.1707005856E-03
-  b5 =   6.5         4             1.5529016879E+00  2.3744381417E-03
-  b6 =   7.6         6.3           5.0028798100E+00  1.3958787284E-03
-
-Residual Sum of Squares:                    2.2299428125E-11
-Residual Standard Deviation:                1.1130395851E-06
-Degrees of Freedom:                                18
-Number of Observations:                            24
-
-
-
-
-
-
-
-
-Data:   y            x
-       2.51340E+00  0.00000E+00
-       2.04433E+00  5.00000E-02
-       1.66840E+00  1.00000E-01
-       1.36642E+00  1.50000E-01
-       1.12323E+00  2.00000E-01
-       9.26890E-01  2.50000E-01
-       7.67934E-01  3.00000E-01
-       6.38878E-01  3.50000E-01
-       5.33784E-01  4.00000E-01
-       4.47936E-01  4.50000E-01
-       3.77585E-01  5.00000E-01
-       3.19739E-01  5.50000E-01
-       2.72013E-01  6.00000E-01
-       2.32497E-01  6.50000E-01
-       1.99659E-01  7.00000E-01
-       1.72270E-01  7.50000E-01
-       1.49341E-01  8.00000E-01
-       1.30070E-01  8.50000E-01
-       1.13812E-01  9.00000E-01
-       1.00042E-01  9.50000E-01
-       8.83321E-02  1.00000E+00
-       7.83354E-02  1.05000E+00
-       6.97669E-02  1.10000E+00
-       6.23931E-02  1.15000E+00
+NIST/ITL StRD
+Dataset Name:  Lanczos2          (Lanczos2.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 46)
+               Certified Values  (lines 41 to 51)
+               Data              (lines 61 to 84)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are taken from an example discussed in
+               Lanczos (1956).  The data were generated to 6-digits
+               of accuracy using
+               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
+                                     + 1.5576*exp(-5*x).
+
+
+Reference:     Lanczos, C. (1956).
+               Applied Analysis.
+               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               24 Observations
+               Average Level of Difficulty
+               Generated Data
+ 
+Model:         Exponential Class
+               6 Parameters (b1 to b6)
+ 
+               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
+ 
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1.2         0.5           9.6251029939E-02  6.6770575477E-04
+  b2 =   0.3         0.7           1.0057332849E+00  3.3989646176E-03
+  b3 =   5.6         3.6           8.6424689056E-01  1.7185846685E-03
+  b4 =   5.5         4.2           3.0078283915E+00  4.1707005856E-03
+  b5 =   6.5         4             1.5529016879E+00  2.3744381417E-03
+  b6 =   7.6         6.3           5.0028798100E+00  1.3958787284E-03
+
+Residual Sum of Squares:                    2.2299428125E-11
+Residual Standard Deviation:                1.1130395851E-06
+Degrees of Freedom:                                18
+Number of Observations:                            24
+
+
+
+
+
+
+
+
+Data:   y            x
+       2.51340E+00  0.00000E+00
+       2.04433E+00  5.00000E-02
+       1.66840E+00  1.00000E-01
+       1.36642E+00  1.50000E-01
+       1.12323E+00  2.00000E-01
+       9.26890E-01  2.50000E-01
+       7.67934E-01  3.00000E-01
+       6.38878E-01  3.50000E-01
+       5.33784E-01  4.00000E-01
+       4.47936E-01  4.50000E-01
+       3.77585E-01  5.00000E-01
+       3.19739E-01  5.50000E-01
+       2.72013E-01  6.00000E-01
+       2.32497E-01  6.50000E-01
+       1.99659E-01  7.00000E-01
+       1.72270E-01  7.50000E-01
+       1.49341E-01  8.00000E-01
+       1.30070E-01  8.50000E-01
+       1.13812E-01  9.00000E-01
+       1.00042E-01  9.50000E-01
+       8.83321E-02  1.00000E+00
+       7.83354E-02  1.05000E+00
+       6.97669E-02  1.10000E+00
+       6.23931E-02  1.15000E+00
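
For reference, the model above can be fit directly with this package; a minimal sketch, assuming numpy and lmfit are importable and the path is relative to the repository root (this is an illustrative example, not the package's own NIST test driver; skiprows follows the "Data (lines 61 to 84)" offset in the file header):

    import numpy as np
    from lmfit import Parameters, minimize

    # columns are y then x; data rows begin at file line 61
    y, x = np.loadtxt('NIST_STRD/Lanczos2.dat', skiprows=60, unpack=True)

    def residual(pars, x, y):
        # y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)
        p = pars.valuesdict()
        return (p['b1']*np.exp(-p['b2']*x) + p['b3']*np.exp(-p['b4']*x)
                + p['b5']*np.exp(-p['b6']*x)) - y

    params = Parameters()
    for name, val in zip(('b1', 'b2', 'b3', 'b4', 'b5', 'b6'),
                         (1.2, 0.3, 5.6, 5.5, 6.5, 7.6)):   # Start 1
        params.add(name, value=val)

    out = minimize(residual, params, args=(x, y))
    # out.params['b1'] should land near the certified 9.6251029939E-02
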
diff --git a/NIST_STRD/Lanczos3.dat b/NIST_STRD/Lanczos3.dat
index d930d65..67c1512 100644
--- a/NIST_STRD/Lanczos3.dat
+++ b/NIST_STRD/Lanczos3.dat
@@ -1,84 +1,84 @@
-NIST/ITL StRD
-Dataset Name:  Lanczos3          (Lanczos3.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 46)
-               Certified Values  (lines 41 to 51)
-               Data              (lines 61 to 84)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are taken from an example discussed in
-               Lanczos (1956).  The data were generated to 5-digits
-               of accuracy using
-               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
-                                     + 1.5576*exp(-5*x).
-
-
-Reference:     Lanczos, C. (1956).
-               Applied Analysis.
-               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
-
-
-
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               24 Observations
-               Lower Level of Difficulty
-               Generated Data
- 
-Model:         Exponential Class
-               6 Parameters (b1 to b6)
- 
-               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1.2         0.5           8.6816414977E-02  1.7197908859E-02
-  b2 =   0.3         0.7           9.5498101505E-01  9.7041624475E-02
-  b3 =   5.6         3.6           8.4400777463E-01  4.1488663282E-02
-  b4 =   5.5         4.2           2.9515951832E+00  1.0766312506E-01
-  b5 =   6.5         4             1.5825685901E+00  5.8371576281E-02
-  b6 =   7.6         6.3           4.9863565084E+00  3.4436403035E-02
-
-Residual Sum of Squares:                    1.6117193594E-08
-Residual Standard Deviation:                2.9923229172E-05
-Degrees of Freedom:                                18
-Number of Observations:                            24
-
-
-
-
-
-
-
-
-Data:   y           x
-       2.5134E+00  0.00000E+00
-       2.0443E+00  5.00000E-02
-       1.6684E+00  1.00000E-01
-       1.3664E+00  1.50000E-01
-       1.1232E+00  2.00000E-01
-       0.9269E+00  2.50000E-01
-       0.7679E+00  3.00000E-01
-       0.6389E+00  3.50000E-01
-       0.5338E+00  4.00000E-01
-       0.4479E+00  4.50000E-01
-       0.3776E+00  5.00000E-01
-       0.3197E+00  5.50000E-01
-       0.2720E+00  6.00000E-01
-       0.2325E+00  6.50000E-01
-       0.1997E+00  7.00000E-01
-       0.1723E+00  7.50000E-01
-       0.1493E+00  8.00000E-01
-       0.1301E+00  8.50000E-01
-       0.1138E+00  9.00000E-01
-       0.1000E+00  9.50000E-01
-       0.0883E+00  1.00000E+00
-       0.0783E+00  1.05000E+00
-       0.0698E+00  1.10000E+00
-       0.0624E+00  1.15000E+00
+NIST/ITL StRD
+Dataset Name:  Lanczos3          (Lanczos3.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 46)
+               Certified Values  (lines 41 to 51)
+               Data              (lines 61 to 84)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are taken from an example discussed in
+               Lanczos (1956).  The data were generated to 5-digits
+               of accuracy using
+               f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x) 
+                                     + 1.5576*exp(-5*x).
+
+
+Reference:     Lanczos, C. (1956).
+               Applied Analysis.
+               Englewood Cliffs, NJ:  Prentice Hall, pp. 272-280.
+
+
+
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               24 Observations
+               Lower Level of Difficulty
+               Generated Data
+ 
+Model:         Exponential Class
+               6 Parameters (b1 to b6)
+ 
+               y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x)  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1.2         0.5           8.6816414977E-02  1.7197908859E-02
+  b2 =   0.3         0.7           9.5498101505E-01  9.7041624475E-02
+  b3 =   5.6         3.6           8.4400777463E-01  4.1488663282E-02
+  b4 =   5.5         4.2           2.9515951832E+00  1.0766312506E-01
+  b5 =   6.5         4             1.5825685901E+00  5.8371576281E-02
+  b6 =   7.6         6.3           4.9863565084E+00  3.4436403035E-02
+
+Residual Sum of Squares:                    1.6117193594E-08
+Residual Standard Deviation:                2.9923229172E-05
+Degrees of Freedom:                                18
+Number of Observations:                            24
+
+
+
+
+
+
+
+
+Data:   y           x
+       2.5134E+00  0.00000E+00
+       2.0443E+00  5.00000E-02
+       1.6684E+00  1.00000E-01
+       1.3664E+00  1.50000E-01
+       1.1232E+00  2.00000E-01
+       0.9269E+00  2.50000E-01
+       0.7679E+00  3.00000E-01
+       0.6389E+00  3.50000E-01
+       0.5338E+00  4.00000E-01
+       0.4479E+00  4.50000E-01
+       0.3776E+00  5.00000E-01
+       0.3197E+00  5.50000E-01
+       0.2720E+00  6.00000E-01
+       0.2325E+00  6.50000E-01
+       0.1997E+00  7.00000E-01
+       0.1723E+00  7.50000E-01
+       0.1493E+00  8.00000E-01
+       0.1301E+00  8.50000E-01
+       0.1138E+00  9.00000E-01
+       0.1000E+00  9.50000E-01
+       0.0883E+00  1.00000E+00
+       0.0783E+00  1.05000E+00
+       0.0698E+00  1.10000E+00
+       0.0624E+00  1.15000E+00
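
The model is the same triple exponential as Lanczos2, only generated to 5-digit accuracy; a sketch using the Model wrapper instead of an explicit residual (same loading and import assumptions as above):

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/Lanczos3.dat', skiprows=60, unpack=True)

    def lanczos(x, b1, b2, b3, b4, b5, b6):
        return (b1*np.exp(-b2*x) + b3*np.exp(-b4*x)
                + b5*np.exp(-b6*x))

    result = Model(lanczos).fit(y, x=x, b1=1.2, b2=0.3, b3=5.6,
                                b4=5.5, b5=6.5, b6=7.6)   # Start 1
    print(result.fit_report())
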
diff --git a/NIST_STRD/MGH09.dat b/NIST_STRD/MGH09.dat
index 1f19af8..55a2d42 100644
--- a/NIST_STRD/MGH09.dat
+++ b/NIST_STRD/MGH09.dat
@@ -1,71 +1,71 @@
-NIST/ITL StRD
-Dataset Name:  MGH09             (MGH09.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 44)
-               Certified Values  (lines 41 to 49)
-               Data              (lines 61 to 71)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   This problem was found to be difficult for some very 
-               good algorithms.  There is a local minimum at (+inf,
-               -14.07..., -inf, -inf) with final sum of squares 
-               0.00102734....
-
-               See More, J. J., Garbow, B. S., and Hillstrom, K. E. 
-               (1981).  Testing unconstrained optimization software.
-               ACM Transactions on Mathematical Software. 7(1): 
-               pp. 17-41.
-
-Reference:     Kowalik, J.S., and M. R. Osborne, (1978).  
-               Methods for Unconstrained Optimization Problems.  
-               New York, NY:  Elsevier North-Holland.
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               11 Observations
-               Higher Level of Difficulty
-               Generated Data
- 
-Model:         Rational Class (linear/quadratic)
-               4 Parameters (b1 to b4)
- 
-               y = b1*(x**2+x*b2) / (x**2+x*b3+b4)  +  e
- 
-
- 
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   25          0.25          1.9280693458E-01  1.1435312227E-02
-  b2 =   39          0.39          1.9128232873E-01  1.9633220911E-01
-  b3 =   41.5        0.415         1.2305650693E-01  8.0842031232E-02
-  b4 =   39          0.39          1.3606233068E-01  9.0025542308E-02
-
-Residual Sum of Squares:                    3.0750560385E-04
-Residual Standard Deviation:                6.6279236551E-03
-Degrees of Freedom:                                7
-Number of Observations:                           11
- 
- 
-
-
-
-
-
- 
- 
- 
-Data:  y               x
-       1.957000E-01    4.000000E+00
-       1.947000E-01    2.000000E+00
-       1.735000E-01    1.000000E+00
-       1.600000E-01    5.000000E-01
-       8.440000E-02    2.500000E-01
-       6.270000E-02    1.670000E-01
-       4.560000E-02    1.250000E-01
-       3.420000E-02    1.000000E-01
-       3.230000E-02    8.330000E-02
-       2.350000E-02    7.140000E-02
-       2.460000E-02    6.250000E-02
+NIST/ITL StRD
+Dataset Name:  MGH09             (MGH09.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 44)
+               Certified Values  (lines 41 to 49)
+               Data              (lines 61 to 71)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   This problem was found to be difficult for some very 
+               good algorithms.  There is a local minimum at (+inf,
+               -14.07..., -inf, -inf) with final sum of squares 
+               0.00102734....
+
+               See More, J. J., Garbow, B. S., and Hillstrom, K. E. 
+               (1981).  Testing unconstrained optimization software.
+               ACM Transactions on Mathematical Software. 7(1): 
+               pp. 17-41.
+
+Reference:     Kowalik, J.S., and M. R. Osborne, (1978).  
+               Methods for Unconstrained Optimization Problems.  
+               New York, NY:  Elsevier North-Holland.
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               11 Observations
+               Higher Level of Difficulty
+               Generated Data
+ 
+Model:         Rational Class (linear/quadratic)
+               4 Parameters (b1 to b4)
+ 
+               y = b1*(x**2+x*b2) / (x**2+x*b3+b4)  +  e
+ 
+
+ 
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   25          0.25          1.9280693458E-01  1.1435312227E-02
+  b2 =   39          0.39          1.9128232873E-01  1.9633220911E-01
+  b3 =   41.5        0.415         1.2305650693E-01  8.0842031232E-02
+  b4 =   39          0.39          1.3606233068E-01  9.0025542308E-02
+
+Residual Sum of Squares:                    3.0750560385E-04
+Residual Standard Deviation:                6.6279236551E-03
+Degrees of Freedom:                                7
+Number of Observations:                           11
+ 
+ 
+
+
+
+
+
+ 
+ 
+ 
+Data:  y               x
+       1.957000E-01    4.000000E+00
+       1.947000E-01    2.000000E+00
+       1.735000E-01    1.000000E+00
+       1.600000E-01    5.000000E-01
+       8.440000E-02    2.500000E-01
+       6.270000E-02    1.670000E-01
+       4.560000E-02    1.250000E-01
+       3.420000E-02    1.000000E-01
+       3.230000E-02    8.330000E-02
+       2.350000E-02    7.140000E-02
+       2.460000E-02    6.250000E-02
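
Given the local minimum described above, Start 2 is the safer starting point; a hedged sketch (illustrative only, same loading assumptions as before):

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/MGH09.dat', skiprows=60, unpack=True)

    def mgh09(x, b1, b2, b3, b4):
        # rational model: b1*(x**2 + x*b2) / (x**2 + x*b3 + b4)
        return b1*(x**2 + x*b2) / (x**2 + x*b3 + b4)

    # Start 2; from Start 1 some solvers head for the local minimum
    # at (+inf, -14.07..., -inf, -inf) noted in the description
    result = Model(mgh09).fit(y, x=x, b1=0.25, b2=0.39, b3=0.415, b4=0.39)
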
diff --git a/NIST_STRD/MGH10.dat b/NIST_STRD/MGH10.dat
index df88ea4..b2ffbec 100644
--- a/NIST_STRD/MGH10.dat
+++ b/NIST_STRD/MGH10.dat
@@ -1,76 +1,76 @@
-NIST/ITL StRD
-Dataset Name:  MGH10             (MGH10.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 43)
-               Certified Values  (lines 41 to 48)
-               Data              (lines 61 to 76)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   This problem was found to be difficult for some very
-               good algorithms.
-
-               See More, J. J., Garbow, B. S., and Hillstrom, K. E. 
-               (1981).  Testing unconstrained optimization software.
-               ACM Transactions on Mathematical Software. 7(1): 
-               pp. 17-41.
-
-Reference:     Meyer, R. R. (1970).  
-               Theoretical and computational aspects of nonlinear 
-               regression.  In Nonlinear Programming, Rosen, 
-               Mangasarian and Ritter (Eds).  
-               New York, NY: Academic Press, pp. 465-486.
-
-Data:          1 Response  (y)
-               1 Predictor (x)
-               16 Observations
-               Higher Level of Difficulty
-               Generated Data
- 
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
- 
-               y = b1 * exp[b2/(x+b3)]  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =        2         0.02       5.6096364710E-03  1.5687892471E-04
-  b2 =   400000      4000          6.1813463463E+03  2.3309021107E+01
-  b3 =    25000       250          3.4522363462E+02  7.8486103508E-01
-
-Residual Sum of Squares:                    8.7945855171E+01
-Residual Standard Deviation:                2.6009740065E+00
-Degrees of Freedom:                                13
-Number of Observations:                            16
-
-
-
-
-
-
-
-
-
-
-
-Data:  y               x
-      3.478000E+04    5.000000E+01
-      2.861000E+04    5.500000E+01
-      2.365000E+04    6.000000E+01
-      1.963000E+04    6.500000E+01
-      1.637000E+04    7.000000E+01
-      1.372000E+04    7.500000E+01
-      1.154000E+04    8.000000E+01
-      9.744000E+03    8.500000E+01
-      8.261000E+03    9.000000E+01
-      7.030000E+03    9.500000E+01
-      6.005000E+03    1.000000E+02
-      5.147000E+03    1.050000E+02
-      4.427000E+03    1.100000E+02
-      3.820000E+03    1.150000E+02
-      3.307000E+03    1.200000E+02
-      2.872000E+03    1.250000E+02
+NIST/ITL StRD
+Dataset Name:  MGH10             (MGH10.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 43)
+               Certified Values  (lines 41 to 48)
+               Data              (lines 61 to 76)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   This problem was found to be difficult for some very
+               good algorithms.
+
+               See More, J. J., Garbow, B. S., and Hillstrom, K. E. 
+               (1981).  Testing unconstrained optimization software.
+               ACM Transactions on Mathematical Software. 7(1): 
+               pp. 17-41.
+
+Reference:     Meyer, R. R. (1970).  
+               Theoretical and computational aspects of nonlinear 
+               regression.  In Nonlinear Programming, Rosen, 
+               Mangasarian and Ritter (Eds).  
+               New York, NY: Academic Press, pp. 465-486.
+
+Data:          1 Response  (y)
+               1 Predictor (x)
+               16 Observations
+               Higher Level of Difficulty
+               Generated Data
+ 
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+ 
+               y = b1 * exp[b2/(x+b3)]  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =        2         0.02       5.6096364710E-03  1.5687892471E-04
+  b2 =   400000      4000          6.1813463463E+03  2.3309021107E+01
+  b3 =    25000       250          3.4522363462E+02  7.8486103508E-01
+
+Residual Sum of Squares:                    8.7945855171E+01
+Residual Standard Deviation:                2.6009740065E+00
+Degrees of Freedom:                                13
+Number of Observations:                            16
+
+
+
+
+
+
+
+
+
+
+
+Data:  y               x
+      3.478000E+04    5.000000E+01
+      2.861000E+04    5.500000E+01
+      2.365000E+04    6.000000E+01
+      1.963000E+04    6.500000E+01
+      1.637000E+04    7.000000E+01
+      1.372000E+04    7.500000E+01
+      1.154000E+04    8.000000E+01
+      9.744000E+03    8.500000E+01
+      8.261000E+03    9.000000E+01
+      7.030000E+03    9.500000E+01
+      6.005000E+03    1.000000E+02
+      5.147000E+03    1.050000E+02
+      4.427000E+03    1.100000E+02
+      3.820000E+03    1.150000E+02
+      3.307000E+03    1.200000E+02
+      2.872000E+03    1.250000E+02
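
A sketch of the three-parameter exponential fit from Start 2 (the Start 1 values of 2 / 400000 / 25000 are deliberately far from the solution, which is part of what makes this problem hard):

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/MGH10.dat', skiprows=60, unpack=True)

    def mgh10(x, b1, b2, b3):
        # y = b1 * exp[b2/(x+b3)]
        return b1*np.exp(b2/(x + b3))

    result = Model(mgh10).fit(y, x=x, b1=0.02, b2=4000.0, b3=250.0)  # Start 2
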
diff --git a/NIST_STRD/MGH17.dat b/NIST_STRD/MGH17.dat
index 3b3b7e8..584f73c 100644
--- a/NIST_STRD/MGH17.dat
+++ b/NIST_STRD/MGH17.dat
@@ -1,93 +1,93 @@
-NIST/ITL StRD
-Dataset Name:  MGH17             (MGH17.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 45)
-               Certified Values  (lines 41 to 50)
-               Data              (lines 61 to 93)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   This problem was found to be difficult for some very
-               good algorithms.
-
-               See More, J. J., Garbow, B. S., and Hillstrom, K. E.
-               (1981).  Testing unconstrained optimization software.
-               ACM Transactions on Mathematical Software. 7(1):
-               pp. 17-41.
-
-Reference:     Osborne, M. R. (1972).  
-               Some aspects of nonlinear least squares 
-               calculations.  In Numerical Methods for Nonlinear 
-               Optimization, Lootsma (Ed).  
-               New York, NY:  Academic Press, pp. 171-189.
- 
-Data:          1 Response  (y)
-               1 Predictor (x)
-               33 Observations
-               Average Level of Difficulty
-               Generated Data
-
-Model:         Exponential Class
-               5 Parameters (b1 to b5)
-
-               y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5]  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =     50         0.5          3.7541005211E-01  2.0723153551E-03
-  b2 =    150         1.5          1.9358469127E+00  2.2031669222E-01
-  b3 =   -100        -1           -1.4646871366E+00  2.2175707739E-01
-  b4 =      1          0.01        1.2867534640E-02  4.4861358114E-04
-  b5 =      2          0.02        2.2122699662E-02  8.9471996575E-04
-
-Residual Sum of Squares:                    5.4648946975E-05
-Residual Standard Deviation:                1.3970497866E-03
-Degrees of Freedom:                                28
-Number of Observations:                            33
-
-
-
-
-
-
-
-
-
-Data:  y               x
-      8.440000E-01    0.000000E+00
-      9.080000E-01    1.000000E+01
-      9.320000E-01    2.000000E+01
-      9.360000E-01    3.000000E+01
-      9.250000E-01    4.000000E+01
-      9.080000E-01    5.000000E+01
-      8.810000E-01    6.000000E+01
-      8.500000E-01    7.000000E+01
-      8.180000E-01    8.000000E+01
-      7.840000E-01    9.000000E+01
-      7.510000E-01    1.000000E+02
-      7.180000E-01    1.100000E+02
-      6.850000E-01    1.200000E+02
-      6.580000E-01    1.300000E+02
-      6.280000E-01    1.400000E+02
-      6.030000E-01    1.500000E+02
-      5.800000E-01    1.600000E+02
-      5.580000E-01    1.700000E+02
-      5.380000E-01    1.800000E+02
-      5.220000E-01    1.900000E+02
-      5.060000E-01    2.000000E+02
-      4.900000E-01    2.100000E+02
-      4.780000E-01    2.200000E+02
-      4.670000E-01    2.300000E+02
-      4.570000E-01    2.400000E+02
-      4.480000E-01    2.500000E+02
-      4.380000E-01    2.600000E+02
-      4.310000E-01    2.700000E+02
-      4.240000E-01    2.800000E+02
-      4.200000E-01    2.900000E+02
-      4.140000E-01    3.000000E+02
-      4.110000E-01    3.100000E+02
-      4.060000E-01    3.200000E+02
+NIST/ITL StRD
+Dataset Name:  MGH17             (MGH17.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 45)
+               Certified Values  (lines 41 to 50)
+               Data              (lines 61 to 93)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   This problem was found to be difficult for some very
+               good algorithms.
+
+               See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+               (1981).  Testing unconstrained optimization software.
+               ACM Transactions on Mathematical Software. 7(1):
+               pp. 17-41.
+
+Reference:     Osborne, M. R. (1972).  
+               Some aspects of nonlinear least squares 
+               calculations.  In Numerical Methods for Nonlinear 
+               Optimization, Lootsma (Ed).  
+               New York, NY:  Academic Press, pp. 171-189.
+ 
+Data:          1 Response  (y)
+               1 Predictor (x)
+               33 Observations
+               Average Level of Difficulty
+               Generated Data
+
+Model:         Exponential Class
+               5 Parameters (b1 to b5)
+
+               y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5]  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =     50         0.5          3.7541005211E-01  2.0723153551E-03
+  b2 =    150         1.5          1.9358469127E+00  2.2031669222E-01
+  b3 =   -100        -1           -1.4646871366E+00  2.2175707739E-01
+  b4 =      1          0.01        1.2867534640E-02  4.4861358114E-04
+  b5 =      2          0.02        2.2122699662E-02  8.9471996575E-04
+
+Residual Sum of Squares:                    5.4648946975E-05
+Residual Standard Deviation:                1.3970497866E-03
+Degrees of Freedom:                                28
+Number of Observations:                            33
+
+
+
+
+
+
+
+
+
+Data:  y               x
+      8.440000E-01    0.000000E+00
+      9.080000E-01    1.000000E+01
+      9.320000E-01    2.000000E+01
+      9.360000E-01    3.000000E+01
+      9.250000E-01    4.000000E+01
+      9.080000E-01    5.000000E+01
+      8.810000E-01    6.000000E+01
+      8.500000E-01    7.000000E+01
+      8.180000E-01    8.000000E+01
+      7.840000E-01    9.000000E+01
+      7.510000E-01    1.000000E+02
+      7.180000E-01    1.100000E+02
+      6.850000E-01    1.200000E+02
+      6.580000E-01    1.300000E+02
+      6.280000E-01    1.400000E+02
+      6.030000E-01    1.500000E+02
+      5.800000E-01    1.600000E+02
+      5.580000E-01    1.700000E+02
+      5.380000E-01    1.800000E+02
+      5.220000E-01    1.900000E+02
+      5.060000E-01    2.000000E+02
+      4.900000E-01    2.100000E+02
+      4.780000E-01    2.200000E+02
+      4.670000E-01    2.300000E+02
+      4.570000E-01    2.400000E+02
+      4.480000E-01    2.500000E+02
+      4.380000E-01    2.600000E+02
+      4.310000E-01    2.700000E+02
+      4.240000E-01    2.800000E+02
+      4.200000E-01    2.900000E+02
+      4.140000E-01    3.000000E+02
+      4.110000E-01    3.100000E+02
+      4.060000E-01    3.200000E+02
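
A sketch of the five-parameter double-exponential-plus-offset fit, again from Start 2 (illustrative, same assumptions as the earlier sketches):

    import numpy as np
    from lmfit import Parameters, minimize

    y, x = np.loadtxt('NIST_STRD/MGH17.dat', skiprows=60, unpack=True)

    def residual(pars, x, y):
        # y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5]
        p = pars.valuesdict()
        return (p['b1'] + p['b2']*np.exp(-x*p['b4'])
                + p['b3']*np.exp(-x*p['b5'])) - y

    params = Parameters()
    for name, val in zip(('b1', 'b2', 'b3', 'b4', 'b5'),
                         (0.5, 1.5, -1.0, 0.01, 0.02)):   # Start 2
        params.add(name, value=val)

    out = minimize(residual, params, args=(x, y))
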
diff --git a/NIST_STRD/Misra1a.dat b/NIST_STRD/Misra1a.dat
index 332f37e..24f92a8 100644
--- a/NIST_STRD/Misra1a.dat
+++ b/NIST_STRD/Misra1a.dat
@@ -1,74 +1,74 @@
-NIST/ITL StRD
-Dataset Name:  Misra1a           (Misra1a.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 74)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study regarding
-               dental research in monomolecular adsorption.  The
-               response variable is volume, and the predictor
-               variable is pressure.
-
-Reference:     Misra, D., NIST (1978).  
-               Dental Research Monomolecular Adsorption Study.
-
- 
-
-
-
-
-
-Data:          1 Response Variable  (y = volume)
-               1 Predictor Variable (x = pressure)
-               14 Observations
-               Lower Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               2 Parameters (b1 and b2)
-
-               y = b1*(1-exp[-b2*x])  +  e
-
-
- 
-          Starting values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   500         250           2.3894212918E+02  2.7070075241E+00
-  b2 =     0.0001      0.0005      5.5015643181E-04  7.2668688436E-06
-
-Residual Sum of Squares:                    1.2455138894E-01
-Residual Standard Deviation:                1.0187876330E-01
-Degrees of Freedom:                                12
-Number of Observations:                            14
-
-
-
-
-
-
-
-
-
-
-
-
-Data:   y               x
-      10.07E0      77.6E0
-      14.73E0     114.9E0
-      17.94E0     141.1E0
-      23.93E0     190.8E0
-      29.61E0     239.9E0
-      35.18E0     289.0E0
-      40.02E0     332.8E0
-      44.82E0     378.4E0
-      50.76E0     434.8E0
-      55.05E0     477.3E0
-      61.01E0     536.8E0
-      66.40E0     593.1E0
-      75.47E0     689.1E0
-      81.78E0     760.0E0
+NIST/ITL StRD
+Dataset Name:  Misra1a           (Misra1a.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 74)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study regarding
+               dental research in monomolecular adsorption.  The
+               response variable is volume, and the predictor
+               variable is pressure.
+
+Reference:     Misra, D., NIST (1978).  
+               Dental Research Monomolecular Adsorption Study.
+
+ 
+
+
+
+
+
+Data:          1 Response Variable  (y = volume)
+               1 Predictor Variable (x = pressure)
+               14 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               2 Parameters (b1 and b2)
+
+               y = b1*(1-exp[-b2*x])  +  e
+
+
+ 
+          Starting values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   500         250           2.3894212918E+02  2.7070075241E+00
+  b2 =     0.0001      0.0005      5.5015643181E-04  7.2668688436E-06
+
+Residual Sum of Squares:                    1.2455138894E-01
+Residual Standard Deviation:                1.0187876330E-01
+Degrees of Freedom:                                12
+Number of Observations:                            14
+
+
+
+
+
+
+
+
+
+
+
+
+Data:   y               x
+      10.07E0      77.6E0
+      14.73E0     114.9E0
+      17.94E0     141.1E0
+      23.93E0     190.8E0
+      29.61E0     239.9E0
+      35.18E0     289.0E0
+      40.02E0     332.8E0
+      44.82E0     378.4E0
+      50.76E0     434.8E0
+      55.05E0     477.3E0
+      61.01E0     536.8E0
+      66.40E0     593.1E0
+      75.47E0     689.1E0
+      81.78E0     760.0E0
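
A minimal sketch of the two-parameter saturating-exponential fit (same assumptions as above):

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/Misra1a.dat', skiprows=60, unpack=True)

    def misra1a(x, b1, b2):
        # y = b1*(1 - exp[-b2*x])
        return b1*(1.0 - np.exp(-b2*x))

    result = Model(misra1a).fit(y, x=x, b1=500.0, b2=0.0001)  # Start 1
    print(result.fit_report())
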
diff --git a/NIST_STRD/Misra1b.dat b/NIST_STRD/Misra1b.dat
index 7923d40..a0da9d3 100644
--- a/NIST_STRD/Misra1b.dat
+++ b/NIST_STRD/Misra1b.dat
@@ -1,74 +1,74 @@
-NIST/ITL StRD
-Dataset Name:  Misra1b           (Misra1b.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 74)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study regarding
-               dental research in monomolecular adsorption.  The
-               response variable is volume, and the predictor
-               variable is pressure.
-
-Reference:     Misra, D., NIST (1978).  
-               Dental Research Monomolecular Adsorption Study.
-
-
-
-
-
-
-
-Data:          1 Response  (y = volume)
-               1 Predictor (x = pressure)
-               14 Observations
-               Lower Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               2 Parameters (b1 and b2)
-
-               y = b1 * (1-(1+b2*x/2)**(-2))  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   500         300           3.3799746163E+02  3.1643950207E+00
-  b2 =     0.0001      0.0002      3.9039091287E-04  4.2547321834E-06
- 
-Residual Sum of Squares:                    7.5464681533E-02
-Residual Standard Deviation:                7.9301471998E-02
-Degrees of Freedom:                                12
-Number of Observations:                            14
-
-
-
-
-
-
-
-
-
-
- 
- 
-Data:   y               x
-      10.07E0      77.6E0
-      14.73E0     114.9E0
-      17.94E0     141.1E0
-      23.93E0     190.8E0
-      29.61E0     239.9E0
-      35.18E0     289.0E0
-      40.02E0     332.8E0
-      44.82E0     378.4E0
-      50.76E0     434.8E0
-      55.05E0     477.3E0
-      61.01E0     536.8E0
-      66.40E0     593.1E0
-      75.47E0     689.1E0
-      81.78E0     760.0E0
+NIST/ITL StRD
+Dataset Name:  Misra1b           (Misra1b.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 74)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study regarding
+               dental research in monomolecular adsorption.  The
+               response variable is volume, and the predictor
+               variable is pressure.
+
+Reference:     Misra, D., NIST (1978).  
+               Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data:          1 Response  (y = volume)
+               1 Predictor (x = pressure)
+               14 Observations
+               Lower Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               2 Parameters (b1 and b2)
+
+               y = b1 * (1-(1+b2*x/2)**(-2))  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   500         300           3.3799746163E+02  3.1643950207E+00
+  b2 =     0.0001      0.0002      3.9039091287E-04  4.2547321834E-06
+ 
+Residual Sum of Squares:                    7.5464681533E-02
+Residual Standard Deviation:                7.9301471998E-02
+Degrees of Freedom:                                12
+Number of Observations:                            14
+
+
+
+
+
+
+
+
+
+
+ 
+ 
+Data:   y               x
+      10.07E0      77.6E0
+      14.73E0     114.9E0
+      17.94E0     141.1E0
+      23.93E0     190.8E0
+      29.61E0     239.9E0
+      35.18E0     289.0E0
+      40.02E0     332.8E0
+      44.82E0     378.4E0
+      50.76E0     434.8E0
+      55.05E0     477.3E0
+      61.01E0     536.8E0
+      66.40E0     593.1E0
+      75.47E0     689.1E0
+      81.78E0     760.0E0
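
Only the model function changes relative to Misra1a; a sketch:

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/Misra1b.dat', skiprows=60, unpack=True)

    def misra1b(x, b1, b2):
        # y = b1*(1 - (1 + b2*x/2)**(-2))
        return b1*(1.0 - (1.0 + b2*x/2.0)**(-2))

    result = Model(misra1b).fit(y, x=x, b1=500.0, b2=0.0001)  # Start 1
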
diff --git a/NIST_STRD/Misra1c.dat b/NIST_STRD/Misra1c.dat
index d86bc82..64681d3 100644
--- a/NIST_STRD/Misra1c.dat
+++ b/NIST_STRD/Misra1c.dat
@@ -1,74 +1,74 @@
-NIST/ITL StRD
-Dataset Name:  Misra1c           (Misra1c.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 74)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study regarding
-               dental research in monomolecular adsorption.  The
-               response variable is volume, and the predictor
-               variable is pressure.
-
-Reference:     Misra, D., NIST (1978).  
-               Dental Research Monomolecular Adsorption.
-
-
-
-
-
-
-
-Data:          1 Response  (y = volume)
-               1 Predictor (x = pressure)
-               14 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               2 Parameters (b1 and b2)
-
-               y = b1 * (1-(1+2*b2*x)**(-.5))  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   500         600           6.3642725809E+02  4.6638326572E+00
-  b2 =     0.0001      0.0002      2.0813627256E-04  1.7728423155E-06
-  
-Residual Sum of Squares:                    4.0966836971E-02
-Residual Standard Deviation:                5.8428615257E-02
-Degrees of Freedom:                                12
-Number of Observations:                            14
- 
-
- 
- 
- 
- 
- 
- 
- 
-  
-  
-  
-Data:   y            x 
-      10.07E0      77.6E0
-      14.73E0     114.9E0
-      17.94E0     141.1E0
-      23.93E0     190.8E0
-      29.61E0     239.9E0
-      35.18E0     289.0E0
-      40.02E0     332.8E0
-      44.82E0     378.4E0
-      50.76E0     434.8E0
-      55.05E0     477.3E0
-      61.01E0     536.8E0
-      66.40E0     593.1E0
-      75.47E0     689.1E0
-      81.78E0     760.0E0
+NIST/ITL StRD
+Dataset Name:  Misra1c           (Misra1c.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 74)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study regarding
+               dental research in monomolecular adsorption.  The
+               response variable is volume, and the predictor
+               variable is pressure.
+
+Reference:     Misra, D., NIST (1978).  
+               Dental Research Monomolecular Adsorption.
+
+
+
+
+
+
+
+Data:          1 Response  (y = volume)
+               1 Predictor (x = pressure)
+               14 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               2 Parameters (b1 and b2)
+
+               y = b1 * (1-(1+2*b2*x)**(-.5))  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   500         600           6.3642725809E+02  4.6638326572E+00
+  b2 =     0.0001      0.0002      2.0813627256E-04  1.7728423155E-06
+  
+Residual Sum of Squares:                    4.0966836971E-02
+Residual Standard Deviation:                5.8428615257E-02
+Degrees of Freedom:                                12
+Number of Observations:                            14
+ 
+
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+  
+  
+  
+Data:   y            x 
+      10.07E0      77.6E0
+      14.73E0     114.9E0
+      17.94E0     141.1E0
+      23.93E0     190.8E0
+      29.61E0     239.9E0
+      35.18E0     289.0E0
+      40.02E0     332.8E0
+      44.82E0     378.4E0
+      50.76E0     434.8E0
+      55.05E0     477.3E0
+      61.01E0     536.8E0
+      66.40E0     593.1E0
+      75.47E0     689.1E0
+      81.78E0     760.0E0
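
Again only the model function differs; a sketch from Start 2:

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/Misra1c.dat', skiprows=60, unpack=True)

    def misra1c(x, b1, b2):
        # y = b1*(1 - (1 + 2*b2*x)**(-0.5))
        return b1*(1.0 - (1.0 + 2.0*b2*x)**(-0.5))

    result = Model(misra1c).fit(y, x=x, b1=600.0, b2=0.0002)  # Start 2
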
diff --git a/NIST_STRD/Misra1d.dat b/NIST_STRD/Misra1d.dat
index 237de46..fcf12d3 100644
--- a/NIST_STRD/Misra1d.dat
+++ b/NIST_STRD/Misra1d.dat
@@ -1,74 +1,74 @@
-NIST/ITL StRD
-Dataset Name:  Misra1d           (Misra1d.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 42)
-               Certified Values  (lines 41 to 47)
-               Data              (lines 61 to 74)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study regarding
-               dental research in monomolecular adsorption.  The
-               response variable is volume, and the predictor
-               variable is pressure.
-
-Reference:     Misra, D., NIST (1978).  
-               Dental Research Monomolecular Adsorption Study.
-
-
-
-
-
-
-
-Data:          1 Response  (y = volume)
-               1 Predictor (x = pressure)
-               14 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               2 Parameters (b1 and b2)
-
-               y = b1*b2*x*((1+b2*x)**(-1))  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   500         450           4.3736970754E+02  3.6489174345E+00
-  b2 =     0.0001      0.0003      3.0227324449E-04  2.9334354479E-06
-
-Residual Sum of Squares:                    5.6419295283E-02
-Residual Standard Deviation:                6.8568272111E-02
-Degrees of Freedom:                                12
-Number of Observations:                            14
-
-
-
-
-
-
-
-
-
-
-
-
-Data:   y            x
-      10.07E0      77.6E0
-      14.73E0     114.9E0
-      17.94E0     141.1E0
-      23.93E0     190.8E0
-      29.61E0     239.9E0
-      35.18E0     289.0E0
-      40.02E0     332.8E0
-      44.82E0     378.4E0
-      50.76E0     434.8E0
-      55.05E0     477.3E0
-      61.01E0     536.8E0
-      66.40E0     593.1E0
-      75.47E0     689.1E0
-      81.78E0     760.0E0
+NIST/ITL StRD
+Dataset Name:  Misra1d           (Misra1d.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 42)
+               Certified Values  (lines 41 to 47)
+               Data              (lines 61 to 74)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study regarding
+               dental research in monomolecular adsorption.  The
+               response variable is volume, and the predictor
+               variable is pressure.
+
+Reference:     Misra, D., NIST (1978).  
+               Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data:          1 Response  (y = volume)
+               1 Predictor (x = pressure)
+               14 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               2 Parameters (b1 and b2)
+
+               y = b1*b2*x*((1+b2*x)**(-1))  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   500         450           4.3736970754E+02  3.6489174345E+00
+  b2 =     0.0001      0.0003      3.0227324449E-04  2.9334354479E-06
+
+Residual Sum of Squares:                    5.6419295283E-02
+Residual Standard Deviation:                6.8568272111E-02
+Degrees of Freedom:                                12
+Number of Observations:                            14
+
+
+
+
+
+
+
+
+
+
+
+
+Data:   y            x
+      10.07E0      77.6E0
+      14.73E0     114.9E0
+      17.94E0     141.1E0
+      23.93E0     190.8E0
+      29.61E0     239.9E0
+      35.18E0     289.0E0
+      40.02E0     332.8E0
+      44.82E0     378.4E0
+      50.76E0     434.8E0
+      55.05E0     477.3E0
+      61.01E0     536.8E0
+      66.40E0     593.1E0
+      75.47E0     689.1E0
+      81.78E0     760.0E0
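
The last of the four Misra variants; a sketch:

    import numpy as np
    from lmfit import Model

    y, x = np.loadtxt('NIST_STRD/Misra1d.dat', skiprows=60, unpack=True)

    def misra1d(x, b1, b2):
        # y = b1*b2*x * (1 + b2*x)**(-1)
        return b1*b2*x / (1.0 + b2*x)

    result = Model(misra1d).fit(y, x=x, b1=450.0, b2=0.0003)  # Start 2
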
diff --git a/NIST_STRD/Nelson.dat b/NIST_STRD/Nelson.dat
index 5ce1003..a6dc9e2 100644
--- a/NIST_STRD/Nelson.dat
+++ b/NIST_STRD/Nelson.dat
@@ -1,188 +1,188 @@
-NIST/ITL StRD
-Dataset Name:  Nelson            (Nelson.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 43)
-               Certified Values  (lines 41 to 48)
-               Data              (lines 61 to 188)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a study involving
-               the analysis of performance degradation data from
-               accelerated tests, published in IEEE Transactions
-               on Reliability.  The response variable is dielectric
-               breakdown strength in kilo-volts, and the predictor
-               variables are time in weeks and temperature in degrees
-               Celsius.
-
-
-Reference:     Nelson, W. (1981).  
-               Analysis of Performance-Degradation Data.  
-               IEEE Transactions on Reliability.
-               Vol. 2, R-30, No. 2, pp. 149-155.
-
-Data:          1 Response   ( y = dielectric breakdown strength) 
-               2 Predictors (x1 = time; x2 = temperature)
-               128 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
-
-               log[y] = b1 - b2*x1 * exp[-b3*x2]  +  e
-
-
-
-          Starting values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =    2           2.5          2.5906836021E+00  1.9149996413E-02
-  b2 =    0.0001      0.000000005  5.6177717026E-09  6.1124096540E-09
-  b3 =   -0.01       -0.05        -5.7701013174E-02  3.9572366543E-03
-
-Residual Sum of Squares:                    3.7976833176E+00
-Residual Standard Deviation:                1.7430280130E-01
-Degrees of Freedom:                               125
-Number of Observations:                           128
-
-
-
-
-
-
-
-
-
-
-
-Data:   y              x1            x2
-      15.00E0         1E0         180E0
-      17.00E0         1E0         180E0
-      15.50E0         1E0         180E0
-      16.50E0         1E0         180E0
-      15.50E0         1E0         225E0
-      15.00E0         1E0         225E0
-      16.00E0         1E0         225E0
-      14.50E0         1E0         225E0
-      15.00E0         1E0         250E0
-      14.50E0         1E0         250E0
-      12.50E0         1E0         250E0
-      11.00E0         1E0         250E0
-      14.00E0         1E0         275E0
-      13.00E0         1E0         275E0
-      14.00E0         1E0         275E0
-      11.50E0         1E0         275E0
-      14.00E0         2E0         180E0
-      16.00E0         2E0         180E0
-      13.00E0         2E0         180E0
-      13.50E0         2E0         180E0
-      13.00E0         2E0         225E0
-      13.50E0         2E0         225E0
-      12.50E0         2E0         225E0
-      12.50E0         2E0         225E0
-      12.50E0         2E0         250E0
-      12.00E0         2E0         250E0
-      11.50E0         2E0         250E0
-      12.00E0         2E0         250E0
-      13.00E0         2E0         275E0
-      11.50E0         2E0         275E0
-      13.00E0         2E0         275E0
-      12.50E0         2E0         275E0
-      13.50E0         4E0         180E0
-      17.50E0         4E0         180E0
-      17.50E0         4E0         180E0
-      13.50E0         4E0         180E0
-      12.50E0         4E0         225E0
-      12.50E0         4E0         225E0
-      15.00E0         4E0         225E0
-      13.00E0         4E0         225E0
-      12.00E0         4E0         250E0
-      13.00E0         4E0         250E0
-      12.00E0         4E0         250E0
-      13.50E0         4E0         250E0
-      10.00E0         4E0         275E0
-      11.50E0         4E0         275E0
-      11.00E0         4E0         275E0
-       9.50E0         4E0         275E0
-      15.00E0         8E0         180E0
-      15.00E0         8E0         180E0
-      15.50E0         8E0         180E0
-      16.00E0         8E0         180E0
-      13.00E0         8E0         225E0
-      10.50E0         8E0         225E0
-      13.50E0         8E0         225E0
-      14.00E0         8E0         225E0
-      12.50E0         8E0         250E0
-      12.00E0         8E0         250E0
-      11.50E0         8E0         250E0
-      11.50E0         8E0         250E0
-       6.50E0         8E0         275E0
-       5.50E0         8E0         275E0
-       6.00E0         8E0         275E0
-       6.00E0         8E0         275E0
-      18.50E0        16E0         180E0
-      17.00E0        16E0         180E0
-      15.30E0        16E0         180E0
-      16.00E0        16E0         180E0
-      13.00E0        16E0         225E0
-      14.00E0        16E0         225E0
-      12.50E0        16E0         225E0
-      11.00E0        16E0         225E0
-      12.00E0        16E0         250E0
-      12.00E0        16E0         250E0
-      11.50E0        16E0         250E0
-      12.00E0        16E0         250E0
-       6.00E0        16E0         275E0
-       6.00E0        16E0         275E0
-       5.00E0        16E0         275E0
-       5.50E0        16E0         275E0
-      12.50E0        32E0         180E0
-      13.00E0        32E0         180E0
-      16.00E0        32E0         180E0
-      12.00E0        32E0         180E0
-      11.00E0        32E0         225E0
-       9.50E0        32E0         225E0
-      11.00E0        32E0         225E0
-      11.00E0        32E0         225E0
-      11.00E0        32E0         250E0
-      10.00E0        32E0         250E0
-      10.50E0        32E0         250E0
-      10.50E0        32E0         250E0
-       2.70E0        32E0         275E0
-       2.70E0        32E0         275E0
-       2.50E0        32E0         275E0
-       2.40E0        32E0         275E0
-      13.00E0        48E0         180E0
-      13.50E0        48E0         180E0
-      16.50E0        48E0         180E0
-      13.60E0        48E0         180E0
-      11.50E0        48E0         225E0
-      10.50E0        48E0         225E0
-      13.50E0        48E0         225E0
-      12.00E0        48E0         225E0
-       7.00E0        48E0         250E0
-       6.90E0        48E0         250E0
-       8.80E0        48E0         250E0
-       7.90E0        48E0         250E0
-       1.20E0        48E0         275E0
-       1.50E0        48E0         275E0
-       1.00E0        48E0         275E0
-       1.50E0        48E0         275E0
-      13.00E0        64E0         180E0
-      12.50E0        64E0         180E0
-      16.50E0        64E0         180E0
-      16.00E0        64E0         180E0
-      11.00E0        64E0         225E0
-      11.50E0        64E0         225E0
-      10.50E0        64E0         225E0
-      10.00E0        64E0         225E0
-       7.27E0        64E0         250E0
-       7.50E0        64E0         250E0
-       6.70E0        64E0         250E0
-       7.60E0        64E0         250E0
-       1.50E0        64E0         275E0
-       1.00E0        64E0         275E0
-       1.20E0        64E0         275E0
-       1.20E0        64E0         275E0
+NIST/ITL StRD
+Dataset Name:  Nelson            (Nelson.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 43)
+               Certified Values  (lines 41 to 48)
+               Data              (lines 61 to 188)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a study involving
+               the analysis of performance degradation data from
+               accelerated tests, published in IEEE Transactions
+               on Reliability.  The response variable is dielectric
+               breakdown strength in kilo-volts, and the predictor
+               variables are time in weeks and temperature in degrees
+               Celsius.
+
+
+Reference:     Nelson, W. (1981).  
+               Analysis of Performance-Degradation Data.  
+               IEEE Transactions on Reliability.
+               Vol. 2, R-30, No. 2, pp. 149-155.
+
+Data:          1 Response   ( y = dielectric breakdown strength) 
+               2 Predictors (x1 = time; x2 = temperature)
+               128 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+
+               log[y] = b1 - b2*x1 * exp[-b3*x2]  +  e
+
+
+
+          Starting values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =    2           2.5          2.5906836021E+00  1.9149996413E-02
+  b2 =    0.0001      0.000000005  5.6177717026E-09  6.1124096540E-09
+  b3 =   -0.01       -0.05        -5.7701013174E-02  3.9572366543E-03
+
+Residual Sum of Squares:                    3.7976833176E+00
+Residual Standard Deviation:                1.7430280130E-01
+Degrees of Freedom:                               125
+Number of Observations:                           128
+
+
+
+
+
+
+
+
+
+
+
+Data:   y              x1            x2
+      15.00E0         1E0         180E0
+      17.00E0         1E0         180E0
+      15.50E0         1E0         180E0
+      16.50E0         1E0         180E0
+      15.50E0         1E0         225E0
+      15.00E0         1E0         225E0
+      16.00E0         1E0         225E0
+      14.50E0         1E0         225E0
+      15.00E0         1E0         250E0
+      14.50E0         1E0         250E0
+      12.50E0         1E0         250E0
+      11.00E0         1E0         250E0
+      14.00E0         1E0         275E0
+      13.00E0         1E0         275E0
+      14.00E0         1E0         275E0
+      11.50E0         1E0         275E0
+      14.00E0         2E0         180E0
+      16.00E0         2E0         180E0
+      13.00E0         2E0         180E0
+      13.50E0         2E0         180E0
+      13.00E0         2E0         225E0
+      13.50E0         2E0         225E0
+      12.50E0         2E0         225E0
+      12.50E0         2E0         225E0
+      12.50E0         2E0         250E0
+      12.00E0         2E0         250E0
+      11.50E0         2E0         250E0
+      12.00E0         2E0         250E0
+      13.00E0         2E0         275E0
+      11.50E0         2E0         275E0
+      13.00E0         2E0         275E0
+      12.50E0         2E0         275E0
+      13.50E0         4E0         180E0
+      17.50E0         4E0         180E0
+      17.50E0         4E0         180E0
+      13.50E0         4E0         180E0
+      12.50E0         4E0         225E0
+      12.50E0         4E0         225E0
+      15.00E0         4E0         225E0
+      13.00E0         4E0         225E0
+      12.00E0         4E0         250E0
+      13.00E0         4E0         250E0
+      12.00E0         4E0         250E0
+      13.50E0         4E0         250E0
+      10.00E0         4E0         275E0
+      11.50E0         4E0         275E0
+      11.00E0         4E0         275E0
+       9.50E0         4E0         275E0
+      15.00E0         8E0         180E0
+      15.00E0         8E0         180E0
+      15.50E0         8E0         180E0
+      16.00E0         8E0         180E0
+      13.00E0         8E0         225E0
+      10.50E0         8E0         225E0
+      13.50E0         8E0         225E0
+      14.00E0         8E0         225E0
+      12.50E0         8E0         250E0
+      12.00E0         8E0         250E0
+      11.50E0         8E0         250E0
+      11.50E0         8E0         250E0
+       6.50E0         8E0         275E0
+       5.50E0         8E0         275E0
+       6.00E0         8E0         275E0
+       6.00E0         8E0         275E0
+      18.50E0        16E0         180E0
+      17.00E0        16E0         180E0
+      15.30E0        16E0         180E0
+      16.00E0        16E0         180E0
+      13.00E0        16E0         225E0
+      14.00E0        16E0         225E0
+      12.50E0        16E0         225E0
+      11.00E0        16E0         225E0
+      12.00E0        16E0         250E0
+      12.00E0        16E0         250E0
+      11.50E0        16E0         250E0
+      12.00E0        16E0         250E0
+       6.00E0        16E0         275E0
+       6.00E0        16E0         275E0
+       5.00E0        16E0         275E0
+       5.50E0        16E0         275E0
+      12.50E0        32E0         180E0
+      13.00E0        32E0         180E0
+      16.00E0        32E0         180E0
+      12.00E0        32E0         180E0
+      11.00E0        32E0         225E0
+       9.50E0        32E0         225E0
+      11.00E0        32E0         225E0
+      11.00E0        32E0         225E0
+      11.00E0        32E0         250E0
+      10.00E0        32E0         250E0
+      10.50E0        32E0         250E0
+      10.50E0        32E0         250E0
+       2.70E0        32E0         275E0
+       2.70E0        32E0         275E0
+       2.50E0        32E0         275E0
+       2.40E0        32E0         275E0
+      13.00E0        48E0         180E0
+      13.50E0        48E0         180E0
+      16.50E0        48E0         180E0
+      13.60E0        48E0         180E0
+      11.50E0        48E0         225E0
+      10.50E0        48E0         225E0
+      13.50E0        48E0         225E0
+      12.00E0        48E0         225E0
+       7.00E0        48E0         250E0
+       6.90E0        48E0         250E0
+       8.80E0        48E0         250E0
+       7.90E0        48E0         250E0
+       1.20E0        48E0         275E0
+       1.50E0        48E0         275E0
+       1.00E0        48E0         275E0
+       1.50E0        48E0         275E0
+      13.00E0        64E0         180E0
+      12.50E0        64E0         180E0
+      16.50E0        64E0         180E0
+      16.00E0        64E0         180E0
+      11.00E0        64E0         225E0
+      11.50E0        64E0         225E0
+      10.50E0        64E0         225E0
+      10.00E0        64E0         225E0
+       7.27E0        64E0         250E0
+       7.50E0        64E0         250E0
+       6.70E0        64E0         250E0
+       7.60E0        64E0         250E0
+       1.50E0        64E0         275E0
+       1.00E0        64E0         275E0
+       1.20E0        64E0         275E0
+       1.20E0        64E0         275E0
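
The response enters this model as log[y], so a natural approach is to form the residual against np.log(y); a sketch with two predictor columns (illustrative, same assumptions as the earlier sketches):

    import numpy as np
    from lmfit import Parameters, minimize

    # three columns: y, x1 (time in weeks), x2 (temperature)
    y, x1, x2 = np.loadtxt('NIST_STRD/Nelson.dat', skiprows=60, unpack=True)

    def residual(pars, x1, x2, logy):
        # log[y] = b1 - b2*x1*exp[-b3*x2]
        p = pars.valuesdict()
        return (p['b1'] - p['b2']*x1*np.exp(-p['b3']*x2)) - logy

    params = Parameters()
    params.add('b1', value=2.5)      # Start 2
    params.add('b2', value=5.0e-9)
    params.add('b3', value=-0.05)

    out = minimize(residual, params, args=(x1, x2, np.log(y)))
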
diff --git a/NIST_STRD/Rat42.dat b/NIST_STRD/Rat42.dat
index e112fbb..5468df8 100644
--- a/NIST_STRD/Rat42.dat
+++ b/NIST_STRD/Rat42.dat
@@ -1,69 +1,69 @@
-NIST/ITL StRD
-Dataset Name:  Rat42             (Rat42.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 43)
-               Certified Values  (lines 41 to 48)
-               Data              (lines 61 to 69)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   This model and data are an example of fitting
-               sigmoidal growth curves taken from Ratkowsky (1983).
-               The response variable is pasture yield, and the
-               predictor variable is growing time.
-
-
-Reference:     Ratkowsky, D.A. (1983).  
-               Nonlinear Regression Modeling.
-               New York, NY:  Marcel Dekker, pp. 61 and 88.
-
-
-
-
-
-Data:          1 Response  (y = pasture yield)
-               1 Predictor (x = growing time)
-               9 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               3 Parameters (b1 to b3)
-
-               y = b1 / (1+exp[b2-b3*x])  +  e
-
-
-
-          Starting Values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   100         75            7.2462237576E+01  1.7340283401E+00
-  b2 =     1          2.5          2.6180768402E+00  8.8295217536E-02
-  b3 =     0.1        0.07         6.7359200066E-02  3.4465663377E-03
-
-Residual Sum of Squares:                    8.0565229338E+00
-Residual Standard Deviation:                1.1587725499E+00
-Degrees of Freedom:                                6
-Number of Observations:                            9 
-
-
-
-
-
-
-
-
-
-
-
-Data:   y              x
-       8.930E0        9.000E0
-      10.800E0       14.000E0
-      18.590E0       21.000E0
-      22.330E0       28.000E0
-      39.350E0       42.000E0
-      56.110E0       57.000E0
-      61.730E0       63.000E0
-      64.620E0       70.000E0
-      67.080E0       79.000E0
+NIST/ITL StRD
+Dataset Name:  Rat42             (Rat42.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 43)
+               Certified Values  (lines 41 to 48)
+               Data              (lines 61 to 69)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   This model and data are an example of fitting
+               sigmoidal growth curves taken from Ratkowsky (1983).
+               The response variable is pasture yield, and the
+               predictor variable is growing time.
+
+
+Reference:     Ratkowsky, D.A. (1983).  
+               Nonlinear Regression Modeling.
+               New York, NY:  Marcel Dekker, pp. 61 and 88.
+
+
+
+
+
+Data:          1 Response  (y = pasture yield)
+               1 Predictor (x = growing time)
+               9 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               3 Parameters (b1 to b3)
+
+               y = b1 / (1+exp[b2-b3*x])  +  e
+
+
+
+          Starting Values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   100         75            7.2462237576E+01  1.7340283401E+00
+  b2 =     1          2.5          2.6180768402E+00  8.8295217536E-02
+  b3 =     0.1        0.07         6.7359200066E-02  3.4465663377E-03
+
+Residual Sum of Squares:                    8.0565229338E+00
+Residual Standard Deviation:                1.1587725499E+00
+Degrees of Freedom:                                6
+Number of Observations:                            9 
+
+
+
+
+
+
+
+
+
+
+
+Data:   y              x
+       8.930E0        9.000E0
+      10.800E0       14.000E0
+      18.590E0       21.000E0
+      22.330E0       28.000E0
+      39.350E0       42.000E0
+      56.110E0       57.000E0
+      61.730E0       63.000E0
+      64.620E0       70.000E0
+      67.080E0       79.000E0
diff --git a/NIST_STRD/Rat43.dat b/NIST_STRD/Rat43.dat
index 347d846..ca6d1dc 100644
--- a/NIST_STRD/Rat43.dat
+++ b/NIST_STRD/Rat43.dat
@@ -1,75 +1,75 @@
-NIST/ITL StRD
-Dataset Name:  Rat43             (Rat43.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 44)
-               Certified Values  (lines 41 to 49)
-               Data              (lines 61 to 75)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   This model and data are an example of fitting  
-               sigmoidal growth curves taken from Ratkowsky (1983).  
-               The response variable is the dry weight of onion bulbs 
-               and tops, and the predictor variable is growing time. 
-
-
-Reference:     Ratkowsky, D.A. (1983).  
-               Nonlinear Regression Modeling.
-               New York, NY:  Marcel Dekker, pp. 62 and 88.
-
-
-
-
-
-Data:          1 Response  (y = onion bulb dry weight)
-               1 Predictor (x = growing time)
-               15 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Exponential Class
-               4 Parameters (b1 to b4)
-
-               y = b1 / ((1+exp[b2-b3*x])**(1/b4))  +  e
-
-
-
-          Starting Values                  Certified Values
- 
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   100         700           6.9964151270E+02  1.6302297817E+01
-  b2 =    10           5           5.2771253025E+00  2.0828735829E+00
-  b3 =     1           0.75        7.5962938329E-01  1.9566123451E-01
-  b4 =     1           1.3         1.2792483859E+00  6.8761936385E-01
- 
-Residual Sum of Squares:                    8.7864049080E+03
-Residual Standard Deviation:                2.8262414662E+01
-Degrees of Freedom:                                9
-Number of Observations:                           15 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
-Data:   y          x
-      16.08E0     1.0E0
-      33.83E0     2.0E0
-      65.80E0     3.0E0
-      97.20E0     4.0E0
-     191.55E0     5.0E0
-     326.20E0     6.0E0
-     386.87E0     7.0E0
-     520.53E0     8.0E0
-     590.03E0     9.0E0
-     651.92E0    10.0E0
-     724.93E0    11.0E0
-     699.56E0    12.0E0
-     689.96E0    13.0E0
-     637.56E0    14.0E0
-     717.41E0    15.0E0
+NIST/ITL StRD
+Dataset Name:  Rat43             (Rat43.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 44)
+               Certified Values  (lines 41 to 49)
+               Data              (lines 61 to 75)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   This model and data are an example of fitting  
+               sigmoidal growth curves taken from Ratkowsky (1983).  
+               The response variable is the dry weight of onion bulbs 
+               and tops, and the predictor variable is growing time. 
+
+
+Reference:     Ratkowsky, D.A. (1983).  
+               Nonlinear Regression Modeling.
+               New York, NY:  Marcel Dekker, pp. 62 and 88.
+
+
+
+
+
+Data:          1 Response  (y = onion bulb dry weight)
+               1 Predictor (x = growing time)
+               15 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Exponential Class
+               4 Parameters (b1 to b4)
+
+               y = b1 / ((1+exp[b2-b3*x])**(1/b4))  +  e
+
+
+
+          Starting Values                  Certified Values
+ 
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   100         700           6.9964151270E+02  1.6302297817E+01
+  b2 =    10           5           5.2771253025E+00  2.0828735829E+00
+  b3 =     1           0.75        7.5962938329E-01  1.9566123451E-01
+  b4 =     1           1.3         1.2792483859E+00  6.8761936385E-01
+ 
+Residual Sum of Squares:                    8.7864049080E+03
+Residual Standard Deviation:                2.8262414662E+01
+Degrees of Freedom:                                9
+Number of Observations:                           15 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+ 
+Data:   y          x
+      16.08E0     1.0E0
+      33.83E0     2.0E0
+      65.80E0     3.0E0
+      97.20E0     4.0E0
+     191.55E0     5.0E0
+     326.20E0     6.0E0
+     386.87E0     7.0E0
+     520.53E0     8.0E0
+     590.03E0     9.0E0
+     651.92E0    10.0E0
+     724.93E0    11.0E0
+     699.56E0    12.0E0
+     689.96E0    13.0E0
+     637.56E0    14.0E0
+     717.41E0    15.0E0
diff --git a/NIST_STRD/Roszman1.dat b/NIST_STRD/Roszman1.dat
index 0296837..ddab210 100644
--- a/NIST_STRD/Roszman1.dat
+++ b/NIST_STRD/Roszman1.dat
@@ -1,85 +1,85 @@
-NIST/ITL StRD
-Dataset Name:  Roszman1          (Roszman1.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 44)
-               Certified Values  (lines 41 to 49)
-               Data              (lines 61 to 85)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               quantum defects in iodine atoms.  The response
-               variable is the number of quantum defects, and the
-               predictor variable is the excited energy state.
-               The argument to the ARCTAN function is in radians.
-
-Reference:     Roszman, L., NIST (19??).  
-               Quantum Defects for Sulfur I Atom.
-
-
-
-
-
-
-Data:          1 Response  (y = quantum defect)
-               1 Predictor (x = excited state energy)
-               25 Observations
-               Average Level of Difficulty
-               Observed Data
-
-Model:         Miscellaneous Class
-               4 Parameters (b1 to b4)
-
-               pi = 3.141592653589793238462643383279E0
-               y =  b1 - b2*x - arctan[b3/(x-b4)]/pi  +  e
-
-
-          Starting Values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =      0.1         0.2         2.0196866396E-01  1.9172666023E-02
-  b2 =     -0.00001    -0.000005   -6.1953516256E-06  3.2058931691E-06
-  b3 =   1000        1200           1.2044556708E+03  7.4050983057E+01
-  b4 =   -100        -150          -1.8134269537E+02  4.9573513849E+01
-
-Residual Sum of Squares:                    4.9484847331E-04
-Residual Standard Deviation:                4.8542984060E-03
-Degrees of Freedom:                                 21
-Number of Observations:                             25
-
-
-
-
-
-
-
-
-
-
-Data:   y           x
-       0.252429    -4868.68
-       0.252141    -4868.09
-       0.251809    -4867.41
-       0.297989    -3375.19
-       0.296257    -3373.14
-       0.295319    -3372.03
-       0.339603    -2473.74
-       0.337731    -2472.35
-       0.333820    -2469.45
-       0.389510    -1894.65
-       0.386998    -1893.40
-       0.438864    -1497.24
-       0.434887    -1495.85
-       0.427893    -1493.41
-       0.471568    -1208.68
-       0.461699    -1206.18
-       0.461144    -1206.04
-       0.513532     -997.92
-       0.506641     -996.61
-       0.505062     -996.31
-       0.535648     -834.94
-       0.533726     -834.66
-       0.568064     -710.03
-       0.612886     -530.16
-       0.624169     -464.17
+NIST/ITL StRD
+Dataset Name:  Roszman1          (Roszman1.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 44)
+               Certified Values  (lines 41 to 49)
+               Data              (lines 61 to 85)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               quantum defects in iodine atoms.  The response
+               variable is the number of quantum defects, and the
+               predictor variable is the excited energy state.
+               The argument to the ARCTAN function is in radians.
+
+Reference:     Roszman, L., NIST (19??).  
+               Quantum Defects for Sulfur I Atom.
+
+
+
+
+
+
+Data:          1 Response  (y = quantum defect)
+               1 Predictor (x = excited state energy)
+               25 Observations
+               Average Level of Difficulty
+               Observed Data
+
+Model:         Miscellaneous Class
+               4 Parameters (b1 to b4)
+
+               pi = 3.141592653589793238462643383279E0
+               y =  b1 - b2*x - arctan[b3/(x-b4)]/pi  +  e
+
+
+          Starting Values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =      0.1         0.2         2.0196866396E-01  1.9172666023E-02
+  b2 =     -0.00001    -0.000005   -6.1953516256E-06  3.2058931691E-06
+  b3 =   1000        1200           1.2044556708E+03  7.4050983057E+01
+  b4 =   -100        -150          -1.8134269537E+02  4.9573513849E+01
+
+Residual Sum of Squares:                    4.9484847331E-04
+Residual Standard Deviation:                4.8542984060E-03
+Degrees of Freedom:                                 21
+Number of Observations:                             25
+
+
+
+
+
+
+
+
+
+
+Data:   y           x
+       0.252429    -4868.68
+       0.252141    -4868.09
+       0.251809    -4867.41
+       0.297989    -3375.19
+       0.296257    -3373.14
+       0.295319    -3372.03
+       0.339603    -2473.74
+       0.337731    -2472.35
+       0.333820    -2469.45
+       0.389510    -1894.65
+       0.386998    -1893.40
+       0.438864    -1497.24
+       0.434887    -1495.85
+       0.427893    -1493.41
+       0.471568    -1208.68
+       0.461699    -1206.18
+       0.461144    -1206.04
+       0.513532     -997.92
+       0.506641     -996.61
+       0.505062     -996.31
+       0.535648     -834.94
+       0.533726     -834.66
+       0.568064     -710.03
+       0.612886     -530.16
+       0.624169     -464.17
diff --git a/NIST_STRD/Thurber.dat b/NIST_STRD/Thurber.dat
index 6d72fd9..6ecdc77 100644
--- a/NIST_STRD/Thurber.dat
+++ b/NIST_STRD/Thurber.dat
@@ -1,97 +1,97 @@
-NIST/ITL StRD
-Dataset Name:  Thurber           (Thurber.dat)
-
-File Format:   ASCII
-               Starting Values   (lines 41 to 47)
-               Certified Values  (lines 41 to 52)
-               Data              (lines 61 to 97)
-
-Procedure:     Nonlinear Least Squares Regression
-
-Description:   These data are the result of a NIST study involving
-               semiconductor electron mobility.  The response 
-               variable is a measure of electron mobility, and the 
-               predictor variable is the natural log of the density.
-
-
-Reference:     Thurber, R., NIST (197?).  
-               Semiconductor electron mobility modeling.
-
-
-
-
-
-
-Data:          1 Response Variable  (y = electron mobility)
-               1 Predictor Variable (x = log[density])
-               37 Observations
-               Higher Level of Difficulty
-               Observed Data
-
-Model:         Rational Class (cubic/cubic)
-               7 Parameters (b1 to b7)
-
-               y = (b1 + b2*x + b3*x**2 + b4*x**3) / 
-                   (1 + b5*x + b6*x**2 + b7*x**3)  +  e
-
-
-          Starting Values                  Certified Values
-
-        Start 1     Start 2           Parameter     Standard Deviation
-  b1 =   1000        1300          1.2881396800E+03  4.6647963344E+00
-  b2 =   1000        1500          1.4910792535E+03  3.9571156086E+01
-  b3 =    400         500          5.8323836877E+02  2.8698696102E+01
-  b4 =     40          75          7.5416644291E+01  5.5675370270E+00
-  b5 =      0.7         1          9.6629502864E-01  3.1333340687E-02
-  b6 =      0.3         0.4        3.9797285797E-01  1.4984928198E-02
-  b7 =      0.03        0.05       4.9727297349E-02  6.5842344623E-03
-
-Residual Sum of Squares:                    5.6427082397E+03
-Residual Standard Deviation:                1.3714600784E+01
-Degrees of Freedom:                                30
-Number of Observations:                            37
-
-
-
-
-
-
-
-Data:   y             x
-      80.574E0      -3.067E0
-      84.248E0      -2.981E0
-      87.264E0      -2.921E0
-      87.195E0      -2.912E0
-      89.076E0      -2.840E0
-      89.608E0      -2.797E0
-      89.868E0      -2.702E0
-      90.101E0      -2.699E0
-      92.405E0      -2.633E0
-      95.854E0      -2.481E0
-     100.696E0      -2.363E0
-     101.060E0      -2.322E0
-     401.672E0      -1.501E0
-     390.724E0      -1.460E0
-     567.534E0      -1.274E0
-     635.316E0      -1.212E0
-     733.054E0      -1.100E0
-     759.087E0      -1.046E0
-     894.206E0      -0.915E0
-     990.785E0      -0.714E0
-    1090.109E0      -0.566E0
-    1080.914E0      -0.545E0
-    1122.643E0      -0.400E0
-    1178.351E0      -0.309E0
-    1260.531E0      -0.109E0
-    1273.514E0      -0.103E0
-    1288.339E0       0.010E0
-    1327.543E0       0.119E0
-    1353.863E0       0.377E0
-    1414.509E0       0.790E0
-    1425.208E0       0.963E0
-    1421.384E0       1.006E0
-    1442.962E0       1.115E0
-    1464.350E0       1.572E0
-    1468.705E0       1.841E0
-    1447.894E0       2.047E0
-    1457.628E0       2.200E0
+NIST/ITL StRD
+Dataset Name:  Thurber           (Thurber.dat)
+
+File Format:   ASCII
+               Starting Values   (lines 41 to 47)
+               Certified Values  (lines 41 to 52)
+               Data              (lines 61 to 97)
+
+Procedure:     Nonlinear Least Squares Regression
+
+Description:   These data are the result of a NIST study involving
+               semiconductor electron mobility.  The response 
+               variable is a measure of electron mobility, and the 
+               predictor variable is the natural log of the density.
+
+
+Reference:     Thurber, R., NIST (197?).  
+               Semiconductor electron mobility modeling.
+
+
+
+
+
+
+Data:          1 Response Variable  (y = electron mobility)
+               1 Predictor Variable (x = log[density])
+               37 Observations
+               Higher Level of Difficulty
+               Observed Data
+
+Model:         Rational Class (cubic/cubic)
+               7 Parameters (b1 to b7)
+
+               y = (b1 + b2*x + b3*x**2 + b4*x**3) / 
+                   (1 + b5*x + b6*x**2 + b7*x**3)  +  e
+
+
+          Starting Values                  Certified Values
+
+        Start 1     Start 2           Parameter     Standard Deviation
+  b1 =   1000        1300          1.2881396800E+03  4.6647963344E+00
+  b2 =   1000        1500          1.4910792535E+03  3.9571156086E+01
+  b3 =    400         500          5.8323836877E+02  2.8698696102E+01
+  b4 =     40          75          7.5416644291E+01  5.5675370270E+00
+  b5 =      0.7         1          9.6629502864E-01  3.1333340687E-02
+  b6 =      0.3         0.4        3.9797285797E-01  1.4984928198E-02
+  b7 =      0.03        0.05       4.9727297349E-02  6.5842344623E-03
+
+Residual Sum of Squares:                    5.6427082397E+03
+Residual Standard Deviation:                1.3714600784E+01
+Degrees of Freedom:                                30
+Number of Observations:                            37
+
+
+
+
+
+
+
+Data:   y             x
+      80.574E0      -3.067E0
+      84.248E0      -2.981E0
+      87.264E0      -2.921E0
+      87.195E0      -2.912E0
+      89.076E0      -2.840E0
+      89.608E0      -2.797E0
+      89.868E0      -2.702E0
+      90.101E0      -2.699E0
+      92.405E0      -2.633E0
+      95.854E0      -2.481E0
+     100.696E0      -2.363E0
+     101.060E0      -2.322E0
+     401.672E0      -1.501E0
+     390.724E0      -1.460E0
+     567.534E0      -1.274E0
+     635.316E0      -1.212E0
+     733.054E0      -1.100E0
+     759.087E0      -1.046E0
+     894.206E0      -0.915E0
+     990.785E0      -0.714E0
+    1090.109E0      -0.566E0
+    1080.914E0      -0.545E0
+    1122.643E0      -0.400E0
+    1178.351E0      -0.309E0
+    1260.531E0      -0.109E0
+    1273.514E0      -0.103E0
+    1288.339E0       0.010E0
+    1327.543E0       0.119E0
+    1353.863E0       0.377E0
+    1414.509E0       0.790E0
+    1425.208E0       0.963E0
+    1421.384E0       1.006E0
+    1442.962E0       1.115E0
+    1464.350E0       1.572E0
+    1468.705E0       1.841E0
+    1447.894E0       2.047E0
+    1457.628E0       2.200E0
diff --git a/PKG-INFO b/PKG-INFO
index 7c27e7d..3842e9b 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: lmfit
-Version: 0.9.2
+Version: 0.9.3
 Summary: Least-Squares Minimization with Bounds and Constraints
 Home-page: http://lmfit.github.io/lmfit-py/
 Author: LMFit Development Team
diff --git a/README b/README
deleted file mode 100644
index eacaee5..0000000
--- a/README
+++ /dev/null
@@ -1,65 +0,0 @@
-LMfit-py
-========
-
-[![build status](https://travis-ci.org/lmfit/lmfit-py.png?branch=master)](https://travis-ci.org/lmfit/lmfit-py)
-
-LMfit-py provides a Least-Squares Minimization routine and class
-with a simple, flexible approach to parameterizing a model for
-fitting to data.  Named Parameters can be held fixed or freely
-adjusted in the fit, or held between lower and upper bounds.  In
-addition, parameters can be constrained as a simple mathematical
-expression of other Parameters.
-
-To do this, the programmer defines a Parameters object, an enhanced
-dictionary, containing named parameters:
-
-    fit_params = Parameters()
-    fit_params['amp'] = Parameter(value=1.2, min=0.1, max=1000)
-    fit_params['cen'] = Parameter(value=40.0, vary=False)
-    fit_params['wid'] = Parameter(value=4, min=0)
-
-or using the equivalent
-
-    fit_params = Parameters()
-    fit_params.add('amp', value=1.2, min=0.1, max=1000)
-    fit_params.add('cen', value=40.0, vary=False)
-    fit_params.add('wid', value=4, min=0)
-
-The programmer will also write a function to be minimized (in the
-least-squares sense) with its first argument being this Parameters object,
-and additional positional and keyword arguments as desired:
-
-    def myfunc(params, x, data, someflag=True):
-        amp = params['amp'].value
-        cen = params['cen'].value
-        wid = params['wid'].value
-        ...
-        return residual_array
-
-For each call of this function, the values for the params may have changed,
-subject to the bounds and constraint settings for each Parameter.  The function
-should return the residual (i.e., data - model) array to be minimized.
-
-The advantage here is that the function to be minimized does not have to be
-changed if different bounds or constraints are placed on the fitting
-Parameters.  The fitting model (as described in myfunc) is instead written
-in terms of physical parameters of the system, and remains independent of
-what is actually varied in the fit.  In addition, the choice of which
-parameters are adjusted and which are fixed is made at run-time, so that
-what is varied and what constraints are placed on the parameters can easily
-be changed by the consumer in real-time data analysis.
-
-To perform the fit, the user calls
-
-    result = minimize(myfunc, fit_params, args=(x, data), kws={'someflag':True}, ....)
-
-After the fit, each real variable in the ``fit_params`` dictionary is updated
-to have best-fit values, estimated standard deviations, and correlations
-with other variables in the fit, while the results dictionary holds fit
-statistics and information.
-
-By default, the underlying fit algorithm is the Levenberg-Marquardt
-algorithm with numerically-calculated derivatives from MINPACK's lmdif
-function, as used by scipy.optimize.leastsq.  Other solvers (currently
-Simulated Annealing and L-BFGS-B) are also available, though slightly less
-well-tested and supported.
diff --git a/THANKS.txt b/THANKS.txt
index 4436a80..b208054 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,24 +1,24 @@
-Many people have contributed to lmfit.
-
-Matthew Newville wrote the original version and maintains the project.
-Till Stensitzki wrote the improved estimates of confidence intervals, and
-    contributed many tests, bug fixes, and documentation.
-Daniel B. Allan wrote much of the high level Model code, and many
-    improvements to the testing and documentation.
-Antonino Ingargiola wrote much of the high level Model code and provided
-    many bug fixes.
-J. J. Helmus wrote the MINUIT bounds for leastsq, originally in
-    leastsqbounds.py, and ported to lmfit.
-E. O. Le Bigot wrote the uncertainties package, a version of which is used
-    by lmfit.
-Michal Rawlik added plotting capabilities for Models.
-A. R. J. Nelson added differential_evolution, and greatly improved the code
-    in the docstrings.
-
-Additional patches, bug fixes, and suggestions have come from Christoph
-    Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
-    Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
-
-The lmfit code obviously depends on, and owes a very large debt to the code
-in scipy.optimize.  Several discussions on the scipy-user and lmfit mailing
-lists have also led to improvements in this code.
+Many people have contributed to lmfit.
+
+Matthew Newville wrote the original version and maintains the project.
+Till Stensitzki wrote the improved estimates of confidence intervals, and
+    contributed many tests, bug fixes, and documentation.
+Daniel B. Allan wrote much of the high level Model code, and many
+    improvements to the testing and documentation.
+Antonino Ingargiola wrote much of the high level Model code and provided
+    many bug fixes.
+J. J. Helmus wrote the MINUIT bounds for leastsq, originally in
+    leastsqbounds.py, and ported to lmfit.
+E. O. Le Bigot wrote the uncertainties package, a version of which is used
+    by lmfit.
+Michal Rawlik added plotting capabilities for Models.
+A. R. J. Nelson added differential_evolution, emcee, and greatly improved the
+    code in the docstrings.
+
+Additional patches, bug fixes, and suggestions have come from Christoph
+    Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
+    Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
+
+The lmfit code obviously depends on, and owes a very large debt to the code
+in scipy.optimize.  Several discussions on the scipy-user and lmfit mailing
+lists have also led to improvements in this code.
diff --git a/doc/Makefile b/doc/Makefile
index 5add84d..1c72ec9 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,112 +1,112 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-INSTALLDIR = /home/newville/public_html/lmfit/
-
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip
-.PHONY: all install pdf
-
-html:
-	cp sphinx/ext_mathjax.py extensions.py
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "html build finished: $(BUILDDIR)/html."
-
-htmlzip: html
-	cp sphinx/ext_pngmath.py extensions.py
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
-	cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
-
-epub:
-	cp sphinx/ext_pngmath.py extensions.py
-	$(SPHINXBUILD) -b epub  $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
-
-pdf: latex
-	cd $(BUILDDIR)/latex && make all-pdf
-	cp -pr $(BUILDDIR)/latex/lmfit.pdf $(BUILDDIR)/html/.
-
-all: html htmlzip epub pdf
-
-install: all
-	cd $(BUILDDIR)/latex && pdflatex lmfit.tex
-	cd $(BUILDDIR)/latex && makeindex -s python.ist lmfit.idx
-	cd $(BUILDDIR)/latex && pdflatex lmfit.tex
-	cp -pr $(BUILDDIR)/html/* $(INSTALLDIR)/.
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html      to make standalone HTML files"
-	@echo "  dirhtml   to make HTML files named index.html in directories"
-	@echo "  pickle    to make pickle files"
-	@echo "  json      to make JSON files"
-	@echo "  htmlhelp  to make HTML files and a HTML help project"
-	@echo "  qthelp    to make HTML files and a qthelp project"
-	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  changes   to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck to check all external links for integrity"
-	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in _build/latex."
-	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-	      "run these through (pdf)latex."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
-	@echo "Running LaTeX files through pdflatex..."
-	make -C _build/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in _build/latex."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+INSTALLDIR = /home/newville/public_html/lmfit/
+
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest latexpdf htmlzip
+.PHONY: all install pdf
+
+html:
+	cp sphinx/ext_mathjax.py extensions.py
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "html build finished: $(BUILDDIR)/html."
+
+htmlzip: html
+	cp sphinx/ext_pngmath.py extensions.py
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/lmfit_doc
+	cd $(BUILDDIR) && zip -pur html/lmfit_doc.zip lmfit_doc
+
+epub:
+	cp sphinx/ext_pngmath.py extensions.py
+	$(SPHINXBUILD) -b epub  $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	cp -pr $(BUILDDIR)/epub/*.epub $(BUILDDIR)/html/.
+
+pdf: latex
+	cd $(BUILDDIR)/latex && make all-pdf
+	cp -pr $(BUILDDIR)/latex/lmfit.pdf $(BUILDDIR)/html/.
+
+all: html htmlzip epub pdf
+
+install: all
+	cd $(BUILDDIR)/latex && pdflatex lmfit.tex
+	cd $(BUILDDIR)/latex && makeindex -s python.ist lmfit.idx
+	cd $(BUILDDIR)/latex && pdflatex lmfit.tex
+	cp -pr $(BUILDDIR)/html/* $(INSTALLDIR)/.
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in _build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex
+	@echo "Running LaTeX files through pdflatex..."
+	make -C _build/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in _build/latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/__pycache__/extensions.cpython-35.pyc b/doc/__pycache__/extensions.cpython-35.pyc
new file mode 100644
index 0000000..a9f415c
Binary files /dev/null and b/doc/__pycache__/extensions.cpython-35.pyc differ
diff --git a/doc/_images/emcee_dbl_exp.png b/doc/_images/emcee_dbl_exp.png
new file mode 100644
index 0000000..11bc045
Binary files /dev/null and b/doc/_images/emcee_dbl_exp.png differ
diff --git a/doc/_images/emcee_dbl_exp2.png b/doc/_images/emcee_dbl_exp2.png
new file mode 100644
index 0000000..b2dab72
Binary files /dev/null and b/doc/_images/emcee_dbl_exp2.png differ
diff --git a/doc/_images/emcee_triangle.png b/doc/_images/emcee_triangle.png
new file mode 100644
index 0000000..3e76a52
Binary files /dev/null and b/doc/_images/emcee_triangle.png differ
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index 4098133..ceb1a92 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -1,24 +1,24 @@
-<h3>Getting LMFIT</h3>
-<p>Current version: <b>{{ release }}</b></p>
-<p>Download:   <a href="http://pypi.python.org/pypi/lmfit/">PyPI (Python.org)</a>
-<p>Install:    <tt>pip install lmfit</tt>
-<p>
-<p>Development version: <br>
-    <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
-
-<h3>Questions?</h3>
-
-  <a href="faq.html">Frequently Asked Questions</a><br>
-  <a href="https://groups.google.com/group/lmfit-py">Mailing List</a><br>
-  <a href="support.html">Getting Help</a><br>
-
-<h3>Off-line Documentation</h3>
-
-[<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
-|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
-|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
-]
-
-
-<hr>
-<p>
+<h3>Getting LMFIT</h3>
+<p>Current version: <b>{{ release }}</b></p>
+<p>Download:   <a href="http://pypi.python.org/pypi/lmfit/">PyPI (Python.org)</a>
+<p>Install:    <tt>pip install lmfit</tt>
+<p>
+<p>Development version: <br>
+    <a href="https://github.com/lmfit/lmfit-py/">github.com</a> <br>
+
+<h3>Questions?</h3>
+
+  <a href="faq.html">Frequently Asked Questions</a><br>
+  <a href="https://groups.google.com/group/lmfit-py">Mailing List</a><br>
+  <a href="support.html">Getting Help</a><br>
+
+<h3>Off-line Documentation</h3>
+
+[<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.pdf">PDF</a>
+|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit.epub">EPUB</a>
+|<a href="http://cars9.uchicago.edu/software/python/lmfit/lmfit_doc.zip">HTML(zip)</a>
+]
+
+
+<hr>
+<p>
diff --git a/doc/bounds.rst b/doc/bounds.rst
index f64ec6a..40f8390 100644
--- a/doc/bounds.rst
+++ b/doc/bounds.rst
@@ -1,79 +1,78 @@
-.. _bounds_chapter:
-
-=================================
-Bounds Implementation
-=================================
-
-.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
-.. _MINUIT: http://en.wikipedia.org/wiki/MINUIT
-.. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy
-
-This section describes the implementation of :class:`Parameter` bounds.
-The `MINPACK-1`_ implementation used in :func:`scipy.optimize.leastsq` for
-the Levenberg-Marquardt algorithm does not explicitly support bounds on
-parameters, and expects to be able to fully explore the available range of
-values for any Parameter.  Simply placing hard constraints (that is,
-resetting the value when it exceeds the desired bounds) prevents the
-algorithm from determining the partial derivatives, and leads to unstable
-results.
-
-Instead of placing such hard constraints, bounded parameters are
-mathematically transformed using the formulation devised (and documented)
-for `MINUIT`_.  This is implemented following (and borrowing heavily from)
-the `leastsqbound`_ package from J. J. Helmus.   Parameter values are mapped from
-internally used, freely variable values :math:`P_{\rm internal}` to bounded
-parameters :math:`P_{\rm bounded}`.   When both ``min`` and ``max`` bounds
-are specified, the mapping is
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-        P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\
-	P_{\rm bounded}  &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2}
-    \end{eqnarray*}
-
-With only an upper limit ``max`` supplied, but ``min`` left unbounded, the
-mapping is:
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-        P_{\rm internal} &=& \sqrt{({\rm max} - P_{\rm bounded} + 1)^2 - 1} \\
-        P_{\rm bounded}  &=& {\rm max} + 1 - \sqrt{P_{\rm internal}^2 + 1}
-    \end{eqnarray*}
-
-With only a lower limit ``min`` supplied, but ``max`` left unbounded, the
-mapping is:
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-        P_{\rm internal} &=& \sqrt{(P_{\rm bounded} - {\rm min} + 1)^2 - 1} \\
-        P_{\rm bounded}  &=& {\rm min} - 1 + \sqrt{P_{\rm internal}^2 + 1}
-   \end{eqnarray*}
-
-With these mappings, the value for the bounded Parameter cannot exceed the
-specified bounds, though the internally varied value can be freely varied.
-
-It bears repeating that code from `leastsqbound`_ was adopted to implement
-the transformation described above.  The challenging part here (thanks
-again to Jonathan J. Helmus!) is to re-transform the covariance matrix so
-that the uncertainties can be estimated for bounded Parameters.  This is
-done by using the derivative :math:`dP_{\rm internal}/dP_{\rm bounded}`
-from the equations above to re-scale the Jacobian matrix before
-constructing the covariance matrix from it.  Tests show that this
-re-scaling of the covariance matrix works quite well, and that
-uncertainties estimated for bounded Parameters are quite reasonable.  Of
-course, if the best-fit value is very close to a boundary, the
-derivative-estimated uncertainty and correlations for that parameter may
-not be reliable.
-
-The `MINUIT`_ documentation recommends caution in using bounds.  Setting
-bounds can certainly increase the number of function evaluations (and so
-computation time), and in some cases may cause some instabilities, as the
-range of acceptable parameter values is not fully explored.  On the other
-hand, preliminary tests suggest that using ``max`` and ``min`` to set
-clearly outlandish bounds does not greatly affect performance or results.
-
+.. _bounds_chapter:
+
+=================================
+Bounds Implementation
+=================================
+
+.. _MINPACK-1: http://en.wikipedia.org/wiki/MINPACK
+.. _MINUIT: http://en.wikipedia.org/wiki/MINUIT
+.. _leastsqbound: https://github.com/jjhelmus/leastsqbound-scipy
+
+This section describes the implementation of :class:`Parameter` bounds.
+The `MINPACK-1`_ implementation used in :scipydoc:`optimize.leastsq` for
+the Levenberg-Marquardt algorithm does not explicitly support bounds on
+parameters, and expects to be able to fully explore the available range of
+values for any Parameter.  Simply placing hard constraints (that is,
+resetting the value when it exceeds the desired bounds) prevents the
+algorithm from determining the partial derivatives, and leads to unstable
+results.
+
+Instead of placing such hard constraints, bounded parameters are
+mathematically transformed using the formulation devised (and documented)
+for `MINUIT`_.  This is implemented following (and borrowing heavily from)
+the `leastsqbound`_ package from J. J. Helmus.   Parameter values are mapped from
+internally used, freely variable values :math:`P_{\rm internal}` to bounded
+parameters :math:`P_{\rm bounded}`.   When both ``min`` and ``max`` bounds
+are specified, the mapping is
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+        P_{\rm internal} &=& \arcsin\big(\frac{2 (P_{\rm bounded} - {\rm min})}{({\rm max} - {\rm min})} - 1\big) \\
+	P_{\rm bounded}  &=& {\rm min} + \big(\sin(P_{\rm internal}) + 1\big) \frac{({\rm max} - {\rm min})}{2}
+    \end{eqnarray*}
+
+With only an upper limit ``max`` supplied, but ``min`` left unbounded, the
+mapping is:
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+        P_{\rm internal} &=& \sqrt{({\rm max} - P_{\rm bounded} + 1)^2 - 1} \\
+        P_{\rm bounded}  &=& {\rm max} + 1 - \sqrt{P_{\rm internal}^2 + 1}
+    \end{eqnarray*}
+
+With only a lower limit ``min`` supplied, but ``max`` left unbounded, the
+mapping is:
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+        P_{\rm internal} &=& \sqrt{(P_{\rm bounded} - {\rm min} + 1)^2 - 1} \\
+        P_{\rm bounded}  &=& {\rm min} - 1 + \sqrt{P_{\rm internal}^2 + 1}
+   \end{eqnarray*}
+
+With these mappings, the value for the bounded Parameter cannot exceed the
+specified bounds, though the internally varied value can be freely varied.
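+
+As a quick check of the algebra, here is a minimal NumPy sketch of the
+two-sided mapping (an illustration of the equations above, not lmfit's
+internal code; the bounds and test value are made up)::
+
+    import numpy as np
+
+    vmin, vmax = 0.0, 10.0   # example bounds
+
+    def to_internal(p_bounded):
+        # map a bounded value into the unbounded internal space
+        return np.arcsin(2*(p_bounded - vmin)/(vmax - vmin) - 1)
+
+    def to_bounded(p_internal):
+        # map an internal value back inside [vmin, vmax]
+        return vmin + (np.sin(p_internal) + 1)*(vmax - vmin)/2
+
+    print(to_bounded(to_internal(3.7)))   # round trip recovers 3.7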
+
+It bears repeating that code from `leastsqbound`_ was adopted to implement
+the transformation described above.  The challenging part here (thanks
+again to Jonathan J. Helmus!) is to re-transform the covariance matrix so
+that the uncertainties can be estimated for bounded Parameters.  This is
+done by using the derivative :math:`dP_{\rm internal}/dP_{\rm bounded}`
+from the equations above to re-scale the Jacobian matrix before
+constructing the covariance matrix from it.  Tests show that this
+re-scaling of the covariance matrix works quite well, and that
+uncertainties estimated for bounded Parameters are quite reasonable.  Of
+course, if the best-fit value is very close to a boundary, the
+derivative-estimated uncertainty and correlations for that parameter may
+not be reliable.
+
+The `MINUIT`_ documentation recommends caution in using bounds.  Setting
+bounds can certainly increase the number of function evaluations (and so
+computation time), and in some cases may cause some instabilities, as the
+range of acceptable parameter values is not fully explored.  On the other
+hand, preliminary tests suggest that using ``max`` and ``min`` to set
+clearly outlandish bounds does not greatly affect performance or results.
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 9d2671e..d1e68b6 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -1,943 +1,980 @@
-.. _builtin_models_chapter:
-
-=====================================================
-Built-in Fitting Models in the :mod:`models` module
-=====================================================
-
-.. module:: models
-
-Lmfit provides several built-in fitting models in the :mod:`models` module.
-These pre-defined models each subclass from the :class:`model.Model` class
-of the previous chapter and wrap relatively well-known functional forms,
-such as Gaussians, Lorentzians, and Exponentials, that are used in a wide
-range of scientific domains.  In fact, all of these models are based on
-simple, plain Python functions defined in the :mod:`lineshapes` module.  In
-addition to wrapping a function into a :class:`model.Model`, these models
-also provide a :meth:`guess` method that is intended to give a reasonable
-set of starting values for the parameters from an array of data that
-closely approximates the data to be fit.
-
-As shown in the previous chapter, a key feature of the :class:`model.Model`
-class is that models can easily be combined to give a composite
-:class:`model.Model`. Thus while some of the models listed here may seem
-pretty trivial (notably, :class:`ConstantModel` and :class:`LinearModel`),
-the main point of having these is to be able to use them in composite
-models.  For example, a Lorentzian plus a linear background might be
-represented as::
-
-    >>> from lmfit.models import LinearModel, LorentzianModel
-    >>> peak = LorentzianModel()
-    >>> background  = LinearModel()
-    >>> model = peak + background
-
-All the models listed below are one dimensional, with an independent
-variable named ``x``.  Many of these models represent a function with a
-distinct peak, and so share common features.  To maintain uniformity,
-common parameter names are used whenever possible.  Thus, most models have
-a parameter called ``amplitude`` that represents the overall height (or
-area) of a peak or function, a ``center`` parameter that represents a peak
-centroid position, and a ``sigma`` parameter that gives a characteristic
-width.   Some peak shapes also have a parameter ``fwhm``, typically
-constrained by ``sigma`` to give the full width at half maximum.
-
-After the list of built-in models, a few examples of their use are given.
-
-Peak-like models
--------------------
-
-There are many peak-like models available.  These include
-:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
-some less commonly used variations.  The :meth:`guess`
-methods for all of these make a fairly crude guess for the value of
-``amplitude``, but also set a lower bound of 0 on the value of ``sigma``.
-
-:class:`GaussianModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: GaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Gaussian or normal distribution lineshape
-<http://en.wikipedia.org/wiki/Normal_distribution>`_.  Parameter names:
-``amplitude``, ``center``, and ``sigma``.  In addition, a constrained
-parameter ``fwhm`` is included.
-
-.. math::
-
-  f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
-
-where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
-half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
-:math:`2.3548\sigma`.
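-
-As a sketch of typical use (``x`` and ``y`` here stand for NumPy arrays of
-hypothetical data to be fit; this mirrors the generic Model interface)::
-
-    from lmfit.models import GaussianModel
-
-    model = GaussianModel()
-    params = model.guess(y, x=x)        # crude starting values from data
-    result = model.fit(y, params, x=x)  # do the fit
-    print(result.fit_report())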
-
-
-:class:`LorentzianModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: LorentzianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Lorentzian or Cauchy-Lorentz distribution function
-<http://en.wikipedia.org/wiki/Cauchy_distribution>`_.  Parameter names:
-``amplitude``, ``center``, and ``sigma``.  In addition, a constrained
-parameter ``fwhm`` is included.
-
-.. math::
-
-  f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
-
-where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
-half maximum is :math:`2\sigma`.
-
-
-:class:`VoigtModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: VoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Voigt distribution function
-<http://en.wikipedia.org/wiki/Voigt_profile>`_.  Parameter names:
-``amplitude``, ``center``, and ``sigma``.  A ``gamma`` parameter is also
-available.  By default, it is constrained to have a value equal to ``sigma``,
-though this can be varied independently.  In addition, a constrained
-parameter ``fwhm`` is included.  The definition for the Voigt function used
-here is
-
-.. math::
-
-    f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
-
-where
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-     z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
-     w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
-   \end{eqnarray*}
-
-and :func:`erfc` is the complementary error function.  As above,
-``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma``
-corresponds  to :math:`\gamma`.
-If ``gamma`` is kept at the default value (constrained to ``sigma``),
-the full width at half maximum is approximately :math:`3.6013\sigma`.
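-
-To vary ``gamma`` independently, its constraint expression can be cleared,
-for example (a minimal sketch; the starting value of 0.7 is arbitrary)::
-
-    from lmfit.models import VoigtModel
-
-    model = VoigtModel()
-    params = model.make_params()
-    # drop the default constraint tying gamma to sigma
-    params['gamma'].set(value=0.7, vary=True, expr='')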
-
-
-:class:`PseudoVoigtModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: PseudoVoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `pseudo-Voigt distribution function
-<http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation>`_,
-which is a weighted sum of Gaussian and Lorentzian distribution functions
-that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
-and full width at half maximum (and so have constrained values of
-``sigma`` (:math:`\sigma`)).  A parameter ``fraction`` (:math:`\alpha`)
-controls the relative weight of the Gaussian and Lorentzian components,
-giving the full definition of
-
-.. math::
-
-  f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
- + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
-
-where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width
-at half maximum of each component and of the sum is :math:`2\sigma`. The
-:meth:`guess` function always sets the starting value for ``fraction`` at 0.5.
-
-
-:class:`MoffatModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: MoffatModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Moffat distribution function
-<https://en.wikipedia.org/wiki/Moffat_distribution>`_.  The parameters are
-``amplitude`` (:math:`A`), ``center`` (:math:`\mu`), a width parameter
-``sigma`` (:math:`\sigma`), and an exponent ``beta`` (:math:`\beta`).
-For :math:`\beta=1`, the Moffat has a Lorentzian shape.
-
-.. math::
-
-  f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta}
-
-The full width at half maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`.  The
-:meth:`guess` function always sets the starting value for ``beta`` to 1.
-
-
-:class:`Pearson7Model`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: Pearson7Model(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Pearson VII distribution
-<http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
-This is a Lorentzian-like distribution function.  It has the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also an ``exponent`` (:math:`m`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2}  \bigr]^{-m}
-
-where :math:`\beta` is the beta function (see :func:`scipy.special.beta` in
-:mod:`scipy.special`).  The :meth:`guess` function always
-gives a starting value for ``exponent`` of 1.5.
-
-:class:`StudentsTModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: StudentsTModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Student's t distribution function
-<http://en.wikipedia.org/wiki/Student%27s_t-distribution>`_, with the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
-
-
-where :math:`\Gamma(x)` is the gamma function.
-
-
-:class:`BreitWignerModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: BreitWignerModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Breit-Wigner-Fano function
-<http://en.wikipedia.org/wiki/Fano_resonance>`_.  It has the usual
-parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), plus ``q`` (:math:`q`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
-
-
-:class:`LognormalModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: LognormalModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on the `Log-normal distribution function
-<http://en.wikipedia.org/wiki/Lognormal>`_.
-It has the usual parameters
-``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
-(:math:`\sigma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma) = \frac{A e^{-(\ln(x) - \mu)^2/ 2\sigma^2}}{x}
-
-
-:class:`DampedOcsillatorModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: DampedOcsillatorModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on the `Damped Harmonic Oscillator Amplitude
-<http://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
-
-
-:class:`ExponentialGaussianModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ExponentialGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model of an `Exponentially modified Gaussian distribution
-<http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
-    \exp\bigl[\gamma({\mu - x  + \gamma\sigma^2/2})\bigr]
-    {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
-
-
-where :func:`erfc` is the complementary error function.
-
-:class:`SkewedGaussianModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: SkewedGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A variation of the above model, this is a `Skewed normal distribution
-<http://en.wikipedia.org/wiki/Skew_normal_distribution>`_.
-It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
-``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
-  e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
-      {\operatorname{erf}}\bigl[
-         \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
-     \bigr] \Bigr\}
-
-
-where :func:`erf` is the error function.
-
-
-:class:`DonaichModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: DonaichModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model of a `Doniach Sunjic asymmetric lineshape
-<http://www.casaxps.com/help_manual/line_shapes.htm>`_, used in
-photo-emission, with the usual parameters ``amplitude`` (:math:`A`),
-``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`), and also ``gamma``
-(:math:`\gamma`) in
-
-.. math::
-
-    f(x; A, \mu, \sigma, \gamma) = A\frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)
-    \arctan{[(x - \mu)/\sigma]}\bigr]} {\bigl[1 + (x-\mu)^2/\sigma^2\bigr]^{(1-\gamma)/2}}
-
-
-Linear and Polynomial Models
-------------------------------------
-
-These models correspond to polynomials of some degree.  Of course, lmfit is
-a very inefficient way to do linear regression (see :func:`numpy.polyfit`
-or :func:`scipy.stats.linregress`), but these models may be useful as one
-of many components of a composite model.
-
-:class:`ConstantModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ConstantModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-   a class that consists of a single value, ``c``.  This is constant in the
-   sense of having no dependence on the independent variable ``x``, not in
-   the sense of being non-varying.  To be clear, ``c`` will be a variable
-   Parameter.
-
-:class:`LinearModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: LinearModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-   a class that gives a linear model:
-
-.. math::
-
-    f(x; m, b) = m x + b
-
-with parameters ``slope`` for :math:`m` and  ``intercept`` for :math:`b`.
-
-
-:class:`QuadraticModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: QuadraticModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-
-   a class that gives a quadratic model:
-
-.. math::
-
-    f(x; a, b, c) = a x^2 + b x + c
-
-with parameters ``a``, ``b``, and ``c``.
-
-
-:class:`ParabolicModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ParabolicModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-   same as :class:`QuadraticModel`.
-
-
-:class:`PolynomialModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-.. class:: PolynomialModel(degree, missing=None[, prefix=''[, name=None[, **kws]]])
-
-   a class that gives a polynomial model up to ``degree`` (with maximum
-   value of 7).
-
-.. math::
-
-    f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0}^{7} c_i  x^i
-
-with parameters ``c0``, ``c1``, ..., ``c7``.  The supplied ``degree``
-will specify how many of these are actual variable parameters.  This uses
-:func:`numpy.polyval` for its calculation of the polynomial.
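-
-For instance, a cubic could be set up as follows (a minimal sketch,
-assuming ``x`` and ``y`` hold the data arrays)::
-
-    from lmfit.models import PolynomialModel
-
-    poly = PolynomialModel(degree=3)    # variable parameters c0 .. c3
-    params = poly.guess(y, x=x)         # starting values via a polynomial fit
-    result = poly.fit(y, params, x=x)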
-
-
-
-Step-like models
------------------------------------------------
-
-Two models represent step-like functions, and share many characteristics.
-
-:class:`StepModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: StepModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
-
-A model based on a Step function, with four choices for functional form.
-The step function starts with a value 0, and ends with a value of :math:`A`
-(``amplitude``), rising to :math:`A/2` at :math:`\mu` (``center``),
-with :math:`\sigma` (``sigma``) setting the characteristic width. The
-supported functional forms are ``linear`` (the default), ``atan`` or
-``arctan`` for an arc-tangent function,  ``erf`` for an error function, or
-``logistic`` for a `logistic function <http://en.wikipedia.org/wiki/Logistic_function>`_.
-The forms are
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-   & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})  & = A \min{[1, \max{(0,  \alpha)}]} \\
-   & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})  & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
-   & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})     & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
-   & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 +  e^{\alpha}} ]
-   \end{eqnarray*}
-
-where :math:`\alpha  = (x - \mu)/{\sigma}`.
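-
-The form is chosen at construction time, for example (a sketch; the
-initial parameter values are arbitrary)::
-
-    from lmfit.models import StepModel
-
-    step = StepModel(form='erf')
-    params = step.make_params(amplitude=5, center=2, sigma=0.5)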
-
-:class:`RectangleModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-.. class:: RectangleModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
-
-A model based on a Step-up and Step-down function of the same form.  The
-same choices for functional form as for :class:`StepModel` are supported,
-with ``linear`` as the default.  The function starts with a value 0, rises
-to a value of :math:`A` (``amplitude``), passing through :math:`A/2` at
-:math:`\mu_1` (``center1``), with :math:`\sigma_1` (``sigma1``) setting the
-characteristic width, and then drops back through :math:`A/2` at
-:math:`\mu_2` (``center2``), with characteristic width :math:`\sigma_2`
-(``sigma2``).
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-   &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})   &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0,  \alpha_2)}]} \} \\
-   &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})   &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
-   &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})      &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
-   &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 +  e^{\alpha_2}} ]
-   \end{eqnarray*}
-
-
-where :math:`\alpha_1  = (x - \mu_1)/{\sigma_1}` and :math:`\alpha_2  = -(x - \mu_2)/{\sigma_2}`.
-
-
-Exponential and Power law models
------------------------------------------------
-
-:class:`ExponentialModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ExponentialModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on an `exponential decay function
-<http://en.wikipedia.org/wiki/Exponential_decay>`_. With parameters named
-``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), this has the form:
-
-.. math::
-
-   f(x; A, \tau) = A e^{-x/\tau}
-
-
-:class:`PowerLawModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: PowerLawModel(missing=None[, prefix=''[, name=None[, **kws]]])
-
-A model based on a `Power Law <http://en.wikipedia.org/wiki/Power_law>`_.
-With parameters
-named ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), this has the
-form:
-
-.. math::
-
-   f(x; A, k) = A x^k
-
-
-User-defined Models
-----------------------------
-
-.. _asteval: http://newville.github.io/asteval/
-
-As shown in the previous chapter (:ref:`model_chapter`), it is fairly
-straightforward to build fitting models from parametrized python functions.
-The number of model classes listed so far in the present chapter should
-make it clear that this process is not too difficult.  Still, it is
-sometimes desirable to build models from a user-supplied function.  This
-may be especially true if model-building is built-in to some larger library
-or application for fitting in which the user may not be able to easily
-build and use a new model from python code.
-
-
-The :class:`ExpressionModel` allows a model to be built from a
-user-supplied expression.  This uses the `asteval`_ module also used for
-mathematical constraints as discussed in :ref:`constraints_chapter`.
-
-
-:class:`ExpressionModel`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. class:: ExpressionModel(expr, independent_vars=None, init_script=None, **kws)
-
-    A model using a user-supplied mathematical expression, which can be nearly any valid Python expression.
-
-    :param expr: expression used to build the model
-    :type expr: string
-    :param independent_vars: list of argument names in expression that are independent variables.
-    :type independent_vars: ``None`` (default) or list of strings for independent variables.
-    :param init_script: python script to run before parsing and evaluating expression.
-    :type init_script: ``None`` (default) or string
-
-with other parameters passed to :class:`model.Model`, with the notable
-exception that :class:`ExpressionModel` does **not** support the `prefix` argument.
-
-Since the point of this model is that an arbitrary expression will be
-supplied, the determination of what are the parameter names for the model
-happens when the model is created.  To do this, the expression is parsed,
-and all symbol names are found.  Names that are already known (there are
-over 500 function and value names in the asteval namespace, including most
-python builtins, more than 200 functions inherited from numpy, and more
-than 20 common lineshapes defined in the :mod:`lineshapes` module) are not
-converted to parameters.  Unrecognized names are expected to be names
-of either parameters or independent variables.  If `independent_vars` is the
-default value of ``None``, and if the expression contains a variable named
-`x`, that will be used as the independent variable.  Otherwise,
-`independent_vars` must be given.
-
-For example, if one creates an :class:`ExpressionModel` as::
-
-    >>> mod = ExpressionModel('off + amp * exp(-x/x0) * sin(x*phase)')
-
-The name `exp` will be recognized as the exponential function, so the model
-will be interpreted to have parameters named `off`, `amp`, `x0` and
-`phase`. In addition, `x` will be assumed to be the sole independent variable.
-In general, there is no obvious way to set default parameter values or
-parameter hints for bounds, so this will have to be handled explicitly.
-
-To evaluate this model, you might do the following::
-
-    >>> x = numpy.linspace(0, 10, 501)
-    >>> params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04)
-    >>> y = mod.eval(params, x=x)
-
-
-While many custom models can be built with a single line expression
-(especially since the names of the lineshapes like `gaussian`, `lorentzian`
-and so on, as well as many numpy functions, are available), more complex
-models will inevitably require multi-line functions.  You can include
-such Python code with the `init_script` argument.  The text of this script
-is evaluated when the model is initialized (and before the actual
-expression is parsed), so that you can define functions to be used
-in your expression.
-
-As a probably unphysical example, to make a model that is the derivative of
-a Gaussian function times the logarithm of a Lorentzian function, you
-could define this in a script::
-
-    >>> script = """
-    def mycurve(x, amp, cen, sig):
-        loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
-        gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
-        return log(loren)*gradient(gauss)/gradient(x)
-    """
-
-and then use this with :class:`ExpressionModel` as::
-
-    >>> mod = ExpressionModel('mycurve(x, height, mid, wid)',
-                              init_script=script,
-                              independent_vars=['x'])
-
-As above, this will interpret the parameter names to be `height`, `mid`,
-and `wid`, and build a model that can be used to fit data.
-
-
-
-Example 1: Fit Peaked data to Gaussian, Lorentzian, and  Voigt profiles
-------------------------------------------------------------------------
-
-Here, we will fit data to three similar line shapes, in order to decide which
-might be the better model.  We will start with a Gaussian profile, as in
-the previous chapter, but use the built-in :class:`GaussianModel` instead
-of one we write ourselves.  This is a slightly different version from the
-one in the previous example in that the parameter names are different, and have
-built-in default values.  So, we'll simply use::
-
-    from numpy import loadtxt
-    from lmfit.models import GaussianModel
-
-    data = loadtxt('test_peak.dat')
-    x = data[:, 0]
-    y = data[:, 1]
-
-    mod = GaussianModel()
-    pars = mod.guess(y, x=x)
-    out  = mod.fit(y, pars, x=x)
-    print(out.fit_report(min_correl=0.25))
-
-which prints out the results::
-
-    [[Model]]
-        gaussian
-    [[Fit Statistics]]
-        # function evals   = 21
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 29.994
-        reduced chi-square = 0.075
-    [[Variables]]
-        amplitude:   30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
-        center:      9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
-        fwhm:        2.90156963 +/- 0.017366 (0.60%)  == '2.3548200*sigma'
-        sigma:       1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
-    [[Correlations]] (unreported correlations are <  0.250)
-        C(amplitude, sigma)          =  0.577
-
-
-We see a few interesting differences from the results of the previous
-chapter. First, the parameter names are longer. Second, there is a
-``fwhm`` parameter, defined as :math:`\sim 2.355\sigma`. And third, the
-automated initial guesses are pretty good.  A plot of the fit, though,
-shows a not-so-great fit:
-
-.. _figA1:
-
-  .. image::  _images/models_peak1.png
-     :target: _images/models_peak1.png
-     :width: 48 %
-  .. image::  _images/models_peak2.png
-     :target: _images/models_peak2.png
-     :width: 48 %
-
-  Fit to peak with Gaussian (left) and Lorentzian (right) models.
-
-suggesting that a different peak shape, with longer tails, should be used.
-Perhaps a Lorentzian would be better?  To do this, we simply replace
-``GaussianModel`` with ``LorentzianModel`` to get a
-:class:`LorentzianModel`::
-
-    from lmfit.models import LorentzianModel
-    mod = LorentzianModel()
-    pars = mod.guess(y, x=x)
-    out  = mod.fit(y, pars, x=x)
-    print(out.fit_report(min_correl=0.25))
-
-Predictably, the first thing we try gives results that are worse::
-
-    [[Model]]
-        lorentzian
-    [[Fit Statistics]]
-        # function evals   = 25
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 53.754
-        reduced chi-square = 0.135
-    [[Variables]]
-        amplitude:   38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
-        center:      9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
-        fwhm:        2.30969034 +/- 0.026312 (1.14%)  == '2.0000000*sigma'
-        sigma:       1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
-    [[Correlations]] (unreported correlations are <  0.250)
-        C(amplitude, sigma)          =  0.709
-
-
-with the plot shown on the right in the figure above.
-
-A Voigt model does a better job.  Using :class:`VoigtModel`, this is
-as simple as::
-
-    from lmfit.models import VoigtModel
-    mod = VoigtModel()
-    pars = mod.guess(y, x=x)
-    out  = mod.fit(y, pars, x=x)
-    print(out.fit_report(min_correl=0.25))
-
-which gives::
-
-    [[Model]]
-        voigt
-    [[Fit Statistics]]
-        # function evals   = 17
-        # data points      = 401
-        # variables        = 3
-        chi-square         = 14.545
-        reduced chi-square = 0.037
-    [[Variables]]
-        amplitude:   35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
-        center:      9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
-        fwhm:        2.62951718 +/- 0.013269 (0.50%)  == '3.6013100*sigma'
-        gamma:       0.73015574 +/- 0.003684 (0.50%)  == 'sigma'
-        sigma:       0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
-    [[Correlations]] (unreported correlations are <  0.250)
-        C(amplitude, sigma)          =  0.651
-
-
-with the much better value for :math:`\chi^2` and the obviously better
-match to the data as seen in the figure below (left).
-
-.. _figA2:
-
-  .. image::  _images/models_peak3.png
-     :target: _images/models_peak3.png
-     :width: 48 %
-  .. image::  _images/models_peak4.png
-     :target: _images/models_peak4.png
-     :width: 48 %
-
-  Fit to peak with Voigt model (left) and Voigt model with ``gamma``
-  varying independently of ``sigma`` (right).
-
-The Voigt function has a :math:`\gamma` parameter (``gamma``) that can be
-distinct from ``sigma``.  The default behavior used above constrains
-``gamma`` to have exactly the same value as ``sigma``.  If we allow these
-to vary separately, does the fit improve?  To do this, we have to change
-the ``gamma`` parameter from a constrained expression and give it a
-starting value::
-
-    mod = VoigtModel()
-    pars = mod.guess(y, x=x)
-    pars['gamma'].set(value=0.7, vary=True, expr='')
-
-    out  = mod.fit(y, pars, x=x)
-    print(out.fit_report(min_correl=0.25))
-
-which gives::
-
-    [[Model]]
-        voigt
-    [[Fit Statistics]]
-        # function evals   = 21
-        # data points      = 401
-        # variables        = 4
-        chi-square         = 10.930
-        reduced chi-square = 0.028
-    [[Variables]]
-        amplitude:   34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
-        center:      9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
-        fwhm:        3.22385491 +/- 0.050974 (1.58%)  == '3.6013100*sigma'
-        gamma:       0.52540157 +/- 0.018579 (3.54%) (init= 0.7)
-        sigma:       0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
-    [[Correlations]] (unreported correlations are <  0.250)
-        C(amplitude, gamma)          =  0.821
-
-
-and the fit shown on the right above.
-
-Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
-is definitely improved with a separately varying ``gamma`` parameter.  In
-addition, the two values for ``gamma`` and ``sigma`` differ significantly
--- well outside the estimated uncertainties.  Even more compelling, reduced
-:math:`\chi^2` is improved even though a fourth variable has been added to
-the fit.  In the simplest statistical sense, this suggests that ``gamma``
-is a significant variable in the model.
-
-
-This example shows how easy it can be to alter and compare fitting models
-for simple problems.  The example is included in the ``doc_peakmodels.py``
-file in the examples directory.
-
-
-Example 2: Fit data to a Composite Model with pre-defined models
-------------------------------------------------------------------
-
-Here, we repeat the point made at the end of the last chapter that
-instances of :class:`model.Model` class can be added together to make a
-*composite model*.  By using the large number of built-in models available,
-it is therefore very simple to build models that contain multiple peaks and
-various backgrounds.  An example of a simple fit to a noisy step function
-plus a line:
-
-.. literalinclude:: ../examples/doc_stepmodel.py
-
-After constructing step-like data, we first create a :class:`StepModel`
-telling it to use the ``erf`` form (see details above), and a
-:class:`LinearModel`.  We set initial values, in one case using the data
-and the :meth:`guess` method for the initial step function parameters, and
-:meth:`make_params` arguments for the linear component.
-After making a composite model, we run :meth:`fit` and report the
-results, which give::
-
-
-    [[Model]]
-     Composite Model:
-        step(prefix='step_',form='erf')
-        linear(prefix='line_')
-    [[Fit Statistics]]
-        # function evals   = 49
-        # data points      = 201
-        # variables        = 5
-        chi-square         = 633.465
-        reduced chi-square = 3.232
-    [[Variables]]
-        line_intercept:   11.5685248 +/- 0.285611 (2.47%) (init= 10.72406)
-        line_slope:       2.03270159 +/- 0.096041 (4.72%) (init= 0)
-        step_amplitude:   112.270535 +/- 0.674790 (0.60%) (init= 136.3006)
-        step_center:      3.12343845 +/- 0.005370 (0.17%) (init= 2.5)
-        step_sigma:       0.67468813 +/- 0.011336 (1.68%) (init= 1.428571)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(step_amplitude, step_sigma)  =  0.564
-        C(line_intercept, step_center)  =  0.428
-        C(step_amplitude, step_center)  =  0.109
-
-with a plot of
-
-.. image::  _images/models_stepfit.png
-   :target: _images/models_stepfit.png
-   :width: 50 %
-
-
-Example 3: Fitting Multiple Peaks -- and using Prefixes
-------------------------------------------------------------------
-
-.. _NIST StRD: http://itl.nist.gov/div898/strd/nls/nls_main.shtml
-
-As shown above, many of the models have similar parameter names.  For
-composite models, this could lead to a problem of having parameters for
-different parts of the model having the same name.  To overcome this, each
-:class:`model.Model` can have a ``prefix`` attribute (normally set to a blank
-string) that will be put at the beginning of each parameter name.  To
-illustrate, we fit one of the classic datasets from the `NIST StRD`_ suite
-involving a decaying exponential and two gaussians.
-
-.. literalinclude:: ../examples/doc_nistgauss.py
-
-
-where we give a separate prefix to each model (they all have an
-``amplitude`` parameter).  The ``prefix`` values are attached transparently
-to the models.
-
-Note that the calls to :meth:`make_params` used the bare
-name, without the prefix.  We could have used the prefixed names, but because
-we used the individual models ``gauss1`` and ``gauss2``, there was no need.
-
-
-Note also in the example here that we explicitly set bounds on many of the
-parameter values.
-
-The fit results printed out are::
-
-    [[Model]]
-     Composite Model:
-        gaussian(prefix='g1_')
-        gaussian(prefix='g2_')
-        exponential(prefix='exp_')
-    [[Fit Statistics]]
-        # function evals   = 55
-        # data points      = 250
-        # variables        = 8
-        chi-square         = 1247.528
-        reduced chi-square = 5.155
-    [[Variables]]
-        exp_amplitude:   99.0183291 +/- 0.537487 (0.54%) (init= 162.2102)
-        exp_decay:       90.9508788 +/- 1.103104 (1.21%) (init= 93.24905)
-        g1_amplitude:    4257.77384 +/- 42.38354 (1.00%) (init= 2000)
-        g1_center:       107.030955 +/- 0.150068 (0.14%) (init= 105)
-        g1_fwhm:         39.2609205 +/- 0.377907 (0.96%)  == '2.3548200*g1_sigma'
-        g1_sigma:        16.6725781 +/- 0.160482 (0.96%) (init= 15)
-        g2_amplitude:    2493.41747 +/- 36.16907 (1.45%) (init= 2000)
-        g2_center:       153.270103 +/- 0.194665 (0.13%) (init= 155)
-        g2_fwhm:         32.5128760 +/- 0.439860 (1.35%)  == '2.3548200*g2_sigma'
-        g2_sigma:        13.8069474 +/- 0.186791 (1.35%) (init= 15)
-    [[Correlations]] (unreported correlations are <  0.500)
-        C(g1_amplitude, g1_sigma)    =  0.824
-        C(g2_amplitude, g2_sigma)    =  0.815
-        C(g1_sigma, g2_center)       =  0.684
-        C(g1_amplitude, g2_center)   =  0.648
-        C(g1_center, g2_center)      =  0.621
-        C(g1_center, g1_sigma)       =  0.507
-
-
-
-We get a very good fit to this challenging problem (described at the NIST
-site as of average difficulty, but the tests there are generally hard) by
-applying reasonable initial guesses and putting modest but explicit bounds
-on the parameter values.  This fit is shown on the left:
-
-.. _figA3:
-
-  .. image::  _images/models_nistgauss.png
-     :target: _images/models_nistgauss.png
-     :width: 48 %
-  .. image::  _images/models_nistgauss2.png
-     :target: _images/models_nistgauss2.png
-     :width: 48 %
-
-
-One final point on setting initial values.  From looking at the data
-itself, we can see the two Gaussian peaks are reasonably well separated but
-do overlap. Furthermore, we can tell that the initial guess for the
-decaying exponential component was poorly estimated because we used the
-full data range.  We can simplify the initial parameter values by using
-this, and by defining an :func:`index_of` function to limit the data range.
-That is, with::
-
-    def index_of(arrval, value):
-        "return index of array *at or below* value "
-        if value < min(arrval):  return 0
-        return max(np.where(arrval<=value)[0])
-
-    ix1 = index_of(x,  75)
-    ix2 = index_of(x, 135)
-    ix3 = index_of(x, 175)
-
-    pars = exp_mod.guess(y[:ix1], x=x[:ix1])
-    pars.update(gauss1.guess(y[ix1:ix2], x=x[ix1:ix2]))
-    pars.update(gauss2.guess(y[ix2:ix3], x=x[ix2:ix3]))
-
-we can get a better initial estimate, and the fit converges in fewer steps,
-getting to identical values (to the precision printed out in the report),
-and without any bounds on parameters at all::
-
-    [[Model]]
-     Composite Model:
-        gaussian(prefix='g1_')
-        gaussian(prefix='g2_')
-        exponential(prefix='exp_')
-    [[Fit Statistics]]
-        # function evals   = 46
-        # data points      = 250
-        # variables        = 8
-        chi-square         = 1247.528
-        reduced chi-square = 5.155
-    [[Variables]]
-        exp_amplitude:   99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
-        exp_decay:       90.9508863 +/- 1.103105 (1.21%) (init= 111.1985)
-        g1_amplitude:    4257.77321 +/- 42.38338 (1.00%) (init= 2126.432)
-        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 106.5)
-        g1_fwhm:         39.2609141 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
-        g1_sigma:        16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
-        g2_amplitude:    2493.41766 +/- 36.16948 (1.45%) (init= 1878.892)
-        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 150)
-        g2_fwhm:         32.5128777 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
-        g2_sigma:        13.8069481 +/- 0.186794 (1.35%) (init= 15)
-    [[Correlations]] (unreported correlations are <  0.500)
-        C(g1_amplitude, g1_sigma)    =  0.824
-        C(g2_amplitude, g2_sigma)    =  0.815
-        C(g1_sigma, g2_center)       =  0.684
-        C(g1_amplitude, g2_center)   =  0.648
-        C(g1_center, g2_center)      =  0.621
-        C(g1_center, g1_sigma)       =  0.507
-
-
-
-This example is in the file ``doc_nistgauss2.py`` in the examples folder,
-and the fit result shown on the right above shows an improved initial
-estimate of the data.
+.. _builtin_models_chapter:
+
+=====================================================
+Built-in Fitting Models in the :mod:`models` module
+=====================================================
+
+.. module:: models
+
+Lmfit provides several builtin fitting models in the :mod:`models` module.
+These pre-defined models each subclass from the :class:`model.Model` class of the
+previous chapter and wrap relatively well-known functional forms, such as
+Gaussians, Lorentzians, and Exponentials that are used in a wide range of
+scientific domains.  In fact, all of these models are based on simple, plain
+python functions defined in the :mod:`lineshapes` module.  In addition to
+wrapping a function into a :class:`model.Model`, these models also provide a
+:meth:`guess` method that is intended to give a reasonable
+set of starting values from a data array that closely approximates the
+data to be fit.
+
+As shown in the previous chapter, a key feature of the :class:`model.Model` class
+is that models can easily be combined to give a composite
+:class:`model.Model`. Thus, while some of the models listed here may seem pretty
+trivial (notably, :class:`ConstantModel` and :class:`LinearModel`), the
+main point of having these is to be able to use them in composite models.  For
+example, a Lorentzian plus a linear background might be represented as::
+
+    >>> from lmfit.models import LinearModel, LorentzianModel
+    >>> peak = LorentzianModel()
+    >>> background  = LinearModel()
+    >>> model = peak + background
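+
+The composite model then has the parameters of both components.  As a
+minimal sketch, the parameter names can be inspected with
+:meth:`make_params`::
+
+    >>> pars = model.make_params()
+    >>> for name in pars:
+    ...     print(name)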
+
+All the models listed below are one dimensional, with an independent
+variable named ``x``.  Many of these models represent a function with a
+distinct peak, and so share common features.  To maintain uniformity,
+common parameter names are used whenever possible.  Thus, most models have
+a parameter called ``amplitude`` that represents the overall height (or
+area) of a peak or function, a ``center`` parameter that represents a peak
+centroid position, and a ``sigma`` parameter that gives a characteristic
+width.  Many peak shapes also have a parameter ``fwhm`` (constrained by
+``sigma``) giving the full width at half maximum and a parameter ``height``
+(constrained by ``sigma`` and ``amplitude``) to give the maximum peak
+height.
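+
+As a quick sketch of these shared, constrained names (the parameter values
+passed here are arbitrary)::
+
+    >>> from lmfit.models import GaussianModel
+    >>> pars = GaussianModel().make_params(amplitude=10, center=5, sigma=2)
+    >>> print(pars['fwhm'].expr)
+    2.3548200*sigma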
+
+After the list of builtin models, a few examples of their use are given.
+
+Peak-like models
+-------------------
+
+There are many peak-like models available.  These include
+:class:`GaussianModel`, :class:`LorentzianModel`, :class:`VoigtModel` and
+some less commonly used variations.  The :meth:`guess`
+methods for all of these make a fairly crude guess for the value of
+``amplitude``, but also set a lower bound of 0 on the value of ``sigma``.
+
+:class:`GaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: GaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Gaussian or normal distribution lineshape
+<http://en.wikipedia.org/wiki/Normal_distribution>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.
+In addition, parameters ``fwhm`` and ``height`` are included as constraints
+to report full width at half maximum and maximum peak height, respectively.
+
+.. math::
+
+  f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
+
+where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
+half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
+:math:`2.3548\sigma`.
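+
+Since each model wraps a plain function from the :mod:`lineshapes` module,
+the underlying lineshape can also be evaluated directly.  A minimal
+sketch::
+
+    >>> import numpy as np
+    >>> from lmfit.lineshapes import gaussian
+    >>> x = np.linspace(-3, 3, 7)
+    >>> y = gaussian(x, amplitude=1.0, center=0.0, sigma=1.0)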
+
+
+:class:`LorentzianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LorentzianModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Lorentzian or Cauchy-Lorentz distribution function
+<http://en.wikipedia.org/wiki/Cauchy_distribution>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.
+In addition, parameters ``fwhm`` and ``height`` are included as constraints
+to report full width at half maximum and maximum peak height, respectively.
+
+.. math::
+
+  f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
+half maximum is :math:`2\sigma`.
+
+
+:class:`VoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: VoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Voigt distribution function
+<http://en.wikipedia.org/wiki/Voigt_profile>`_.  Parameter names:
+``amplitude``, ``center``, and ``sigma``.  A ``gamma`` parameter is also
+available.  By default, it is constrained to have value equal to ``sigma``,
+though this can be varied independently.  In addition, parameters ``fwhm``
+and ``height`` are included as constraints to report full width at half
+maximum and maximum peak height, respectively.  The definition for the
+Voigt function used here is
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A \textrm{Re}[w(z)]}{\sigma\sqrt{2 \pi}}
+
+where
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+     z &=& \frac{x-\mu +i\gamma}{\sigma\sqrt{2}} \\
+     w(z) &=& e^{-z^2}{\operatorname{erfc}}(-iz)
+   \end{eqnarray*}
+
+and :func:`erfc` is the complementary error function.  As above,
+``amplitude`` corresponds to :math:`A`, ``center`` to
+:math:`\mu`, and ``sigma`` to :math:`\sigma`. The parameter ``gamma``
+corresponds  to :math:`\gamma`.
+If ``gamma`` is kept at the default value (constrained to ``sigma``),
+the full width at half maximum is approximately :math:`3.6013\sigma`.
+
+
+:class:`PseudoVoigtModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: PseudoVoigtModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `pseudo-Voigt distribution function
+<http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation>`_,
+which is a weighted sum of Gaussian and Lorentzian distribution functions
+that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
+and full width at half maximum (and so have constrained values of
+``sigma`` (:math:`\sigma`)).  A parameter ``fraction`` (:math:`\alpha`)
+controls the relative weight of the Gaussian and Lorentzian components,
+giving the full definition of
+
+.. math::
+
+  f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
+ + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+
+where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width
+at half maximum of each component and of the sum is :math:`2\sigma`. The
+:meth:`guess` function always sets the starting value for ``fraction`` at 0.5.
+
+
+:class:`MoffatModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: MoffatModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Moffat distribution function
+<https://en.wikipedia.org/wiki/Moffat_distribution>`_.  The parameters are
+``amplitude`` (:math:`A`), ``center`` (:math:`\mu`),
+a width parameter ``sigma`` (:math:`\sigma`), and an exponent ``beta`` (:math:`\beta`).
+For :math:`\beta=1`, the Moffat profile has a Lorentzian shape.
+
+.. math::
+
+  f(x; A, \mu, \sigma, \beta) = A \big[(\frac{x-\mu}{\sigma})^2+1\big]^{-\beta}
+
+The full width at half maximum is :math:`2\sigma\sqrt{2^{1/\beta}-1}`.
+The :meth:`guess` function always sets the starting value for ``beta`` to 1.
+
+
+:class:`Pearson7Model`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: Pearson7Model(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Pearson VII distribution
+<http://en.wikipedia.org/wiki/Pearson_distribution#The_Pearson_type_VII_distribution>`_.
+This is a Lorentzian-like distribution function.  It has the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also an ``exponent`` (:math:`m`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, m) = \frac{A}{\sigma{\beta(m-\frac{1}{2}, \frac{1}{2})}} \bigl[1 + \frac{(x-\mu)^2}{\sigma^2}  \bigr]^{-m}
+
+where :math:`\beta` is the beta function (see :scipydoc:`special.beta` in
+:mod:`scipy.special`).  The :meth:`guess` function always
+gives a starting value for ``exponent`` of 1.5.
+
+:class:`StudentsTModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: StudentsTModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Student's t distribution function
+<http://en.wikipedia.org/wiki/Student%27s_t-distribution>`_, with the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A \Gamma(\frac{\sigma+1}{2})} {\sqrt{\sigma\pi}\,\Gamma(\frac{\sigma}{2})} \Bigl[1+\frac{(x-\mu)^2}{\sigma}\Bigr]^{-\frac{\sigma+1}{2}}
+
+
+where :math:`\Gamma(x)` is the gamma function.
+
+
+:class:`BreitWignerModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: BreitWignerModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Breit-Wigner-Fano function
+<http://en.wikipedia.org/wiki/Fano_resonance>`_.  It has the usual
+parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), plus ``q`` (:math:`q`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, q) = \frac{A (q\sigma/2 + x - \mu)^2}{(\sigma/2)^2 + (x - \mu)^2}
+
+
+:class:`LognormalModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LognormalModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on the `Log-normal distribution function
+<http://en.wikipedia.org/wiki/Lognormal>`_.
+It has the usual parameters
+``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and ``sigma``
+(:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A}{x} e^{[{-{(\ln(x)-\mu)^2}/{2\sigma^2}}]}
+
+
+:class:`DampedOscillatorModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: DampedOscillatorModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on the `Damped Harmonic Oscillator Amplitude
+<http://en.wikipedia.org/wiki/Harmonic_oscillator#Amplitude_part>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma) = \frac{A}{\sqrt{ [1 - (x/\mu)^2]^2 + (2\sigma x/\mu)^2}}
+
+
+:class:`ExponentialGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ExponentialGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model of an `Exponentially modified Gaussian distribution
+<http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
+    \exp\bigl[\gamma({\mu - x  + \gamma\sigma^2/2})\bigr]
+    {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
+
+
+where :func:`erfc` is the complementary error function.
+
+:class:`SkewedGaussianModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: SkewedGaussianModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A variation of the above model, this is a `Skewed normal distribution
+<http://en.wikipedia.org/wiki/Skew_normal_distribution>`_.
+It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`) and
+``sigma`` (:math:`\sigma`), and also ``gamma`` (:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
+  e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
+      {\operatorname{erf}}\bigl[
+         \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
+     \bigr] \Bigr\}
+
+
+where :func:`erf` is the error function.
+
+
+:class:`DonaichModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: DonaichModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model of a `Doniach Sunjic asymmetric lineshape
+<http://www.casaxps.com/help_manual/line_shapes.htm>`_, used in
+photo-emission. With the usual parameters ``amplitude`` (:math:`A`),
+``center`` (:math:`\mu`) and ``sigma`` (:math:`\sigma`), and also ``gamma``
+(:math:`\gamma`) in
+
+.. math::
+
+    f(x; A, \mu, \sigma, \gamma) = A\frac{\cos\bigl[\pi\gamma/2 + (1-\gamma)
+    \arctan{[(x - \mu)/\sigma]}\bigr]} {\bigl[1 + {(x-\mu)^2}/{\sigma^2}\bigr]^{(1-\gamma)/2}}
+
+
+Linear and Polynomial Models
+------------------------------------
+
+These models correspond to polynomials of some degree.  Of course, lmfit is
+a very inefficient way to do linear regression (see :numpydoc:`polyfit`
+or :scipydoc:`stats.linregress`), but these models may be useful as one
+of many components of a composite model.
+
+:class:`ConstantModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ConstantModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+   a class that consists of a single value, ``c``.  This is constant in the
+   sense of having no dependence on the independent variable ``x``, not in
+   the sense of being non-varying.  To be clear, ``c`` will be a variable
+   Parameter.
+
+:class:`LinearModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: LinearModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+   a class that gives a linear model:
+
+.. math::
+
+    f(x; m, b) = m x + b
+
+with parameters ``slope`` for :math:`m` and  ``intercept`` for :math:`b`.
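+
+As a short sketch of using this model (with synthetic data made up purely
+for illustration)::
+
+    >>> import numpy as np
+    >>> from lmfit.models import LinearModel
+    >>> x = np.linspace(0, 10, 51)
+    >>> y = 3.0*x + 1.5 + np.random.normal(scale=0.2, size=x.size)
+    >>> lin = LinearModel()
+    >>> out = lin.fit(y, lin.make_params(slope=1, intercept=0), x=x)
+    >>> print(out.fit_report())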
+
+
+:class:`QuadraticModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: QuadraticModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+
+   a class that gives a quadratic model:
+
+.. math::
+
+    f(x; a, b, c) = a x^2 + b x + c
+
+with parameters ``a``, ``b``, and ``c``.
+
+
+:class:`ParabolicModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ParabolicModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+   same as :class:`QuadraticModel`.
+
+
+:class:`PolynomialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. class:: PolynomialModel(degree, missing=None[, prefix=''[, name=None[, **kws]]])
+
+   a class that gives a polynomial model up to ``degree`` (with maximum
+   value of 7).
+
+.. math::
+
+    f(x; c_0, c_1, \ldots, c_7) = \sum_{i=0, 7} c_i  x^i
+
+with parameters ``c0``, ``c1``, ..., ``c7``.  The supplied ``degree``
+will specify how many of these are actual variable parameters.  This uses
+:numpydoc:`polyval` for its calculation of the polynomial.
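+
+For example, a sketch with an arbitrary degree and coefficients::
+
+    >>> from lmfit.models import PolynomialModel
+    >>> poly = PolynomialModel(degree=3)
+    >>> pars = poly.make_params(c0=0.0, c1=1.0, c2=0.0, c3=0.1)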
+
+
+
+Step-like models
+-----------------------------------------------
+
+Two models represent step-like functions, and share many characteristics.
+
+:class:`StepModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: StepModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
+
+A model based on a Step function, with four choices for functional form.
+The step function starts with a value 0, and ends with a value of :math:`A`
+(``amplitude``), rising to :math:`A/2` at :math:`\mu` (``center``),
+with :math:`\sigma` (``sigma``) setting the characteristic width. The
+supported functional forms are ``linear`` (the default), ``atan`` or
+``arctan`` for an arc-tangent function,  ``erf`` for an error function, or
+``logistic`` for a `logistic function <http://en.wikipedia.org/wiki/Logistic_function>`_.
+The forms are
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})  & = A \min{[1, \max{(0,  \alpha)}]} \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})  & = A [1/2 + \arctan{(\alpha)}/{\pi}] \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})     & = A [1 + {\operatorname{erf}}(\alpha)]/2 \\
+   & f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}})& = A [1 - \frac{1}{1 +  e^{\alpha}} ]
+   \end{eqnarray*}
+
+where :math:`\alpha  = (x - \mu)/{\sigma}`.
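+
+For instance, a minimal sketch of building an error-function step (the
+parameter values are arbitrary)::
+
+    >>> from lmfit.models import StepModel
+    >>> step = StepModel(form='erf')
+    >>> pars = step.make_params(amplitude=10, center=5, sigma=1)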
+
+:class:`RectangleModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+.. class:: RectangleModel(form='linear'[, missing=None[, prefix=''[, name=None[, **kws]]]])
+
+A model based on a Step-up and Step-down function of the same form.  The
+same choices for functional form as for :class:`StepModel` are supported,
+with ``linear`` as the default.  The function starts at a value 0, rises to
+a value of :math:`A` (``amplitude``), passing through :math:`A/2` at
+:math:`\mu_1` (``center1``), with :math:`\sigma_1` (``sigma1``) setting the
+characteristic width, and then drops back to 0, passing through :math:`A/2`
+at :math:`\mu_2` (``center2``), with characteristic width :math:`\sigma_2`
+(``sigma2``).
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'linear{}'}})   &= A \{ \min{[1, \max{(0, \alpha_1)}]} + \min{[-1, \max{(0,  \alpha_2)}]} \} \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'arctan{}'}})   &= A [\arctan{(\alpha_1)} + \arctan{(\alpha_2)}]/{\pi} \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'erf{}'}})      &= A [{\operatorname{erf}}(\alpha_1) + {\operatorname{erf}}(\alpha_2)]/2 \\
+   &f(x; A, \mu, \sigma, {\mathrm{form={}'logistic{}'}}) &= A [1 - \frac{1}{1 + e^{\alpha_1}} - \frac{1}{1 +  e^{\alpha_2}} ]
+   \end{eqnarray*}
+
+
+where :math:`\alpha_1  = (x - \mu_1)/{\sigma_1}` and :math:`\alpha_2  = -(x - \mu_2)/{\sigma_2}`.
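+
+A sketch of evaluating this model (parameter values made up for
+illustration)::
+
+    >>> import numpy as np
+    >>> from lmfit.models import RectangleModel
+    >>> rect = RectangleModel(form='erf')
+    >>> pars = rect.make_params(amplitude=10, center1=3, sigma1=0.5,
+    ...                         center2=7, sigma2=0.5)
+    >>> x = np.linspace(0, 10, 201)
+    >>> y = rect.eval(pars, x=x)   # rises near x=3, falls back near x=7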
+
+
+Exponential and Power law models
+-----------------------------------------------
+
+:class:`ExponentialModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ExponentialModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on an `exponential decay function
+<http://en.wikipedia.org/wiki/Exponential_decay>`_. With parameters named
+``amplitude`` (:math:`A`), and ``decay`` (:math:`\tau`), this has the form:
+
+.. math::
+
+   f(x; A, \tau) = A e^{-x/\tau}
+
+
+:class:`PowerLawModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: PowerLawModel(missing=None[, prefix=''[, name=None[, **kws]]])
+
+A model based on a `Power Law <http://en.wikipedia.org/wiki/Power_law>`_.
+With parameters
+named ``amplitude`` (:math:`A`), and ``exponent`` (:math:`k`), this has the
+form:
+
+.. math::
+
+   f(x; A, k) = A x^k
+
+
+User-defined Models
+----------------------------
+
+.. _asteval: http://newville.github.io/asteval/
+
+As shown in the previous chapter (:ref:`model_chapter`), it is fairly
+straightforward to build fitting models from parametrized python functions.
+The number of model classes listed so far in the present chapter should
+make it clear that this process is not too difficult.  Still, it is
+sometimes desirable to build models from a user-supplied function.  This
+may be especially true if model-building is built-in to some larger library
+or application for fitting in which the user may not be able to easily
+build and use a new model from python code.
+
+
+The :class:`ExpressionModel` allows a model to be built from a
+user-supplied expression.  This uses the `asteval`_ module also used for
+mathematical constraints as discussed in :ref:`constraints_chapter`.
+
+
+:class:`ExpressionModel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. class:: ExpressionModel(expr, independent_vars=None, init_script=None, **kws)
+
+    A model using a user-supplied mathematical expression, which can be nearly any valid Python expression.
+
+    :param expr: expression used to build the model
+    :type expr: string
+    :param independent_vars: list of argument names in expression that are independent variables.
+    :type independent_vars: ``None`` (default) or list of strings for independent variables.
+    :param init_script: python script to run before parsing and evaluating expression.
+    :type init_script: ``None`` (default) or string
+
+with other parameters passed to :class:`model.Model`, with the notable
+exception that :class:`ExpressionModel` does **not** support the `prefix` argument.
+
+Since the point of this model is that an arbitrary expression will be
+supplied, the determination of what are the parameter names for the model
+happens when the model is created.  To do this, the expression is parsed,
+and all symbol names are found.  Names that are already known (there are
+over 500 function and value names in the asteval namespace, including most
+python builtins, more than 200 functions inherited from numpy, and more
+than 20 common lineshapes defined in the :mod:`lineshapes` module) are not
+converted to parameters.  Unrecognized names are expected to be names
+of either parameters or independent variables.  If `independent_vars` is the
+default value of ``None``, and if the expression contains a variable named
+`x`, that will be used as the independent variable.  Otherwise,
+`independent_vars` must be given.
+
+For example, if one creates an :class:`ExpressionModel` as::
+
+    >>> mod = ExpressionModel('off + amp * exp(-x/x0) * sin(x*phase)')
+
+The name `exp` will be recognized as the exponential function, so the model
+will be interpreted to have parameters named `off`, `amp`, `x0` and
+`phase`. In addition, `x` will be assumed to be the sole independent variable.
+In general, there is no obvious way to set default parameter values or
+parameter hints for bounds, so this will have to be handled explicitly.
+
+To evaluate this model, you might do the following::
+
+    >>> x = numpy.linspace(0, 10, 501)
+    >>> params = mod.make_params(off=0.25, amp=1.0, x0=2.0, phase=0.04)
+    >>> y = mod.eval(params, x=x)
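+
+Such a model can then be fit just like any other :class:`model.Model`.  A
+sketch, making noisy synthetic data from the evaluation above::
+
+    >>> ydat = y + numpy.random.normal(scale=0.05, size=len(x))
+    >>> result = mod.fit(ydat, params, x=x)
+    >>> print(result.fit_report())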
+
+
+While many custom models can be built with a single line expression
+(especially since the names of the lineshapes like `gaussian`, `lorentzian`
+and so on, as well as many numpy functions, are available), more complex
+models will inevitably require multi-line functions.  You can include
+such Python code with the `init_script` argument.  The text of this script
+is evaluated when the model is initialized (and before the actual
+expression is parsed), so that you can define functions to be used
+in your expression.
+
+As a probably unphysical example, to make a model that is the derivative of
+a Gaussian function times the logarithm of a Lorentzian function, you
+could define this in a script::
+
+    >>> script = """
+    def mycurve(x, amp, cen, sig):
+        loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
+        gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
+        return log(loren)*gradient(gauss)/gradient(x)
+    """
+
+and then use this with :class:`ExpressionModel` as::
+
+    >>> mod = ExpressionModel('mycurve(x, height, mid, wid)',
+                              init_script=script,
+                              independent_vars=['x'])
+
+As above, this will interpret the parameter names to be `height`, `mid`,
+and `wid`, and build a model that can be used to fit data.
+
+
+
+Example 1: Fit Peaked data to Gaussian, Lorentzian, and  Voigt profiles
+------------------------------------------------------------------------
+
+Here, we will fit data to three similar line shapes, in order to decide which
+might be the better model.  We will start with a Gaussian profile, as in
+the previous chapter, but use the built-in :class:`GaussianModel` instead
+of writing one ourselves.  This is a slightly different version from the
+one in the previous example in that the parameter names are different, and have
+built-in default values.  We'll simply use::
+
+     from numpy import loadtxt
+     from lmfit.models import GaussianModel
+
+     data = loadtxt('test_peak.dat')
+     x = data[:, 0]
+     y = data[:, 1]
+
+     mod = GaussianModel()
+
+     pars = mod.guess(y, x=x)
+     out  = mod.fit(y, pars, x=x)
+     print(out.fit_report(min_correl=0.25))
+
+
+which prints out the results::
+
+    [[Model]]
+        Model(gaussian)
+    [[Fit Statistics]]
+        # function evals   = 23
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 29.994
+        reduced chi-square = 0.075
+        Akaike info crit   = -1030.763
+        Bayesian info crit = -1018.781
+    [[Variables]]
+        sigma:       1.23218319 +/- 0.007374 (0.60%) (init= 1.35)
+        fwhm:        2.90156963 +/- 0.017366 (0.60%)  == '2.3548200*sigma'
+        height:      9.81457973 +/- 0.050872 (0.52%)  == '0.3989423*amplitude/sigma'
+        center:      9.24277049 +/- 0.007374 (0.08%) (init= 9.25)
+        amplitude:   30.3135571 +/- 0.157126 (0.52%) (init= 29.08159)
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(sigma, amplitude)          =  0.577
+
+We see a few interesting differences from the results of the previous
+chapter. First, the parameter names are longer. Second, there are ``fwhm``
+and ``height`` parameters, to give the full width at half maximum and
+maximum peak height.  And third, the automated initial guesses are pretty
+good. A plot of the fit:
+
+.. _figA1:
+
+  .. image::  _images/models_peak1.png
+     :target: _images/models_peak1.png
+     :width: 48 %
+  .. image::  _images/models_peak2.png
+     :target: _images/models_peak2.png
+     :width: 48 %
+
+  Fit to peak with Gaussian (left) and Lorentzian (right) models.
+
+shows a decent match to the data -- the fit worked with no explicit setting
+of initial parameter values.  Looking more closely, the fit is not perfect,
+especially in the tails of the peak, suggesting that a different peak
+shape, with longer tails, should be used.  Perhaps a Lorentzian would be
+better?  To do this, we simply replace ``GaussianModel`` with
+``LorentzianModel`` to get a :class:`LorentzianModel`::
+
+    from lmfit.models import LorentzianModel
+    mod = LorentzianModel()
+
+with the rest of the script as above.  Perhaps predictably, the first thing
+we try gives results that are worse::
+
+    [[Model]]
+        Model(lorentzian)
+    [[Fit Statistics]]
+        # function evals   = 27
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 53.754
+        reduced chi-square = 0.135
+        Akaike info crit   = -796.819
+        Bayesian info crit = -784.837
+    [[Variables]]
+        sigma:       1.15484517 +/- 0.013156 (1.14%) (init= 1.35)
+        fwhm:        2.30969034 +/- 0.026312 (1.14%)  == '2.0000000*sigma'
+        height:      10.7420881 +/- 0.086336 (0.80%)  == '0.3183099*amplitude/sigma'
+        center:      9.24438944 +/- 0.009275 (0.10%) (init= 9.25)
+        amplitude:   38.9728645 +/- 0.313857 (0.81%) (init= 36.35199)
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(sigma, amplitude)          =  0.709
+
+
+with the plot shown on the right in the figure above.  The tails are now
+too big, and the value for :math:`\chi^2` almost doubled.  A Voigt model
+does a better job.  Using :class:`VoigtModel`, this is as simple as using::
+
+    from lmfit.models import VoigtModel
+    mod = VoigtModel()
+
+with all the rest of the script as above.  This gives::
+
+    [[Model]]
+        Model(voigt)
+    [[Fit Statistics]]
+        # function evals   = 19
+        # data points      = 401
+        # variables        = 3
+        chi-square         = 14.545
+        reduced chi-square = 0.037
+        Akaike info crit   = -1320.995
+        Bayesian info crit = -1309.013
+    [[Variables]]
+        sigma:       0.73015574 +/- 0.003684 (0.50%) (init= 0.8775)
+        gamma:       0.73015574 +/- 0.003684 (0.50%)  == 'sigma'
+        fwhm:        2.62951718 +/- 0.013269 (0.50%)  == '3.6013100*sigma'
+        height:      19.5360268 +/- 0.075691 (0.39%)  == '0.3989423*amplitude/sigma'
+        center:      9.24411142 +/- 0.005054 (0.05%) (init= 9.25)
+        amplitude:   35.7554017 +/- 0.138614 (0.39%) (init= 43.62238)
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(sigma, amplitude)          =  0.651
+
+
+which has a much better value for :math:`\chi^2` and an obviously better
+match to the data as seen in the figure below (left).
+
+.. _figA2:
+
+  .. image::  _images/models_peak3.png
+     :target: _images/models_peak3.png
+     :width: 48 %
+  .. image::  _images/models_peak4.png
+     :target: _images/models_peak4.png
+     :width: 48 %
+
+  Fit to peak with Voigt model (left) and Voigt model with ``gamma``
+  varying independently of ``sigma`` (right).
+
+Can we do better? The Voigt function has a :math:`\gamma` parameter
+(``gamma``) that can be distinct from ``sigma``.  The default behavior used
+above constrains ``gamma`` to have exactly the same value as ``sigma``.  If
+we allow these to vary separately, does the fit improve?  To do this, we
+have to change the ``gamma`` parameter from a constrained expression and
+give it a starting value using something like::
+
+   mod = VoigtModel()
+   pars = mod.guess(y, x=x)
+   pars['gamma'].set(value=0.7, vary=True, expr='')
+
+
+which gives::
+
+    [[Model]]
+        Model(voigt)
+    [[Fit Statistics]]
+        # function evals   = 23
+        # data points      = 401
+        # variables        = 4
+        chi-square         = 10.930
+        reduced chi-square = 0.028
+        Akaike info crit   = -1432.556
+        Bayesian info crit = -1416.580
+    [[Variables]]
+        sigma:       0.89518950 +/- 0.014154 (1.58%) (init= 0.8775)
+        gamma:       0.52540156 +/- 0.018579 (3.54%) (init= 0.7)
+        fwhm:        3.22385492 +/- 0.050974 (1.58%)  == '3.6013100*sigma'
+        height:      15.2374711 +/- 0.299235 (1.96%)  == '0.3989423*amplitude/sigma'
+        center:      9.24374845 +/- 0.004419 (0.05%) (init= 9.25)
+        amplitude:   34.1914716 +/- 0.179468 (0.52%) (init= 43.62238)
+    [[Correlations]] (unreported correlations are <  0.250)
+        C(sigma, gamma)              = -0.928
+        C(gamma, amplitude)          =  0.821
+        C(sigma, amplitude)          = -0.651
+
+and the fit shown on the right above.
+
+Comparing the two fits with the Voigt function, we see that :math:`\chi^2`
+is definitely improved with a separately varying ``gamma`` parameter.  In
+addition, the two values for ``gamma`` and ``sigma`` differ significantly
+-- well outside the estimated uncertainties.  More compelling, reduced
+:math:`\chi^2` is improved even though a fourth variable has been added to
+the fit.  In the simplest statistical sense, this suggests that ``gamma``
+is a significant variable in the model.  In addition, we can use the
+Akaike and Bayesian Information Criteria (see
+:ref:`information_criteria_label`) to assess how much more likely the model
+with variable ``gamma`` is to explain the data than the model with ``gamma``
+fixed to the value of ``sigma``.  According to theory,
+:math:`\exp(-({\rm AIC1}-{\rm AIC0})/2)` gives the probability that a model with
+AIC1 is more likely than a model with AIC0.  For the two models here, with
+AIC values of -1432 and -1321 (note: if we had more carefully set the value
+for ``weights`` based on the noise in the data, these values might be
+positive, but their difference would be roughly the same), this says that
+the model with ``gamma`` fixed to ``sigma`` has a probability of less than 1.e-25
+of being the better model.
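+
+A sketch of that arithmetic, using the AIC values reported above (assuming
+only :mod:`numpy`)::
+
+    >>> import numpy as np
+    >>> aic_fixed, aic_varying = -1321.0, -1432.0
+    >>> np.exp(-(aic_fixed - aic_varying)/2)  # probability gamma-fixed model is better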
+
+
+Example 2: Fit data to a Composite Model with pre-defined models
+------------------------------------------------------------------
+
+Here, we repeat the point made at the end of the last chapter that
+instances of :class:`model.Model` class can be added together to make a
+*composite model*.  By using the large number of built-in models available,
+it is therefore very simple to build models that contain multiple peaks and
+various backgrounds.  An example of a simple fit to a noisy step function
+plus a line:
+
+.. literalinclude:: ../examples/doc_stepmodel.py
+
+After constructing step-like data, we first create a :class:`StepModel`
+telling it to use the ``erf`` form (see details above), and a
+:class:`LinearModel`.  We set initial values, in one case using the data
+and the :meth:`guess` method for the initial step function parameters, and
+:meth:`make_params` arguments for the linear component.
+After making a composite model, we run :meth:`fit` and report the
+results, which gives::
+
+    [[Model]]
+        (Model(step, prefix='step_', form='erf') + Model(linear, prefix='line_'))
+    [[Fit Statistics]]
+        # function evals   = 51
+        # data points      = 201
+        # variables        = 5
+        chi-square         = 648.584
+        reduced chi-square = 3.309
+        Akaike info crit   = 250.532
+        Bayesian info crit = 267.048
+    [[Variables]]
+        line_slope:       2.06986083 +/- 0.097005 (4.69%) (init= 0)
+        line_intercept:   11.7526825 +/- 0.288725 (2.46%) (init= 10.7017)
+        step_center:      3.12329688 +/- 0.005441 (0.17%) (init= 2.5)
+        step_sigma:       0.67050317 +/- 0.011480 (1.71%) (init= 1.428571)
+        step_amplitude:   111.673928 +/- 0.681024 (0.61%) (init= 134.6809)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(line_slope, step_amplitude)  = -0.878
+        C(step_sigma, step_amplitude)  =  0.563
+        C(line_slope, step_sigma)    = -0.455
+        C(line_intercept, step_center)  =  0.426
+        C(line_slope, line_intercept)  = -0.307
+        C(line_slope, step_center)   = -0.234
+        C(line_intercept, step_sigma)  = -0.139
+        C(line_intercept, step_amplitude)  = -0.122
+        C(step_center, step_amplitude)  =  0.108
+
+with a plot of
+
+.. image::  _images/models_stepfit.png
+   :target: _images/models_stepfit.png
+   :width: 50 %
+
+
+Example 3: Fitting Multiple Peaks -- and using Prefixes
+------------------------------------------------------------------
+
+.. _NIST StRD: http://itl.nist.gov/div898/strd/nls/nls_main.shtml
+
+As shown above, many of the models have similar parameter names.  For
+composite models, this could lead to a problem of having parameters for
+different parts of the model having the same name.  To overcome this, each
+:class:`model.Model` can have a ``prefix`` attribute (normally set to a blank
+string) that will be put at the beginning of each parameter name.  To
+illustrate, we fit one of the classic datasets from the `NIST StRD`_ suite
+involving a decaying exponential and two gaussians.
+
+.. literalinclude:: ../examples/doc_nistgauss.py
+
+where we give a separate prefix to each model (they all have an
+``amplitude`` parameter).  The ``prefix`` values are attached transparently
+to the models.
+
+Note that the calls to :meth:`make_params` used the bare name, without the
+prefix.  We could have used the prefixed names, but because we used the
+individual models ``gauss1`` and ``gauss2``, there was no need.
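+
+For instance, a quick sketch of the prefix handling (values arbitrary)::
+
+    >>> from lmfit.models import GaussianModel
+    >>> g1 = GaussianModel(prefix='g1_')
+    >>> pars = g1.make_params(center=105, sigma=15)   # bare names work here
+    >>> 'g1_center' in pars
+    True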
+
+Note also in the example here that we explicitly set bounds on many of the
+parameter values.
+
+The fit results printed out are::
+
+    [[Model]]
+        ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+    [[Fit Statistics]]
+        # function evals   = 66
+        # data points      = 250
+        # variables        = 8
+        chi-square         = 1247.528
+        reduced chi-square = 5.155
+        Akaike info crit   = 425.995
+        Bayesian info crit = 454.167
+    [[Variables]]
+        exp_amplitude:   99.0183282 +/- 0.537487 (0.54%) (init= 162.2102)
+        exp_decay:       90.9508861 +/- 1.103105 (1.21%) (init= 93.24905)
+        g1_amplitude:    4257.77318 +/- 42.38336 (1.00%) (init= 2000)
+        g1_sigma:        16.6725753 +/- 0.160481 (0.96%) (init= 15)
+        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 105)
+        g1_fwhm:         39.2609137 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
+        g1_height:       101.880231 +/- 0.592170 (0.58%)  == '0.3989423*g1_amplitude/g1_sigma'
+        g2_amplitude:    2493.41770 +/- 36.16947 (1.45%) (init= 2000)
+        g2_sigma:        13.8069484 +/- 0.186794 (1.35%) (init= 15)
+        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 155)
+        g2_fwhm:         32.5128783 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
+        g2_height:       72.0455934 +/- 0.617220 (0.86%)  == '0.3989423*g2_amplitude/g2_sigma'
+    [[Correlations]] (unreported correlations are <  0.500)
+        C(g1_amplitude, g1_sigma)    =  0.824
+        C(g2_amplitude, g2_sigma)    =  0.815
+        C(exp_amplitude, exp_decay)  = -0.695
+        C(g1_sigma, g2_center)       =  0.684
+        C(g1_center, g2_amplitude)   = -0.669
+        C(g1_center, g2_sigma)       = -0.652
+        C(g1_amplitude, g2_center)   =  0.648
+        C(g1_center, g2_center)      =  0.621
+        C(g1_sigma, g1_center)       =  0.507
+        C(exp_decay, g1_amplitude)   = -0.507
+
+
+We get a very good fit to this problem (described at the NIST site as of
+average difficulty, but the tests there are generally deliberately challenging) by
+applying reasonable initial guesses and putting modest but explicit bounds
+on the parameter values.  This fit is shown on the left:
+
+.. _figA3:
+
+  .. image::  _images/models_nistgauss.png
+     :target: _images/models_nistgauss.png
+     :width: 48 %
+  .. image::  _images/models_nistgauss2.png
+     :target: _images/models_nistgauss2.png
+     :width: 48 %
+
+
+One final point on setting initial values.  From looking at the data
+itself, we can see the two Gaussian peaks are reasonably well separated but
+do overlap.  Furthermore, we can tell that the initial guess for the
+decaying exponential component was poor because it was made from the
+full data range.  We can improve the initial parameter values by exploiting
+this separation, defining an :func:`index_of` function to restrict each
+guess to a limited range of the data.  That is, with::
+
+    def index_of(arrval, value):
+        "return index of array *at or below* value "
+        if value < min(arrval):  return 0
+        return max(np.where(arrval<=value)[0])
+
+    ix1 = index_of(x,  75)
+    ix2 = index_of(x, 135)
+    ix3 = index_of(x, 175)
+
+    pars = exp_mod.guess(y[:ix1], x=x[:ix1])
+    pars.update(gauss1.guess(y[ix1:ix2], x=x[ix1:ix2]))
+    pars.update(gauss2.guess(y[ix2:ix3], x=x[ix2:ix3]))
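+
+    # (sketch) the combined parameter set would then be passed to the fit,
+    # for example:  out = mod.fit(y, pars, x=x)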
+
+we can get a better initial estimate.  The fit converges to the same answer,
+giving identical values (to the precision printed out in the report),
+but in fewer steps, and without any bounds on parameters at all::
+
+    [[Model]]
+        ((Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')) + Model(exponential, prefix='exp_'))
+    [[Fit Statistics]]
+        # function evals   = 48
+        # data points      = 250
+        # variables        = 8
+        chi-square         = 1247.528
+        reduced chi-square = 5.155
+        Akaike info crit   = 425.995
+        Bayesian info crit = 454.167
+    [[Variables]]
+        exp_amplitude:   99.0183281 +/- 0.537487 (0.54%) (init= 94.53724)
+        exp_decay:       90.9508862 +/- 1.103105 (1.21%) (init= 111.1985)
+        g1_amplitude:    4257.77322 +/- 42.38338 (1.00%) (init= 2126.432)
+        g1_sigma:        16.6725754 +/- 0.160481 (0.96%) (init= 14.5)
+        g1_center:       107.030954 +/- 0.150067 (0.14%) (init= 106.5)
+        g1_fwhm:         39.2609141 +/- 0.377905 (0.96%)  == '2.3548200*g1_sigma'
+        g1_height:       101.880231 +/- 0.592171 (0.58%)  == '0.3989423*g1_amplitude/g1_sigma'
+        g2_amplitude:    2493.41766 +/- 36.16947 (1.45%) (init= 1878.892)
+        g2_sigma:        13.8069481 +/- 0.186794 (1.35%) (init= 15)
+        g2_center:       153.270100 +/- 0.194667 (0.13%) (init= 150)
+        g2_fwhm:         32.5128777 +/- 0.439866 (1.35%)  == '2.3548200*g2_sigma'
+        g2_height:       72.0455935 +/- 0.617221 (0.86%)  == '0.3989423*g2_amplitude/g2_sigma'
+    [[Correlations]] (unreported correlations are <  0.500)
+        C(g1_amplitude, g1_sigma)    =  0.824
+        C(g2_amplitude, g2_sigma)    =  0.815
+        C(exp_amplitude, exp_decay)  = -0.695
+        C(g1_sigma, g2_center)       =  0.684
+        C(g1_center, g2_amplitude)   = -0.669
+        C(g1_center, g2_sigma)       = -0.652
+        C(g1_amplitude, g2_center)   =  0.648
+        C(g1_center, g2_center)      =  0.621
+        C(g1_sigma, g1_center)       =  0.507
+        C(exp_decay, g1_amplitude)   = -0.507
+
+
+This script is in the file ``doc_nistgauss2.py`` in the examples folder,
+and the fit shown on the right above used these improved initial
+estimates of the data.
diff --git a/doc/conf.py b/doc/conf.py
index e452697..490a01d 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,176 +1,187 @@
-# -*- coding: utf-8 -*-
-#
-# lmfit documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
-sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
-sys.path.append(os.path.abspath(os.path.join('.')))
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-from extensions import extensions
-
-try:
-    import IPython.sphinxext.ipython_directive
-    extensions.extend(['IPython.sphinxext.ipython_directive',
-                       'IPython.sphinxext.ipython_console_highlighting'])
-except ImportError:
-    pass
-
-
-intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
-                       'numpy': ('http://docs.scipy.org/doc/numpy/', None),
-                       'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
-                       }
-
-intersphinx_cache_limit = 10
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'lmfit'
-copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-try:
-    import lmfit
-    release = lmfit.__version__
-# The full version, including alpha/beta/rc tags.
-except ImportError:
-    release = 'latest'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-html_theme_path = ['sphinx/theme']
-html_theme = 'lmfitdoc'
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-html_short_title = 'Minimization and Curve-Fitting for Python'
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
-
-html_domain_indices = False
-html_use_index = True
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'lmfitdoc'
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'lmfit.tex',
-   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
-   'Matthew Newville, Till Stensitzki, and others', 'manual'),
-]
-
+# -*- coding: utf-8 -*-
+#
+# lmfit documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.append(os.path.abspath(os.path.join('..', 'lmfit')))
+sys.path.append(os.path.abspath(os.path.join('.', 'sphinx')))
+sys.path.append(os.path.abspath(os.path.join('.')))
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+
+extensions = [
+    'sphinx.ext.extlinks',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.napoleon',
+    'sphinx.ext.mathjax',
+    ]
+
+try:
+    import IPython.sphinxext.ipython_directive
+    extensions.extend(['IPython.sphinxext.ipython_directive',
+                       'IPython.sphinxext.ipython_console_highlighting'])
+except ImportError:
+    pass
+
+intersphinx_mapping = {'py': ('http://docs.python.org/2', None),
+                       'numpy': ('http://docs.scipy.org/doc/numpy/', None),
+                       'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
+                       }
+
+#intersphinx_cache_limit = 10
+
+extlinks = {
+    'scipydoc' : ('http://docs.scipy.org/doc/scipy/reference/generated/%s.html', ''),
+    'numpydoc' : ('http://docs.scipy.org/doc/numpy/reference/generated/numpy.%s.html', ''),
+    }
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'lmfit'
+copyright = u'2014, Matthew Newville, The University of Chicago,  Till Stensitzki, Freie Universitat Berlin'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+sys.path.insert(0, os.path.abspath('../'))
+try:
+    import lmfit
+    release = lmfit.__version__
+# The full version, including alpha/beta/rc tags.
+except ImportError:
+    release = 'latest'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = False
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+html_theme_path = ['sphinx/theme']
+html_theme = 'lmfitdoc'
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+html_title = 'Non-Linear Least-Squares Minimization and Curve-Fitting for Python'
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+html_short_title = 'Minimization and Curve-Fitting for Python'
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+html_sidebars = {'index': ['indexsidebar.html','searchbox.html']}
+
+html_domain_indices = False
+html_use_index = True
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'lmfitdoc'
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'lmfit.tex',
+   'Non-Linear Least-Squares Minimization and Curve-Fitting for Python',
+   'Matthew Newville, Till Stensitzki, and others', 'manual'),
+]
diff --git a/doc/confidence.rst b/doc/confidence.rst
index c678dfb..67c4f97 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -1,177 +1,193 @@
-.. _confidence_chapter:
-
-Calculation of confidence intervals
-====================================
-
-.. module:: confidence
-
-The lmfit :mod:`confidence` module allows you to explicitly calculate
-confidence intervals for variable parameters.  For most models, it is not
-necessary: the estimation of the standard error from the estimated
-covariance matrix is normally quite good.
-
-But for some models, e.g. a sum of two exponentials, the approximation
-begins to fail. For this case, lmfit has the function :func:`conf_interval`
-to calculate confidence intervals directly.  This is substantially slower
-than using the errors estimated from the covariance matrix, but the results
-are more robust.
-
-
-Method used for calculating confidence intervals
--------------------------------------------------
-
-The F-test is used to compare our null model, which is the best fit we have
-found, with an alternate model, where one of the parameters is fixed to a
-specific value. The value is changed until the difference between :math:`\chi^2_0`
-and :math:`\chi^2_{f}` can't be explained by the loss of a degree of freedom
-within a certain confidence.
-
-.. math::
-
- F(P_{fix},N-P) = \left(\frac{\chi^2_f}{\chi^2_{0}}-1\right)\frac{N-P}{P_{fix}}
-
-N is the number of data-points, P the number of parameter of the null model.
-:math:`P_{fix}` is the number of fixed parameters (or to be more clear, the
-difference of number of parameters between our null model and the alternate
-model).
-
-Adding a log-likelihood method is under consideration.
-
-A basic example
----------------
-
-First we create an example problem::
-
-    >>> import lmfit
-    >>> import numpy as np
-    >>> x = np.linspace(0.3,10,100)
-    >>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
-    >>> pars = lmfit.Parameters()
-    >>> pars.add_many(('a', 0.1), ('b', 1))
-    >>> def residual(p):
-    ...    a = p['a'].value
-    ...    b = p['b'].value
-    ...    return 1/(a*x)+b-y
-
-
-before we can generate the confidence intervals, we have to run a fit, so
-that the automated estimate of the standard errors can be used as a
-starting point::
-
-
-    >>> mini = lmfit.Minimizer(residual, pars)
-    >>> result = mini.minimize()
-    >>> print(lmfit.fit_report(result.params))
-    [Variables]]
-        a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
-        b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(a, b)                      =  0.601
-
-Now it is just a simple function call to calculate the confidence
-intervals::
-
-    >>> ci = lmfit.conf_interval(mini, result)
-    >>> lmfit.printfuncs.report_ci(ci)
-         99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
-    a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
-    b   1.94751   1.96049   1.97274   1.97741   1.99680   2.00905   2.02203
-
-This shows the best-fit values for the parameters in the `0.00%` column,
-and parameter values that are at the varying confidence levels given by
-steps in :math:`\sigma`.  As we can see, the estimated error is almost the
-same, and the uncertainties are well behaved: Going from 1 :math:`\sigma`
-(68% confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is
-fairly linear.  It can also be seen that the errors are fairy symmetric
-around the best fit value.  For this problem, it is not necessary to
-calculate confidence intervals, and the estimates of the uncertainties from
-the covariance matrix are sufficient.
-
-An advanced example
--------------------
-
-Now we look at a problem where calculating the error from approximated
-covariance can lead to misleading result -- two decaying exponentials.  In
-fact such a problem is particularly hard for the Levenberg-Marquardt
-method, so we fitst estimate the results using the slower but robust
-Nelder-Mead  method, and *then* use Levenberg-Marquardt to estimate the
-uncertainties and correlations
-
-
-.. literalinclude:: ../examples/doc_confidence2.py
-
-which will report::
-
-    [[Variables]]
-        a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
-        a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
-        t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
-        t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
-    [[Correlations]] (unreported correlations are <  0.500)
-        C(a2, t2)                    =  0.987
-        C(a2, t1)                    = -0.925
-        C(t1, t2)                    = -0.881
-        C(a1, t1)                    = -0.599
-          95.00%    68.00%     0.00%    68.00%    95.00%
-    a1   2.71850   2.84525   2.98622   3.14874   3.34076
-    a2  -4.63180  -4.46663  -4.33526  -4.22883  -4.14178
-    t2  10.82699  11.33865  11.82404  12.28195  12.71094
-    t1   1.08014   1.18566   1.30994   1.45566   1.62579
-
-
-Again we called :func:`conf_interval`, this time with tracing and only for
-1- and 2 :math:`\sigma`.  Comparing these two different estimates, we see
-that the estimate for `a1` is reasonably well approximated from the
-covariance matrix, but the estimates for `a2` and especially for `t1`, and
-`t2` are very asymmetric and that going from 1 :math:`\sigma` (68%
-confidence) to 2 :math:`\sigma` (95% confidence) is not very predictable.
-
-Let plots mad of the confidence region are shown the figure on the left
-below for ``a1`` and ``t2``, and for ``a2`` and ``t2`` on the right:
-
-.. _figC1:
-
-  .. image:: _images/conf_interval1.png
-     :target: _images/conf_interval1.png
-     :width: 48%
-  .. image:: _images/conf_interval1a.png
-     :target: _images/conf_interval1a.png
-     :width: 48%
-
-Neither of these plots is very much like an ellipse, which is implicitly
-assumed by the approach using the covariance matrix.
-
-The trace returned as the optional second argument from
-:func:`conf_interval` contains a dictionary for each variable parameter.
-The values are dictionaries with arrays of values for each variable, and an
-array of corresponding probabilities for the corresponding cumulative
-variables.  This can be used to show the dependence between two
-parameters::
-
-    >>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
-    >>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
-    >>> plt.scatter(x, y, c=prob ,s=30)
-    >>> plt.scatter(x2, y2, c=prob2, s=30)
-    >>> plt.gca().set_xlim((1, 5))
-    >>> plt.gca().set_ylim((5, 15))
-    >>> plt.xlabel('a1')
-    >>> plt.ylabel('t2')
-    >>> plt.show()
-
-
-which shows the trace of values:
-
-.. image:: _images/conf_interval2.png
-   :target: _images/conf_interval2.png
-   :width: 50%
-
-
-
-Confidence Interval Functions
-----------------------------------
-
-.. autofunction:: lmfit.conf_interval
-
-.. autofunction:: lmfit.conf_interval2d
-
-.. autofunction:: lmfit.ci_report
+.. _confidence_chapter:
+
+Calculation of confidence intervals
+====================================
+
+.. module:: confidence
+
+The lmfit :mod:`confidence` module allows you to explicitly calculate
+confidence intervals for variable parameters.  For most models, it is not
+necessary: the estimation of the standard error from the estimated
+covariance matrix is normally quite good.
+
+But for some models, e.g. a sum of two exponentials, the approximation
+begins to fail. For this case, lmfit has the function :func:`conf_interval`
+to calculate confidence intervals directly.  This is substantially slower
+than using the errors estimated from the covariance matrix, but the results
+are more robust.
+
+
+Method used for calculating confidence intervals
+-------------------------------------------------
+
+The F-test is used to compare our null model, which is the best fit we have
+found, with an alternate model, where one of the parameters is fixed to a
+specific value. The value is changed until the difference between :math:`\chi^2_0`
+and :math:`\chi^2_{f}` can't be explained by the loss of a degree of freedom
+within a certain confidence.
+
+.. math::
+
+ F(P_{fix},N-P) = \left(\frac{\chi^2_f}{\chi^2_{0}}-1\right)\frac{N-P}{P_{fix}}
+
+N is the number of data points, P the number of parameters of the null model.
+:math:`P_{fix}` is the number of fixed parameters (or, more precisely, the
+difference in the number of parameters between our null model and the
+alternate model).
+
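+As a sketch of this decision rule (not the exact code lmfit uses
+internally), the F statistic from the formula above can be compared
+against an F distribution, here with hypothetical chi-square values::
+
+    from scipy.stats import f as f_dist
+
+    N, P, P_fix = 100, 2, 1              # data points; varied and fixed parameters
+    chi2_best, chi2_fixed = 95.0, 102.0  # hypothetical chi-square values
+
+    F = (chi2_fixed / chi2_best - 1.0) * (N - P) / P_fix
+    prob = f_dist.cdf(F, P_fix, N - P)   # confidence that fixing the parameter worsens the fit
+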
+Adding a log-likelihood method is under consideration.
+
+A basic example
+---------------
+
+First we create an example problem::
+
+    >>> import lmfit
+    >>> import numpy as np
+    >>> x = np.linspace(0.3,10,100)
+    >>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+    >>> pars = lmfit.Parameters()
+    >>> pars.add_many(('a', 0.1), ('b', 1))
+    >>> def residual(p):
+    ...    a = p['a'].value
+    ...    b = p['b'].value
+    ...    return 1/(a*x)+b-y
+
+
+before we can generate the confidence intervals, we have to run a fit, so
+that the automated estimate of the standard errors can be used as a
+starting point::
+
+
+    >>> mini = lmfit.Minimizer(residual, pars)
+    >>> result = mini.minimize()
+    >>> print(lmfit.fit_report(result.params))
+    [[Variables]]
+        a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
+        b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(a, b)                      =  0.601
+
+Now it is just a simple function call to calculate the confidence
+intervals::
+
+    >>> ci = lmfit.conf_interval(mini, result)
+    >>> lmfit.printfuncs.report_ci(ci)
+         99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
+    a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
+    b   1.94751   1.96049   1.97274   1.97741   1.99680   2.00905   2.02203
+
+This shows the best-fit values for the parameters in the `0.00%` column,
+and parameter values that are at the varying confidence levels given by
+steps in :math:`\sigma`.  As we can see, the estimated error is almost the
+same, and the uncertainties are well behaved: Going from 1 :math:`\sigma`
+(68% confidence) to 3 :math:`\sigma` (99.7% confidence) uncertainties is
+fairly linear.  It can also be seen that the errors are fairly symmetric
+around the best fit value.  For this problem, it is not necessary to
+calculate confidence intervals, and the estimates of the uncertainties from
+the covariance matrix are sufficient.
+
+An advanced example
+-------------------
+
+Now we look at a problem where calculating the error from the approximated
+covariance can lead to a misleading result -- two decaying exponentials.  In
+fact such a problem is particularly hard for the Levenberg-Marquardt
+method, so we first estimate the results using the slower but robust
+Nelder-Mead method, and *then* use Levenberg-Marquardt to estimate the
+uncertainties and correlations.
+
+
+.. literalinclude:: ../examples/doc_confidence2.py
+
+which will report::
+
+    [[Variables]]
+        a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
+        a2:  -4.33526327 +/- 0.115275 (2.66%) (init=-4.335256)
+        t1:   1.30994233 +/- 0.131211 (10.02%) (init= 1.309932)
+        t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
+    [[Correlations]] (unreported correlations are <  0.500)
+        C(a2, t2)                    =  0.987
+        C(a2, t1)                    = -0.925
+        C(t1, t2)                    = -0.881
+        C(a1, t1)                    = -0.599
+          95.00%    68.00%     0.00%    68.00%    95.00%
+    a1   2.71850   2.84525   2.98622   3.14874   3.34076
+    a2  -4.63180  -4.46663  -4.33526  -4.22883  -4.14178
+    t2  10.82699  11.33865  11.82404  12.28195  12.71094
+    t1   1.08014   1.18566   1.30994   1.45566   1.62579
+
+
+Again we called :func:`conf_interval`, this time with tracing and only for
+1- and 2 :math:`\sigma`.  Comparing these two different estimates, we see
+that the estimate for `a1` is reasonably well approximated from the
+covariance matrix, but the estimates for `a2`, and especially for `t1` and
+`t2`, are very asymmetric, and that going from 1 :math:`\sigma` (68%
+confidence) to 2 :math:`\sigma` (95% confidence) is not very predictable.
+
+Plots of the confidence regions are shown in the figures below, on the
+left for ``a1`` and ``t2``, and on the right for ``a2`` and ``t2``:
+
+.. _figC1:
+
+  .. image:: _images/conf_interval1.png
+     :target: _images/conf_interval1.png
+     :width: 48%
+  .. image:: _images/conf_interval1a.png
+     :target: _images/conf_interval1a.png
+     :width: 48%
+
+Neither of these plots is very much like an ellipse, which is implicitly
+assumed by the approach using the covariance matrix.
+
+The trace returned as the optional second argument from
+:func:`conf_interval` contains a dictionary for each variable parameter.
+The values are dictionaries with arrays of values for each variable, along
+with an array of the corresponding cumulative probabilities.  This can be
+used to show the dependence between two parameters::
+
+    >>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
+    >>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
+    >>> plt.scatter(x, y, c=prob ,s=30)
+    >>> plt.scatter(x2, y2, c=prob2, s=30)
+    >>> plt.gca().set_xlim((1, 5))
+    >>> plt.gca().set_ylim((5, 15))
+    >>> plt.xlabel('a1')
+    >>> plt.ylabel('t2')
+    >>> plt.show()
+
+
+which shows the trace of values:
+
+.. image:: _images/conf_interval2.png
+   :target: _images/conf_interval2.png
+   :width: 50%
+
+The :meth:`Minimizer.emcee` method uses Markov Chain Monte Carlo to sample
+the posterior probability distribution. These distributions demonstrate the
+range of solutions that the data supports. The following image was obtained
+by using :meth:`Minimizer.emcee` on the same problem.
+
+.. image:: _images/emcee_triangle.png
+
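+A minimal sketch of such a run (assuming the ``emcee`` package is
+installed, and reusing the ``mini`` Minimizer and ``result`` from the
+example above)::
+
+    >>> res_emcee = mini.emcee(params=result.params, burn=300, steps=1000, thin=20)
+    >>> print(lmfit.fit_report(res_emcee.params))
+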
+Credible intervals (the Bayesian equivalent of the frequentist confidence
+interval) can be obtained with this method. MCMC can be used for model
+selection, to determine outliers, to marginalise over nuisance parameters, etc.
+For example, you may have fractionally underestimated the uncertainties on a
+dataset. MCMC can be used to estimate the true level of uncertainty on each
+datapoint. A tutorial on the possibilities offered by MCMC can be found at [1]_.
+
+.. [1] http://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/
+
+
+
+Confidence Interval Functions
+----------------------------------
+
+.. autofunction:: lmfit.conf_interval
+
+.. autofunction:: lmfit.conf_interval2d
+
+.. autofunction:: lmfit.ci_report
diff --git a/doc/constraints.rst b/doc/constraints.rst
index 8d7fc6e..7d06b8d 100644
--- a/doc/constraints.rst
+++ b/doc/constraints.rst
@@ -1,166 +1,166 @@
-.. _constraints_chapter:
-
-=================================
-Using Mathematical Constraints
-=================================
-
-.. _asteval: http://newville.github.io/asteval/
-
-Being able to fix variables to a constant value or place upper and lower
-bounds on their values can greatly simplify modeling real data.  These 
-capabilities are key to lmfit's Parameters.  In addition, it is sometimes 
-highly desirable to place mathematical constraints on parameter values.  
-For example, one might want to require that two Gaussian peaks have the 
-same width, or have amplitudes that are constrained to add to some value.  
-Of course, one could rewrite the objective or model function to place such 
-requirements, but this is somewhat error prone, and limits the flexibility 
-so that exploring constraints becomes laborious.
-
-To simplify the setting of constraints, Parameters can be assigned a 
-mathematical expression of other Parameters, builtin constants, and builtin 
-mathematical functions that will be used to determine its value.  The 
-expressions used for constraints are evaluated using the `asteval`_ module, 
-which uses Python syntax, and evaluates the constraint expressions in a safe 
-and isolated  namespace.
-
-This approach to mathematical constraints allows one to not have to write a 
-separate model function for two Gaussians where the two ``sigma`` values are 
-forced to be equal, or where amplitudes are related.  Instead, one can write a
-more general two Gaussian model (perhaps using :class:`GaussianModel`) and 
-impose such constraints on the Parameters for a particular fit. 
-
-
-Overview
-===============
-
-Just as one can place bounds on a Parameter, or keep it fixed during the
-fit, so too can one place mathematical constraints on parameters.  The way
-this is done with lmfit is to write a Parameter as a mathematical
-expression of the other parameters and a set of pre-defined operators and
-functions.   The constraint expressions are simple Python statements,
-allowing one to place constraints like::
-
-    pars = Parameters()
-    pars.add('frac_curve1', value=0.5, min=0, max=1)
-    pars.add('frac_curve2', expr='1-frac_curve1')
-
-as the value of the `frac_curve1` parameter is updated at each step in the
-fit, the value of `frac_curve2` will be updated so that the two values are
-constrained to add to 1.0.  Of course, such a constraint could be placed in
-the fitting function, but the use of such constraints allows the end-user
-to modify the model of a more general-purpose fitting function.
-
-Nearly any valid mathematical expression can be used, and a variety of
-built-in functions are available for flexible modeling.
-
-Supported Operators, Functions, and Constants
-=================================================
-
-The mathematical expressions used to define constrained Parameters need to
-be valid python expressions.  As you'd expect, the operators '+', '-', '*',
-'/', '**', are supported.  In fact, a much more complete set can be used,
-including Python's bit- and logical operators::
-
-    +, -, *, /, **, &, |, ^, <<, >>, %, and, or,
-    ==, >, >=, <, <=, !=, ~, not, is, is not, in, not in
-
-
-The values for `e` (2.7182818...) and `pi` (3.1415926...) are available, as
-are  several supported mathematical and trigonometric function::
-
-  abs, acos, acosh, asin, asinh, atan, atan2, atanh, ceil,
-  copysign, cos, cosh, degrees, exp, fabs, factorial,
-  floor, fmod, frexp, fsum, hypot, isinf, isnan, ldexp,
-  log, log10, log1p, max, min, modf, pow, radians, sin,
-  sinh, sqrt, tan, tanh, trunc
-
-
-In addition, all Parameter names will be available in the mathematical
-expressions.  Thus, with parameters for a few peak-like functions::
-
-    pars = Parameters()
-    pars.add('amp_1', value=0.5, min=0, max=1)
-    pars.add('cen_1', value=2.2)
-    pars.add('wid_1', value=0.2)
-
-The following expression are all valid::
-
-    pars.add('amp_2', expr='(2.0 - amp_1**2)')
-    pars.add('cen_2', expr='cen_1 * wid_2 / max(wid_1, 0.001)')
-    pars.add('wid_2', expr='sqrt(pi)*wid_1')
-
-In fact, almost any valid Python expression is allowed.  A notable example
-is that Python's 1-line *if expression* is supported::
-
-    pars.add('bounded', expr='param_a if test_val/2. > 100 else param_b')
-
-which is equivalent to the more familiar::
-
-   if test_val/2. > 100:
-       bounded = param_a
-   else:
-       bounded = param_b
-
-Using Inequality Constraints
-==============================
-
-A rather common question about how to set up constraints
-that use an inequality, say, :math:`x + y \le 10`.  This
-can be done with algebraic constraints by recasting the
-problem, as :math:`x + y = \delta` and :math:`\delta \le
-10`.  That is, first, allow :math:`x` to be held by the
-freely varying parameter `x`.  Next, define a parameter
-`delta` to be variable with a maximum value of 10, and
-define parameter `y` as `delta - x`::
-
-    pars = Parameters()
-    pars.add('x',     value = 5, vary=True)
-    pars.add('delta', value = 5, max=10, vary=True)
-    pars.add('y',     expr='delta-x')
-
-The essential point is that an inequality still implies
-that a variable (here, `delta`) is needed to describe the
-constraint.  The secondary point is that upper and lower
-bounds can be used as part of the inequality to make the
-definitions more convenient.
-
-
-Advanced usage of Expressions in lmfit
-=============================================
-
-The expression used in a constraint is converted to a
-Python `Abstract Syntax Tree
-<http://docs.python.org/library/ast.html>`_, which is an
-intermediate version of the expression -- a syntax-checked,
-partially compiled expression.  Among other things, this
-means that Python's own parser is used to parse and convert
-the expression into something that can easily be evaluated
-within Python.  It also means that the symbols in the
-expressions can point to any Python object.
-
-In fact, the use of Python's AST allows a nearly full version of Python to
-be supported, without using Python's built-in :meth:`eval` function.  The
-`asteval`_ module actually supports most Python syntax, including for- and
-while-loops, conditional expressions, and user-defined functions.  There
-are several unsupported Python constructs, most notably the class
-statement, so that new classes cannot be created, and the import statement,
-which helps make the `asteval`_ module safe from malicious use.
-
-One important feature of the `asteval`_ module is that you can add
-domain-specific functions into the it, for later use in constraint
-expressions.  To do this, you would use the :attr:`asteval` attribute of
-the :class:`Minimizer` class, which contains a complete AST interpreter.
-The `asteval`_ interpreter uses a flat namespace, implemented as a single
-dictionary. That means you can preload any Python symbol into the namespace
-for the constraints::
-
-    def mylorentzian(x, amp, cen, wid):
-        "lorentzian function: wid = half-width at half-max"
-        return (amp  / (1 + ((x-cen)/wid)**2))
-
-    fitter = Minimizer()
-    fitter.asteval.symtable['lorentzian'] = mylorentzian
-
-and this :meth:`lorentzian` function can now be used in constraint
-expressions.
-
+.. _constraints_chapter:
+
+=================================
+Using Mathematical Constraints
+=================================
+
+.. _asteval: http://newville.github.io/asteval/
+
+Being able to fix variables to a constant value or place upper and lower
+bounds on their values can greatly simplify modeling real data.  These 
+capabilities are key to lmfit's Parameters.  In addition, it is sometimes 
+highly desirable to place mathematical constraints on parameter values.  
+For example, one might want to require that two Gaussian peaks have the 
+same width, or have amplitudes that are constrained to add to some value.  
+Of course, one could rewrite the objective or model function to place such 
+requirements, but this is somewhat error prone, and limits the flexibility 
+so that exploring constraints becomes laborious.
+
+To simplify the setting of constraints, Parameters can be assigned a 
+mathematical expression of other Parameters, builtin constants, and builtin 
+mathematical functions that will be used to determine its value.  The 
+expressions used for constraints are evaluated using the `asteval`_ module, 
+which uses Python syntax, and evaluates the constraint expressions in a safe 
+and isolated  namespace.
+
+This approach to mathematical constraints allows one to not have to write a 
+separate model function for two Gaussians where the two ``sigma`` values are 
+forced to be equal, or where amplitudes are related.  Instead, one can write a
+more general two Gaussian model (perhaps using :class:`GaussianModel`) and 
+impose such constraints on the Parameters for a particular fit. 
+
+
+Overview
+===============
+
+Just as one can place bounds on a Parameter, or keep it fixed during the
+fit, so too can one place mathematical constraints on parameters.  The way
+this is done with lmfit is to write a Parameter as a mathematical
+expression of the other parameters and a set of pre-defined operators and
+functions.   The constraint expressions are simple Python statements,
+allowing one to place constraints like::
+
+    pars = Parameters()
+    pars.add('frac_curve1', value=0.5, min=0, max=1)
+    pars.add('frac_curve2', expr='1-frac_curve1')
+
+as the value of the `frac_curve1` parameter is updated at each step in the
+fit, the value of `frac_curve2` will be updated so that the two values are
+constrained to add to 1.0.  Of course, such a constraint could be placed in
+the fitting function, but the use of such constraints allows the end-user
+to modify the model of a more general-purpose fitting function.
+
+Nearly any valid mathematical expression can be used, and a variety of
+built-in functions are available for flexible modeling.
+
+Supported Operators, Functions, and Constants
+=================================================
+
+The mathematical expressions used to define constrained Parameters need to
+be valid Python expressions.  As you'd expect, the operators '+', '-', '*',
+'/', '**', are supported.  In fact, a much more complete set can be used,
+including Python's bit- and logical operators::
+
+    +, -, *, /, **, &, |, ^, <<, >>, %, and, or,
+    ==, >, >=, <, <=, !=, ~, not, is, is not, in, not in
+
+
+The values for `e` (2.7182818...) and `pi` (3.1415926...) are available, as
+are several supported mathematical and trigonometric functions::
+
+  abs, acos, acosh, asin, asinh, atan, atan2, atanh, ceil,
+  copysign, cos, cosh, degrees, exp, fabs, factorial,
+  floor, fmod, frexp, fsum, hypot, isinf, isnan, ldexp,
+  log, log10, log1p, max, min, modf, pow, radians, sin,
+  sinh, sqrt, tan, tanh, trunc
+
+
+In addition, all Parameter names will be available in the mathematical
+expressions.  Thus, with parameters for a few peak-like functions::
+
+    pars = Parameters()
+    pars.add('amp_1', value=0.5, min=0, max=1)
+    pars.add('cen_1', value=2.2)
+    pars.add('wid_1', value=0.2)
+
+The following expressions are all valid::
+
+    pars.add('amp_2', expr='(2.0 - amp_1**2)')
+    pars.add('cen_2', expr='cen_1 * wid_2 / max(wid_1, 0.001)')
+    pars.add('wid_2', expr='sqrt(pi)*wid_1')
+
+In fact, almost any valid Python expression is allowed.  A notable example
+is that Python's 1-line *if expression* is supported::
+
+    pars.add('bounded', expr='param_a if test_val/2. > 100 else param_b')
+
+which is equivalent to the more familiar::
+
+   if test_val/2. > 100:
+       bounded = param_a
+   else:
+       bounded = param_b
+
+Using Inequality Constraints
+==============================
+
+A rather common question is how to set up constraints
+that use an inequality, say, :math:`x + y \le 10`.  This
+can be done with algebraic constraints by recasting the
+problem, as :math:`x + y = \delta` and :math:`\delta \le
+10`.  That is, first, allow :math:`x` to be held by the
+freely varying parameter `x`.  Next, define a parameter
+`delta` to be variable with a maximum value of 10, and
+define parameter `y` as `delta - x`::
+
+    pars = Parameters()
+    pars.add('x',     value = 5, vary=True)
+    pars.add('delta', value = 5, max=10, vary=True)
+    pars.add('y',     expr='delta-x')
+
+The essential point is that an inequality still implies
+that a variable (here, `delta`) is needed to describe the
+constraint.  The secondary point is that upper and lower
+bounds can be used as part of the inequality to make the
+definitions more convenient.
+
+
+Advanced usage of Expressions in lmfit
+=============================================
+
+The expression used in a constraint is converted to a
+Python `Abstract Syntax Tree
+<http://docs.python.org/library/ast.html>`_, which is an
+intermediate version of the expression -- a syntax-checked,
+partially compiled expression.  Among other things, this
+means that Python's own parser is used to parse and convert
+the expression into something that can easily be evaluated
+within Python.  It also means that the symbols in the
+expressions can point to any Python object.
+
+In fact, the use of Python's AST allows a nearly full version of Python to
+be supported, without using Python's built-in :meth:`eval` function.  The
+`asteval`_ module actually supports most Python syntax, including for- and
+while-loops, conditional expressions, and user-defined functions.  There
+are several unsupported Python constructs, most notably the class
+statement, so that new classes cannot be created, and the import statement,
+which helps make the `asteval`_ module safe from malicious use.
+
+One important feature of the `asteval`_ module is that you can add
+domain-specific functions into it, for later use in constraint
+expressions.  To do this, you would use the :attr:`asteval` attribute of
+the :class:`Minimizer` class, which contains a complete AST interpreter.
+The `asteval`_ interpreter uses a flat namespace, implemented as a single
+dictionary. That means you can preload any Python symbol into the namespace
+for the constraints::
+
+    def mylorentzian(x, amp, cen, wid):
+        "lorentzian function: wid = half-width at half-max"
+        return (amp  / (1 + ((x-cen)/wid)**2))
+
+    fitter = Minimizer(residual, params)  # with your objective function and parameters
+    fitter.asteval.symtable['lorentzian'] = mylorentzian
+
+and this :meth:`lorentzian` function can now be used in constraint
+expressions.
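+
+For example, a constrained parameter can now call it directly (a
+hypothetical sketch, assuming the ``amp_1``, ``cen_1``, and ``wid_1``
+parameters defined earlier)::
+
+    pars.add('peak_at_cen', expr='lorentzian(cen_1, amp_1, cen_1, wid_1)')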
+
diff --git a/doc/contents.rst b/doc/contents.rst
index 8a1e5c2..46c61b2 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -1,18 +1,18 @@
-Contents
-=================
-
-.. toctree::
-   :maxdepth: 3
-
-   intro
-   installation
-   whatsnew
-   support
-   faq
-   parameters
-   fitting
-   model
-   builtin_models
-   confidence
-   bounds
-   constraints
+Contents
+=================
+
+.. toctree::
+   :maxdepth: 3
+
+   intro
+   installation
+   whatsnew
+   support
+   faq
+   parameters
+   fitting
+   model
+   builtin_models
+   confidence
+   bounds
+   constraints
diff --git a/doc/extensions.py b/doc/extensions.py
index 3e54c82..40de659 100644
--- a/doc/extensions.py
+++ b/doc/extensions.py
@@ -1,10 +1,10 @@
-# sphinx extensions for mathjax
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(mathjax)
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/extensions.pyc b/doc/extensions.pyc
index 4dfb438..2ea9e41 100644
Binary files a/doc/extensions.pyc and b/doc/extensions.pyc differ
diff --git a/doc/faq.rst b/doc/faq.rst
index f54dc88..1bf26f9 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -1,100 +1,95 @@
-.. _faq_chapter:
-
-====================================
-Frequently Asked Questions
-====================================
-
-A list of common questions.
-
-What's the best way to ask for help or submit a bug report?
-================================================================
-
-See :ref:`support_chapter`.
-
-
-Why did my script break when upgrading from lmfit 0.8.3 to 0.9.0?
-====================================================================
-
-See :ref:`whatsnew_090_label`
-
-
-I get import errors from IPython
-==============================================================
-
-If you see something like::
-
-        from IPython.html.widgets import Dropdown
-
-    ImportError: No module named 'widgets'
-
-then you need to install the ipywidgets package.   Try 'pip install ipywidgets'.
-
-
-
-
-How can I fit multi-dimensional data?
-========================================
-
-The fitting routines accept data arrays that are 1 dimensional and double
-precision.  So you need to convert the data and model (or the value
-returned by the objective function) to be one dimensional.  A simple way to
-do this is to use numpy's :meth:`numpy.ndarray.flatten`, for example::
-
-    def residual(params, x, data=None):
-        ....
-        resid = calculate_multidim_residual()
-        return resid.flatten()
-
-How can I fit multiple data sets?
-========================================
-
-As above, the fitting routines accept data arrays that are 1 dimensional and double
-precision.  So you need to convert the sets of data and models (or the value
-returned by the objective function) to be one dimensional.  A simple way to
-do this is to use numpy's :meth:`numpy.concatenate`.  As an example, here
-is a residual function to simultaneously fit two lines to two different
-arrays.  As a bonus, the two lines share the 'offset' parameter:
-
-    def fit_function(params, x=None, dat1=None, dat2=None):
-        model1 = params['offset'].value + x * params['slope1'].value
-        model2 = params['offset'].value + x * params['slope2'].value
-
-	resid1 = dat1 - model1
-        resid2 = dat2 - model2
-        return numpy.concatenate((resid1, resid2))
-
-
-
-How can I fit complex data?
-===================================
-
-As with working with multidimensional data, you need to convert your data
-and model (or the value returned by the objective function) to be double precision
-floating point numbers. One way to do this would be to use a function like this::
-
-    def realimag(array):
-        return np.array([(x.real, x.imag) for x in array]).flatten()
-
-to convert the complex array into an array of alternating real and
-imaginary values.  You can then use this function on the result returned by
-your objective function::
-
-    def residual(params, x, data=None):
-        ....
-        resid = calculate_complex_residual()
-        return realimag(resid)
-
-
-Can I constrain values to have integer values?
-===============================================
-
-Basically, no.  None of the minimizers in lmfit support integer
-programming.  They all (I think) assume that they can make a very small
-change to a floating point value for a parameters value and see a change in
-the value to be minimized.
-
-
-How should I cite LMFIT?
-==================================
-
-See http://dx.doi.org/10.5281/zenodo.11813
+.. _faq_chapter:
+
+====================================
+Frequently Asked Questions
+====================================
+
+A list of common questions.
+
+What's the best way to ask for help or submit a bug report?
+================================================================
+
+See :ref:`support_chapter`.
+
+
+Why did my script break when upgrading from lmfit 0.8.3 to 0.9.0?
+====================================================================
+
+See :ref:`whatsnew_090_label`
+
+
+I get import errors from IPython
+==============================================================
+
+If you see something like::
+
+        from IPython.html.widgets import Dropdown
+
+    ImportError: No module named 'widgets'
+
+then you need to install the ipywidgets package.   Try 'pip install ipywidgets'.
+
+
+
+
+How can I fit multi-dimensional data?
+========================================
+
+The fitting routines accept data arrays that are 1 dimensional and double
+precision.  So you need to convert the data and model (or the value
+returned by the objective function) to be one dimensional.  A simple way to
+do this is to use numpy's :numpydoc:`ndarray.flatten`, for example::
+
+    def residual(params, x, data=None):
+        ....
+        resid = calculate_multidim_residual()
+        return resid.flatten()
+
+How can I fit multiple data sets?
+========================================
+
+As above, the fitting routines accept data arrays that are 1 dimensional
+and double precision.  So you need to convert the sets of data and models
+(or the value returned by the objective function) to be one dimensional.  A
+simple way to do this is to use numpy's :numpydoc:`concatenate`.  As an
+example, here is a residual function to simultaneously fit two lines to two
+different arrays.  As a bonus, the two lines share the 'offset' parameter::
+
+    def fit_function(params, x=None, dat1=None, dat2=None):
+        model1 = params['offset'].value + x * params['slope1'].value
+        model2 = params['offset'].value + x * params['slope2'].value
+
+        resid1 = dat1 - model1
+        resid2 = dat2 - model2
+        return numpy.concatenate((resid1, resid2))
+
+
+
+How can I fit complex data?
+===================================
+
+As with working with multidimensional data, you need to convert your data
+and model (or the value returned by the objective function) to be double
+precision floating point numbers. The simplest approach is to use numpy's
+:numpydoc:`ndarray.view` method, perhaps like::
+
+   import numpy as np
+   def residual(params, x, data=None):
+        ....
+        resid = calculate_complex_residual()
+        return resid.view(np.float)
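+
+Here :numpydoc:`ndarray.view` reinterprets the memory of the complex
+array as floats, so each complex value becomes an adjacent (real,
+imaginary) pair and no information is lost.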
+
+
+Can I constrain values to have integer values?
+===============================================
+
+Basically, no.  None of the minimizers in lmfit support integer
+programming.  They all (I think) assume that they can make a very small
+change to a floating point value for a parameter's value and see a change in
+the value to be minimized.
+
+
+How should I cite LMFIT?
+==================================
+
+See http://dx.doi.org/10.5281/zenodo.11813
diff --git a/doc/fitting.rst b/doc/fitting.rst
index f07ce77..cde5d89 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -1,619 +1,900 @@
-.. _minimize_chapter:
-
-=======================================
-Performing Fits, Analyzing Outputs
-=======================================
-
-As shown in the previous chapter, a simple fit can be performed with the
-:func:`minimize` function.  For more sophisticated modeling, the
-:class:`Minimizer` class can be used to gain a bit more control, especially
-when using complicated constraints or comparing results from related fits.
-
-
-.. warning::
-
-  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
-
-
-The :func:`minimize` function
-===============================
-
-The :func:`minimize` function is a wrapper around :class:`Minimizer` for
-running an optimization problem.  It takes an objective function (the
-function that calculates the array to be minimized), a :class:`Parameters`
-object, and several optional arguments.  See :ref:`fit-func-label` for
-details on writing the objective.
-
-.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **fit_kws]]]]]])
-
-   find values for the ``params`` so that the sum-of-squares of the array returned
-   from ``function`` is minimized.
-
-   :param function:  function to return fit residual.  See :ref:`fit-func-label` for details.
-   :type  function:  callable.
-   :param params:  a :class:`Parameters` dictionary.  Keywords must be strings
-                   that match ``[a-z_][a-z0-9_]*`` and cannot be a python
-                   reserved word.  Each value must be :class:`Parameter`.
-   :type  params:  :class:`Parameters`.
-   :param args:  arguments tuple to pass to the residual function as  positional arguments.
-   :type  args:  tuple
-   :param kws:   dictionary to pass to the residual function as keyword arguments.
-   :type  kws:  dict
-   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
-   :type  method:  string (default ``leastsq``)
-   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
-   :type  scale_covar:  bool (default ``True``)
-   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
-   :type  iter_cb:  callable or ``None``
-   :param fit_kws:  dictionary to pass to :func:`scipy.optimize.leastsq` or :func:`scipy.optimize.minimize`.
-   :type  fit_kws:  dict
-
-   :return: :class:`MinimizerResult` instance, which will contain the
-            optimized parameter, and several goodness-of-fit statistics.
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-
-   On output, the params will be unchanged.  The best-fit values, and where
-   appropriate, estimated uncertainties and correlations, will all be
-   contained in the returned :class:`MinimizerResult`.  See
-   :ref:`fit-results-label` for further details.
-
-   For clarity, it should be emphasized that this function is simply a
-   wrapper around :class:`Minimizer` that runs a single fit, implemented as::
-
-    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
-                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
-    return fitter.minimize(method=method)
-
-
-..  _fit-func-label:
-
-
-Writing a Fitting Function
-===============================
-
-An important component of a fit is writing a function to be minimized --
-the *objective function*.  Since this function will be called by other
-routines, there are fairly stringent requirements for its call signature
-and return value.  In principle, your function can be any python callable,
-but it must look like this:
-
-.. function:: func(params, *args, **kws):
-
-   calculate objective residual to be minimized from parameters.
-
-   :param params: parameters.
-   :type  params: :class:`Parameters`.
-   :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
-   :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
-   :return: residual array (generally data-model) to be minimized in the least-squares sense.
-   :rtype: numpy array.  The length of this array cannot change between calls.
-
-
-A common use for the positional and keyword arguments would be to pass in other
-data needed to calculate the residual, including such things as the data array,
-dependent variable, uncertainties in the data, and other data structures for the
-model calculation.
-
-The objective function should return the value to be minimized.  For the
-Levenberg-Marquardt algorithm from :meth:`leastsq`, this returned value **must** be an
-array, with a length greater than or equal to the number of fitting variables in the
-model.  For the other methods, the return value can either be a scalar or an array.  If an
-array is returned, the sum of squares of the array will be sent to the underlying fitting
-method, effectively doing a least-squares optimization of the return values.
-
-
-Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
-to unpack these to get numerical values at the top of the function.  A
-simple way to do this is with :meth:`Parameters.valuesdict`, as with::
-
-
-    def residual(pars, x, data=None, eps=None):
-        # unpack parameters:
-        #  extract .value attribute for each parameter
-	parvals = pars.valuesdict()
-        period = parvals['period']
-        shift = parvals['shift']
-        decay = parvals['decay']
-
-        if abs(shift) > pi/2:
-            shift = shift - sign(shift)*pi
-
-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
-
-        model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
-
-        if data is None:
-            return model
-	if eps is None:
-            return (model - data)
-        return (model - data)/eps
-
-In this example, ``x`` is a positional (required) argument, while the
-``data`` array is actually optional (so that the function returns the model
-calculation if the data is neglected).  Also note that the model
-calculation will divide ``x`` by the value of the 'period' Parameter.  It
-might be wise to ensure this parameter cannot be 0.  It would be possible
-to use the bounds on the :class:`Parameter` to do this::
-
-    params['period'] = Parameter(value=2, min=1.e-10)
-
-but putting this directly in the function with::
-
-        if abs(period) < 1.e-10:
-            period = sign(period)*1.e-10
-
-is also a reasonable approach.   Similarly, one could place bounds on the
-``decay`` parameter to take values only between ``-pi/2`` and ``pi/2``.
-
-..  _fit-methods-label:
-
-Choosing Different Fitting Methods
-===========================================
-
-By default, the `Levenberg-Marquardt
-<http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm>`_ algorithm is
-used for fitting.  While often criticized, including the fact it finds a
-*local* minima, this approach has some distinct advantages.  These include
-being fast, and well-behaved for most curve-fitting needs, and making it
-easy to estimate uncertainties for and correlations between pairs of fit
-variables, as discussed in :ref:`fit-results-label`.
-
-Alternative algorithms can also be used by providing the ``method``
-keyword to the :func:`minimize` function or :meth:`Minimizer.minimize`
-class as listed in the :ref:`Table of Supported Fitting Methods
-<fit-methods-table>`.
-
-.. _fit-methods-table:
-
- Table of Supported Fitting Method, eithers:
-
- +-----------------------+------------------------------------------------------------------+
- | Fitting Method        | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` |
- +=======================+==================================================================+
- | Levenberg-Marquardt   |  ``leastsq``                                                     |
- +-----------------------+------------------------------------------------------------------+
- | Nelder-Mead           |  ``nelder``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | L-BFGS-B              |  ``lbfgsb``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | Powell                |  ``powell``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | Conjugate Gradient    |  ``cg``                                                          |
- +-----------------------+------------------------------------------------------------------+
- | Newton-CG             |  ``newton``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | COBYLA                |  ``cobyla``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | Truncated Newton      |  ``tnc``                                                         |
- +-----------------------+------------------------------------------------------------------+
- | Dogleg                |  ``dogleg``                                                      |
- +-----------------------+------------------------------------------------------------------+
- | Sequential Linear     |  ``slsqp``                                                       |
- | Squares Programming   |                                                                  |
- +-----------------------+------------------------------------------------------------------+
- | Differential          |  ``differential_evolution``                                      |
- | Evolution             |                                                                  |
- +-----------------------+------------------------------------------------------------------+
-
-
-.. note::
-
-   The objective function for the Levenberg-Marquardt method **must**
-   return an array, with more elements than variables.  All other methods
-   can return either a scalar value or an array.
-
-
-.. warning::
-
-  Much of this documentation assumes that the Levenberg-Marquardt method is
-  the method used.  Many of the fit statistics and estimates for
-  uncertainties in parameters discussed in :ref:`fit-results-label` are
-  done only for this method.
-
-
-..  _fit-results-label:
-
-:class:`MinimizerResult` -- the optimization result
-========================================================
-
-
-
-.. class:: MinimizerResult(**kws)
-
-.. versionadded:: 0.9.0
-
-An optimization with :func:`minimize` or :meth:`Minimizer.minimize`
-will return a :class:`MinimizerResult` object.  This is an otherwise
-plain container object (that is, with no methods of its own) that
-simply holds the results of the minimization.  These results will
-include several pieces of informational data such as status and error
-messages, fit statistics, and the updated parameters themselves.
-
-Importantly, the parameters passed in to :meth:`Minimizer.minimize`
-will be not be changed.  To to find the best-fit values, uncertainties
-and so on for each parameter, one must use the
-:attr:`MinimizerResult.params` attribute.
-
-.. attribute::   params
-
-  the :class:`Parameters` actually used in the fit, with updated
-  values, :attr:`stderr` and :attr:`correl`.
-
-.. attribute::  var_names
-
-  ordered list of variable parameter names used in optimization, and
-  useful for understanding the the values in :attr:`init_vals` and
-  :attr:`covar`.
-
-.. attribute:: covar
-
-  covariance matrix from minimization (`leastsq` only), with
-  rows/columns using :attr:`var_names`.
-
-.. attribute:: init_vals
-
-  list of initial values for variable parameters using :attr:`var_names`.
-
-.. attribute::  nfev
-
-  number of function evaluations
-
-.. attribute::  success
-
-  boolean (``True``/``False``) for whether fit succeeded.
-
-.. attribute::  errorbars
-
-  boolean (``True``/``False``) for whether uncertainties were
-  estimated.
-
-.. attribute::  message
-
-  message about fit success.
-
-.. attribute::  ier
-
-  integer error value from :func:`scipy.optimize.leastsq`  (`leastsq`
-  only).
-
-.. attribute::  lmdif_message
-
-  message from :func:`scipy.optimize.leastsq` (`leastsq` only).
-
-
-.. attribute::  nvarys
-
-  number of variables in fit  :math:`N_{\rm varys}`
-
-.. attribute::  ndata
-
-  number of data points:  :math:`N`
-
-.. attribute::  nfree `
-
-  degrees of freedom in fit:  :math:`N - N_{\rm varys}`
-
-.. attribute::  residual
-
-  residual array, return value of :func:`func`:  :math:`{\rm Resid}`
-
-.. attribute::  chisqr
-
-  chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`
-
-.. attribute::  redchi
-
-  reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm
-  varys})}`
-
-.. attribute::  aic
-
-  Akaike Information Criterion statistic (see below)
-
-.. attribute::  bic
-
-  Bayesian Information Criterion statistic (see below).
-
-
-
-
-
-Goodness-of-Fit Statistics
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. _goodfit-table:
-
- Table of Fit Results:  These values, including the standard Goodness-of-Fit statistics,
- are all attributes of the :class:`MinimizerResult` object returned by
- :func:`minimize` or :meth:`Minimizer.minimize`.
-
-+----------------------+----------------------------------------------------------------------------+
-| Attribute Name       | Description / Formula                                                      |
-+======================+============================================================================+
-|    nfev              | number of function evaluations                                             |
-+----------------------+----------------------------------------------------------------------------+
-|    nvarys            | number of variables in fit  :math:`N_{\rm varys}`                          |
-+----------------------+----------------------------------------------------------------------------+
-|    ndata             | number of data points:  :math:`N`                                          |
-+----------------------+----------------------------------------------------------------------------+
-|    nfree `           | degrees of freedom in fit:  :math:`N - N_{\rm varys}`                      |
-+----------------------+----------------------------------------------------------------------------+
-|    residual          | residual array, return value of :func:`func`:  :math:`{\rm Resid}`         |
-+----------------------+----------------------------------------------------------------------------+
-|    chisqr            | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`                    |
-+----------------------+----------------------------------------------------------------------------+
-|    redchi            | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` |
-+----------------------+----------------------------------------------------------------------------+
-|    aic               | Akaike Information Criterion statistic (see below)                         |
-+----------------------+----------------------------------------------------------------------------+
-|    bic               | Bayesian Information Criterion statistic (see below)                       |
-+----------------------+----------------------------------------------------------------------------+
-|    var_names         | ordered list of variable parameter names used for init_vals and covar      |
-+----------------------+----------------------------------------------------------------------------+
-|    covar             | covariance matrix (with rows/columns using var_names                       |
-+----------------------+----------------------------------------------------------------------------+
-|    init_vals         | list of initial values for variable parameters                             |
-+----------------------+----------------------------------------------------------------------------+
-
-Note that the calculation of chi-square and reduced chi-square assume
-that the returned residual function is scaled properly to the
-uncertainties in the data.  For these statistics to be meaningful, the
-person writing the function to be minimized must scale them properly.
-
-After a fit using using the :meth:`leastsq` method has completed
-successfully, standard errors for the fitted variables and correlations
-between pairs of fitted variables are automatically calculated from the
-covariance matrix.  The standard error (estimated :math:`1\sigma`
-error-bar) go into the :attr:`stderr` attribute of the Parameter.  The
-correlations with all other variables will be put into the
-:attr:`correl` attribute of the Parameter -- a dictionary with keys for
-all other Parameters and values of the corresponding correlation.
-
-In some cases, it may not be possible to estimate the errors and
-correlations.  For example, if a variable actually has no practical effect
-on the fit, it will likely cause the covariance matrix to be singular,
-making standard errors impossible to estimate.  Placing bounds on varied
-Parameters makes it more likely that errors cannot be estimated, as being
-near the maximum or minimum value makes the covariance matrix singular.  In
-these cases, the :attr:`errorbars` attribute of the fit result
-(:class:`Minimizer` object) will be ``False``.
-
-Akaike and Bayesian Information Criteria
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :class:`MinimizerResult` includes the tradtional chi-square and reduced chi-square statistics:
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-        \chi^2  &=&  \sum_i^N r_i^2 \\
-	\chi^2_\nu &=& = \chi^2 / (N-N_{\rm varys})
-    \end{eqnarray*}
-
-where :math:`r` is the residual array returned by the objective function
-(likely to be ``(data-model)/uncertainty`` for data modeling usages),
-:math:`N` is the number of data points (``ndata``), and :math:`N_{\rm
-varys}` is number of variable parameters.
-
-Also included are the `Akaike Information Criterion
-<http://en.wikipedia.org/wiki/Akaike_information_criterion>`_, and
-`Bayesian Information Criterion
-<http://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ statistics,
-held in the ``aic`` and ``bic`` attributes, respectively.  These give slightly
-different measures of the relative quality for a fit, trying to balance
-quality of fit with the number of variable parameters used in the fit.
-These are calculated as
-
-.. math::
-   :nowrap:
-
-   \begin{eqnarray*}
-     {\rm aic} &=&  N \ln(\chi^2/N) + 2 N_{\rm varys} \\
-     {\rm bic} &=&  N \ln(\chi^2/N) + \ln(N) *N_{\rm varys} \\
-    \end{eqnarray*}
-
-
-When comparing fits with different numbers of varying parameters, one
-typically selects the model with lowest reduced chi-square, Akaike
-information criterion, and/or Bayesian information criterion.  Generally,
-the Bayesian information criterion is considered the most conservative of
-these statistics.
-
-..  _fit-itercb-label:
-
-
-Using a Iteration Callback Function
-====================================
-
-An iteration callback function is a function to be called at each
-iteration, just after the objective function is called.  The iteration
-callback allows user-supplied code to be run at each iteration, and can be
-used to abort a fit.
-
-.. function:: iter_cb(params, iter, resid, *args, **kws):
-
-   user-supplied function to be run at each iteration
-
-   :param params: parameters.
-   :type  params: :class:`Parameters`.
-   :param iter:   iteration number
-   :type  iter:   integer
-   :param resid:  residual array.
-   :type  resid:  ndarray
-   :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
-   :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
-   :return:      residual array (generally data-model) to be minimized in the least-squares sense.
-   :rtype:    ``None`` for normal behavior, any value like ``True`` to abort fit.
-
-
-Normally, the iteration callback would have no return value or return
-``None``.  To abort a fit, have this function return a value that is
-``True`` (including any non-zero integer).  The fit will also abort if any
-exception is raised in the iteration callback. When a fit is aborted this
-way, the parameters will have the values from the last iteration.  The fit
-statistics are not likely to be meaningful, and uncertainties will not be computed.
-
-
-.. module:: Minimizer
-
-..  _fit-minimizer-label:
-
-Using the :class:`Minimizer` class
-=======================================
-
-For full control of the fitting process, you'll want to create a
-:class:`Minimizer` object.
-
-.. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, **kws)
-
-   creates a Minimizer, for more detailed access to fitting methods and attributes.
-
-   :param function:  objective function to return fit residual.  See :ref:`fit-func-label` for details.
-   :type  function:  callable.
-   :param params:  a dictionary of Parameters.  Keywords must be strings
-                   that match ``[a-z_][a-z0-9_]*`` and is not a python
-                   reserved word.  Each value must be :class:`Parameter`.
-   :type  params:  dict
-   :param fcn_args:  arguments tuple to pass to the residual function as  positional arguments.
-   :type  fcn_args: tuple
-   :param fcn_kws:  dictionary to pass to the residual function as keyword arguments.
-   :type  fcn_kws:  dict
-   :param iter_cb:  function to be called at each fit iteration.  See :ref:`fit-itercb-label` for details.
-   :type  iter_cb:  callable or ``None``
-   :param scale_covar:  flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
-   :type  scale_cover:  bool (default ``True``).
-   :param kws:      dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
-   :type  kws:      dict
-
-The Minimizer object has a few public methods:
-
-.. method:: minimize(method='leastsq', params=None, **kws)
-
-   perform fit using either :meth:`leastsq` or :meth:`scalar_minimize`.
-
-   :param method: name of fitting method.  Must be one of the naemes in
-                  :ref:`Table of Supported Fitting Methods <fit-methods-table>`
-   :type  method:  str.
-   :param params:  a :class:`Parameters` dictionary for starting values
-   :type  params:  :class:`Parameters` or `None`
-
-   :return: :class:`MinimizerResult` object, containing updated
-            parameters, fitting statistics, and information.
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-   Additonal keywords are passed on to the correspond :meth:`leastsq`
-   or :meth:`scalar_minimize` method.
-
-
-.. method:: leastsq(params=None, scale_covar=True, **kws)
-
-   perform fit with Levenberg-Marquardt algorithm.  Keywords will be
-   passed directly to :func:`scipy.optimize.leastsq`.  By default,
-   numerical derivatives are used, and the following arguments are set:
-
-
-    +------------------+----------------+------------------------------------------------------------+
-    | :meth:`leastsq`  |  Default Value | Description                                                |
-    | arg              |                |                                                            |
-    +==================+================+============================================================+
-    |   xtol           |  1.e-7         | Relative error in the approximate solution                 |
-    +------------------+----------------+------------------------------------------------------------+
-    |   ftol           |  1.e-7         | Relative error in the desired sum of squares               |
-    +------------------+----------------+------------------------------------------------------------+
-    |   maxfev         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
-    +------------------+----------------+------------------------------------------------------------+
-    |   Dfun           | ``None``       | function to call for Jacobian calculation                  |
-    +------------------+----------------+------------------------------------------------------------+
-
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-.. method:: scalar_minimize(method='Nelder-Mead', params=None, hess=None, tol=None, **kws)
-
-   perform fit with any of the scalar minimization algorithms supported by
-   :func:`scipy.optimize.minimize`.
-
-    +-------------------------+-----------------+-----------------------------------------------------+
-    | :meth:`scalar_minimize` | Default Value   | Description                                         |
-    | arg                     |                 |                                                     |
-    +=========================+=================+=====================================================+
-    |   method                | ``Nelder-Mead`` | fitting method                                      |
-    +-------------------------+-----------------+-----------------------------------------------------+
-    |   tol                   | 1.e-7           | fitting and parameter tolerance                     |
-    +-------------------------+-----------------+-----------------------------------------------------+
-    |   hess                  | None            | Hessian of objective function                       |
-    +-------------------------+-----------------+-----------------------------------------------------+
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-.. method:: prepare_fit(**kws)
-
-   prepares and initializes model and Parameters for subsequent
-   fitting. This routine prepares the conversion of :class:`Parameters`
-   into fit variables, organizes parameter bounds, and parses, "compiles"
-   and checks constrain expressions.   The method also creates and returns
-   a new instance of a :class:`MinimizerResult` object that contains the
-   copy of the Parameters that will actually be varied in the fit.
-
-   This method is called directly by the fitting methods, and it is
-   generally not necessary to call this function explicitly.
-
-.. versionchanged:: 0.9.0
-   return value changed to :class:`MinimizerResult`
-
-
-
-Getting and Printing Fit Reports
-===========================================
-
-.. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1)
-
-   generate and return text of report of best-fit values, uncertainties,
-   and correlations from fit.
-
-   :param result:       :class:`MinimizerResult` object as returned by :func:`minimize`.
-   :param modelpars:    Parameters with "Known Values" (optional, default None)
-   :param show_correl:  whether to show list of sorted correlations [``True``]
-   :param min_correl:   smallest correlation absolute value to show [0.1]
-
-   If the first argument is a :class:`Parameters` object,
-   goodness-of-fit statistics will not be included.
-
-.. function:: report_fit(result, modelpars=None, show_correl=True, min_correl=0.1)
-
-   print text of report from :func:`fit_report`.
-
-
-An example fit with report would be
-
-.. literalinclude:: ../examples/doc_withreport.py
-
-which would write out::
-
-    [[Fit Statistics]]
-        # function evals   = 85
-        # data points      = 1001
-        # variables        = 4
-        chi-square         = 498.812
-        reduced chi-square = 0.500
-    [[Variables]]
-        amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
-        period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
-        shift:    0.16203677 +/- 0.014056 (8.67%) (init= 0)
-        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(period, shift)             =  0.797
-        C(amp, decay)                =  0.582
-        C(amp, shift)                = -0.297
-        C(amp, period)               = -0.243
-        C(shift, decay)              = -0.182
-        C(period, decay)             = -0.150
+.. _minimize_chapter:
+
+=======================================
+Performing Fits, Analyzing Outputs
+=======================================
+
+As shown in the previous chapter, a simple fit can be performed with the
+:func:`minimize` function.  For more sophisticated modeling, the
+:class:`Minimizer` class can be used to gain a bit more control, especially
+when using complicated constraints or comparing results from related fits.
+
+.. warning::
+
+  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
+
+
+The :func:`minimize` function
+===============================
+
+The :func:`minimize` function is a wrapper around :class:`Minimizer` for
+running an optimization problem.  It takes an objective function (the
+function that calculates the array to be minimized), a :class:`Parameters`
+object, and several optional arguments.  See :ref:`fit-func-label` for
+details on writing the objective.
+
+.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **fit_kws]]]]]])
+
+   find values for the ``params`` so that the sum-of-squares of the array returned
+   from ``function`` is minimized.
+
+   :param function:  function to return fit residual.  See :ref:`fit-func-label` for details.
+   :type  function:  callable.
+   :param params:  a :class:`Parameters` dictionary.  Keywords must be strings
+                   that match ``[a-z_][a-z0-9_]*`` and cannot be a python
+                   reserved word.  Each value must be :class:`Parameter`.
+   :type  params:  :class:`Parameters`.
+   :param args:  arguments tuple to pass to the residual function as  positional arguments.
+   :type  args:  tuple
+   :param kws:   dictionary to pass to the residual function as keyword arguments.
+   :type  kws:  dict
+   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
+   :type  method:  string (default ``leastsq``)
+   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``)
+   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
+   :type  iter_cb:  callable or ``None``
+   :param fit_kws:  dictionary to pass to :scipydoc:`optimize.leastsq` or :scipydoc:`optimize.minimize`.
+   :type  fit_kws:  dict
+
+   :return: :class:`MinimizerResult` instance, which will contain the
+            optimized parameters and several goodness-of-fit statistics.
+
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
+
+
+   On output, the params will be unchanged.  The best-fit values, and where
+   appropriate, estimated uncertainties and correlations, will all be
+   contained in the returned :class:`MinimizerResult`.  See
+   :ref:`fit-results-label` for further details.
+
+   For clarity, it should be emphasized that this function is simply a
+   wrapper around :class:`Minimizer` that runs a single fit, implemented as::
+
+    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
+    return fitter.minimize(method=method)
+
+
+..  _fit-func-label:
+
+
+Writing a Fitting Function
+===============================
+
+An important component of a fit is writing a function to be minimized --
+the *objective function*.  Since this function will be called by other
+routines, there are fairly stringent requirements for its call signature
+and return value.  In principle, your function can be any python callable,
+but it must look like this:
+
+.. function:: func(params, *args, **kws):
+
+   calculate objective residual to be minimized from parameters.
+
+   :param params: parameters.
+   :type  params: :class:`Parameters`.
+   :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
+   :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
+   :return: residual array (generally data-model) to be minimized in the least-squares sense.
+   :rtype: numpy array.  The length of this array cannot change between calls.
+
+
+A common use for the positional and keyword arguments would be to pass in other
+data needed to calculate the residual, including such things as the data array,
+dependent variable, uncertainties in the data, and other data structures for the
+model calculation.
+
+The objective function should return the value to be minimized.  For the
+Levenberg-Marquardt algorithm from :meth:`leastsq`, this returned value **must** be an
+array, with a length greater than or equal to the number of fitting variables in the
+model.  For the other methods, the return value can either be a scalar or an array.  If an
+array is returned, the sum of squares of the array will be sent to the underlying fitting
+method, effectively doing a least-squares optimization of the return values.
+
+
+Since the function will be passed in a dictionary of :class:`Parameters`, it is advisable
+to unpack these to get numerical values at the top of the function.  A
+simple way to do this is with :meth:`Parameters.valuesdict`, as with::
+
+
+    from numpy import exp, pi, sign, sin
+
+    def residual(pars, x, data=None, eps=None):
+        # unpack parameters:
+        #  extract .value attribute for each parameter
+        parvals = pars.valuesdict()
+        period = parvals['period']
+        shift = parvals['shift']
+        decay = parvals['decay']
+
+        if abs(shift) > pi/2:
+            shift = shift - sign(shift)*pi
+
+        if abs(period) < 1.e-10:
+            period = sign(period)*1.e-10
+
+        model = parvals['amp'] * sin(shift + x/period) * exp(-x*x*decay*decay)
+
+        if data is None:
+            return model
+        if eps is None:
+            return (model - data)
+        return (model - data)/eps
+
+In this example, ``x`` is a positional (required) argument, while the
+``data`` array is actually optional (so that the function returns the model
+calculation if the data is omitted).  Also note that the model
+calculation will divide ``x`` by the value of the 'period' Parameter.  It
+might be wise to ensure this parameter cannot be 0.  It would be possible
+to use the bounds on the :class:`Parameter` to do this::
+
+    params['period'] = Parameter(value=2, min=1.e-10)
+
+but putting this directly in the function with::
+
+        if abs(period) < 1.e-10:
+            period = sign(period)*1.e-10
+
+is also a reasonable approach.  Similarly, one could place bounds on the
+``shift`` parameter to take values only between ``-pi/2`` and ``pi/2``.
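+
+Putting this together, a complete fit with this objective function might be
+set up as follows (a minimal sketch, assuming the ``x``, ``data`` and
+``eps`` arrays already exist, and using ``pi`` as imported above)::
+
+    from lmfit import Parameters, minimize
+
+    params = Parameters()
+    params.add('amp', value=10)
+    params.add('decay', value=0.05)
+    params.add('period', value=2, min=1.e-10)
+    params.add('shift', value=0.0, min=-pi/2, max=pi/2)
+
+    result = minimize(residual, params, args=(x,),
+                      kws={'data': data, 'eps': eps})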
+
+..  _fit-methods-label:
+
+Choosing Different Fitting Methods
+===========================================
+
+By default, the `Levenberg-Marquardt
+<http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm>`_ algorithm is
+used for fitting.  While often criticized, including for the fact that it
+finds only a *local* minimum, this approach has some distinct advantages.
+These include being fast and well-behaved for most curve-fitting needs, and
+making it easy to estimate uncertainties for and correlations between pairs
+of fit variables, as discussed in :ref:`fit-results-label`.
+
+Alternative algorithms can also be used by providing the ``method``
+keyword to the :func:`minimize` function or :meth:`Minimizer.minimize`
+method, as listed in the :ref:`Table of Supported Fitting Methods
+<fit-methods-table>`.
+
+.. _fit-methods-table:
+
+ Table of Supported Fitting Methods:
+
+ +-----------------------+------------------------------------------------------------------+
+ | Fitting Method        | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` |
+ +=======================+==================================================================+
+ | Levenberg-Marquardt   |  ``leastsq``                                                     |
+ +-----------------------+------------------------------------------------------------------+
+ | Nelder-Mead           |  ``nelder``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | L-BFGS-B              |  ``lbfgsb``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Powell                |  ``powell``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Conjugate Gradient    |  ``cg``                                                          |
+ +-----------------------+------------------------------------------------------------------+
+ | Newton-CG             |  ``newton``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | COBYLA                |  ``cobyla``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Truncated Newton      |  ``tnc``                                                         |
+ +-----------------------+------------------------------------------------------------------+
+ | Dogleg                |  ``dogleg``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Sequential Linear     |  ``slsqp``                                                       |
+ | Squares Programming   |                                                                  |
+ +-----------------------+------------------------------------------------------------------+
+ | Differential          |  ``differential_evolution``                                      |
+ | Evolution             |                                                                  |
+ +-----------------------+------------------------------------------------------------------+
+
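+For example, to use the Nelder-Mead algorithm instead of the default
+Levenberg-Marquardt (a sketch, reusing the ``residual`` function and
+``params`` from above)::
+
+    result = minimize(residual, params, args=(x,), kws={'data': data},
+                      method='nelder')
+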
+
+.. note::
+
+   The objective function for the Levenberg-Marquardt method **must**
+   return an array, with more elements than variables.  All other methods
+   can return either a scalar value or an array.
+
+
+.. warning::
+
+  Much of this documentation assumes that the Levenberg-Marquardt method is
+  the method used.  Many of the fit statistics and estimates for
+  uncertainties in parameters discussed in :ref:`fit-results-label` are
+  done only for this method.
+
+..  _fit-results-label:
+
+:class:`MinimizerResult` -- the optimization result
+========================================================
+
+
+
+.. class:: MinimizerResult(**kws)
+
+.. versionadded:: 0.9.0
+
+An optimization with :func:`minimize` or :meth:`Minimizer.minimize`
+will return a :class:`MinimizerResult` object.  This is an otherwise
+plain container object (that is, with no methods of its own) that
+simply holds the results of the minimization.  These results will
+include several pieces of informational data such as status and error
+messages, fit statistics, and the updated parameters themselves.
+
+Importantly, the parameters passed in to :meth:`Minimizer.minimize`
+will not be changed.  To find the best-fit values, uncertainties
+and so on for each parameter, one must use the
+:attr:`MinimizerResult.params` attribute.
+
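+For example (a sketch, reusing the ``residual`` function and ``params``
+from the previous sections)::
+
+    result = minimize(residual, params, args=(x,), kws={'data': data})
+    print(result.params['amp'].value)   # best-fit value
+    print(params['amp'].value)          # still the starting value
+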
+.. attribute::   params
+
+  the :class:`Parameters` actually used in the fit, with updated
+  values, :attr:`stderr` and :attr:`correl`.
+
+.. attribute::  var_names
+
+  ordered list of variable parameter names used in optimization, and
+  useful for understanding the values in :attr:`init_vals` and
+  :attr:`covar`.
+
+.. attribute:: covar
+
+  covariance matrix from minimization (`leastsq` only), with
+  rows/columns using :attr:`var_names`.
+
+.. attribute:: init_vals
+
+  list of initial values for variable parameters using :attr:`var_names`.
+
+.. attribute::  nfev
+
+  number of function evaluations
+
+.. attribute::  success
+
+  boolean (``True``/``False``) for whether fit succeeded.
+
+.. attribute::  errorbars
+
+  boolean (``True``/``False``) for whether uncertainties were
+  estimated.
+
+.. attribute::  message
+
+  message about fit success.
+
+.. attribute::  ier
+
+  integer error value from :scipydoc:`optimize.leastsq`  (`leastsq`  only).
+
+.. attribute::  lmdif_message
+
+  message from :scipydoc:`optimize.leastsq` (`leastsq` only).
+
+.. attribute::  nvarys
+
+  number of variables in fit  :math:`N_{\rm varys}`
+
+.. attribute::  ndata
+
+  number of data points:  :math:`N`
+
+.. attribute::  nfree
+
+  degrees of freedom in fit:  :math:`N - N_{\rm varys}`
+
+.. attribute::  residual
+
+  residual array, return value of :func:`func`:  :math:`{\rm Resid}`
+
+.. attribute::  chisqr
+
+  chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`
+
+.. attribute::  redchi
+
+  reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm
+  varys})}`
+
+.. attribute::  aic
+
+  Akaike Information Criterion statistic (see below)
+
+.. attribute::  bic
+
+  Bayesian Information Criterion statistic (see below).
+
+
+
+Goodness-of-Fit Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _goodfit-table:
+
+ Table of Fit Results:  These values, including the standard Goodness-of-Fit statistics,
+ are all attributes of the :class:`MinimizerResult` object returned by
+ :func:`minimize` or :meth:`Minimizer.minimize`.
+
++----------------------+----------------------------------------------------------------------------+
+| Attribute Name       | Description / Formula                                                      |
++======================+============================================================================+
+|    nfev              | number of function evaluations                                             |
++----------------------+----------------------------------------------------------------------------+
+|    nvarys            | number of variables in fit  :math:`N_{\rm varys}`                          |
++----------------------+----------------------------------------------------------------------------+
+|    ndata             | number of data points:  :math:`N`                                          |
++----------------------+----------------------------------------------------------------------------+
+|    nfree             | degrees of freedom in fit:  :math:`N - N_{\rm varys}`                      |
++----------------------+----------------------------------------------------------------------------+
+|    residual          | residual array, return value of :func:`func`:  :math:`{\rm Resid}`         |
++----------------------+----------------------------------------------------------------------------+
+|    chisqr            | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`                    |
++----------------------+----------------------------------------------------------------------------+
+|    redchi            | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` |
++----------------------+----------------------------------------------------------------------------+
+|    aic               | Akaike Information Criterion statistic (see below)                         |
++----------------------+----------------------------------------------------------------------------+
+|    bic               | Bayesian Information Criterion statistic (see below)                       |
++----------------------+----------------------------------------------------------------------------+
+|    var_names         | ordered list of variable parameter names used for init_vals and covar      |
++----------------------+----------------------------------------------------------------------------+
+|    covar             | covariance matrix (with rows/columns using var_names)                      |
++----------------------+----------------------------------------------------------------------------+
+|    init_vals         | list of initial values for variable parameters                             |
++----------------------+----------------------------------------------------------------------------+
+
+Note that the calculation of chi-square and reduced chi-square assumes
+that the residual array returned by the objective function has been
+properly scaled by the uncertainties in the data.  For these statistics
+to be meaningful, the person writing the objective function must apply
+that scaling, for example by returning ``(data-model)/eps``.
+
+After a fit using the :meth:`leastsq` method has completed
+successfully, standard errors for the fitted variables and correlations
+between pairs of fitted variables are automatically calculated from the
+covariance matrix.  The standard error (estimated :math:`1\sigma`
+error-bar) goes into the :attr:`stderr` attribute of the Parameter.  The
+correlations with all other variables will be put into the
+:attr:`correl` attribute of the Parameter -- a dictionary with keys for
+all other Parameters and values of the corresponding correlation.
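+
+For example, after a successful ``leastsq`` fit stored in ``result``, one
+might inspect these attributes as (a sketch)::
+
+    period = result.params['period']
+    print(period.value, period.stderr)   # best-fit value and 1-sigma error
+    print(period.correl)                 # correlations with other variables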
+
+In some cases, it may not be possible to estimate the errors and
+correlations.  For example, if a variable actually has no practical effect
+on the fit, it will likely cause the covariance matrix to be singular,
+making standard errors impossible to estimate.  Placing bounds on varied
+Parameters makes it more likely that errors cannot be estimated, as being
+near the maximum or minimum value makes the covariance matrix singular.  In
+these cases, the :attr:`errorbars` attribute of the fit result
+(:class:`MinimizerResult` object) will be ``False``.
+
+
+.. _information_criteria_label:
+
+Akaike and Bayesian Information Criteria
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The :class:`MinimizerResult` includes the traditional chi-square and reduced chi-square statistics:
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+     \chi^2  &=&  \sum_i^N r_i^2 \\
+     \chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
+   \end{eqnarray*}
+
+where :math:`r` is the residual array returned by the objective function
+(likely to be ``(data-model)/uncertainty`` for data modeling usages),
+:math:`N` is the number of data points (``ndata``), and :math:`N_{\rm
+varys}` is the number of variable parameters.
+
+Also included are the `Akaike Information Criterion
+<http://en.wikipedia.org/wiki/Akaike_information_criterion>`_, and
+`Bayesian Information Criterion
+<http://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ statistics,
+held in the ``aic`` and ``bic`` attributes, respectively.  These give slightly
+different measures of the relative quality for a fit, trying to balance
+quality of fit with the number of variable parameters used in the fit.
+These are calculated as
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+     {\rm aic} &=&  N \ln(\chi^2/N) + 2 N_{\rm varys} \\
+     {\rm bic} &=&  N \ln(\chi^2/N) + \ln(N) \, N_{\rm varys}
+   \end{eqnarray*}
+
+
+When comparing fits with different numbers of varying parameters, one
+typically selects the model with lowest reduced chi-square, Akaike
+information criterion, and/or Bayesian information criterion.  Generally,
+the Bayesian information criterion is considered the most conservative of
+these statistics.
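+
+For example, one might compare two candidate fits by inspecting these
+statistics on each result (a sketch; ``params_a`` and ``params_b`` are
+hypothetical alternative starting parameter sets)::
+
+    result_a = minimize(residual, params_a, args=(x,), kws={'data': data})
+    result_b = minimize(residual, params_b, args=(x,), kws={'data': data})
+    # prefer the fit with the lower information criteria
+    print(result_a.redchi, result_a.aic, result_a.bic)
+    print(result_b.redchi, result_b.aic, result_b.bic)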
+
+..  _fit-itercb-label:
+
+
+Using an Iteration Callback Function
+====================================
+
+An iteration callback function is a function to be called at each
+iteration, just after the objective function is called.  The iteration
+callback allows user-supplied code to be run at each iteration, and can be
+used to abort a fit.
+
+.. function:: iter_cb(params, iter, resid, *args, **kws):
+
+   user-supplied function to be run at each iteration
+
+   :param params: parameters.
+   :type  params: :class:`Parameters`.
+   :param iter:   iteration number
+   :type  iter:   integer
+   :param resid:  residual array.
+   :type  resid:  ndarray
+   :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
+   :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
+   :return:   ``None`` for normal behavior, or any value that evaluates
+              ``True`` to abort the fit.
+
+
+Normally, the iteration callback would have no return value or return
+``None``.  To abort a fit, have this function return a value that is
+``True`` (including any non-zero integer).  The fit will also abort if any
+exception is raised in the iteration callback. When a fit is aborted this
+way, the parameters will have the values from the last iteration.  The fit
+statistics are not likely to be meaningful, and uncertainties will not be computed.
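+
+As an illustration, a callback that reports progress and aborts the fit
+after 100 iterations might look like this (a sketch, not part of the
+library)::
+
+    def per_iteration(params, iter, resid, *args, **kws):
+        # report the iteration number and the current chi-square
+        print(iter, (resid**2).sum())
+        # returning True aborts the fit
+        return iter > 100
+
+    result = minimize(residual, params, args=(x,), kws={'data': data},
+                      iter_cb=per_iteration)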
+
+
+.. module:: Minimizer
+
+..  _fit-minimizer-label:
+
+Using the :class:`Minimizer` class
+=======================================
+
+For full control of the fitting process, you'll want to create a
+:class:`Minimizer` object.
+
+.. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, **kws)
+
+   creates a Minimizer, for more detailed access to fitting methods and attributes.
+
+   :param function:  objective function to return fit residual.  See :ref:`fit-func-label` for details.
+   :type  function:  callable.
+   :param params:  a dictionary of Parameters.  Keywords must be strings
+                   that match ``[a-z_][a-z0-9_]*`` and cannot be a python
+                   reserved word.  Each value must be :class:`Parameter`.
+   :type  params:  dict
+   :param fcn_args:  arguments tuple to pass to the residual function as  positional arguments.
+   :type  fcn_args: tuple
+   :param fcn_kws:  dictionary to pass to the residual function as keyword arguments.
+   :type  fcn_kws:  dict
+   :param iter_cb:  function to be called at each fit iteration.  See :ref:`fit-itercb-label` for details.
+   :type  iter_cb:  callable or ``None``
+   :param scale_covar:  flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``).
+   :param kws:      dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
+   :type  kws:      dict
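+
+For example, a Minimizer wrapping the ``residual`` function from earlier
+could be created and run as (a sketch)::
+
+    from lmfit import Minimizer
+
+    fitter = Minimizer(residual, params, fcn_args=(x,),
+                       fcn_kws={'data': data})
+    result = fitter.minimize(method='leastsq')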
+
+The Minimizer object has a few public methods:
+
+.. method:: minimize(method='leastsq', params=None, **kws)
+
+   perform fit using either :meth:`leastsq` or :meth:`scalar_minimize`.
+
+   :param method: name of fitting method.  Must be one of the names in
+                  :ref:`Table of Supported Fitting Methods <fit-methods-table>`
+   :type  method:  str.
+   :param params:  a :class:`Parameters` dictionary for starting values
+   :type  params:  :class:`Parameters` or `None`
+
+   :return: :class:`MinimizerResult` object, containing updated
+            parameters, fitting statistics, and information.
+
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
+
+   Additional keywords are passed on to the corresponding :meth:`leastsq`
+   or :meth:`scalar_minimize` method.
+
+
+.. method:: leastsq(params=None, scale_covar=True, **kws)
+
+   perform fit with Levenberg-Marquardt algorithm.  Keywords will be
+   passed directly to :scipydoc:`optimize.leastsq`.  By default,
+   numerical derivatives are used, and the following arguments are set:
+
+
+    +------------------+----------------+------------------------------------------------------------+
+    | :meth:`leastsq`  |  Default Value | Description                                                |
+    | arg              |                |                                                            |
+    +==================+================+============================================================+
+    |   xtol           |  1.e-7         | Relative error in the approximate solution                 |
+    +------------------+----------------+------------------------------------------------------------+
+    |   ftol           |  1.e-7         | Relative error in the desired sum of squares               |
+    +------------------+----------------+------------------------------------------------------------+
+    |   maxfev         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
+    +------------------+----------------+------------------------------------------------------------+
+    |   Dfun           | ``None``       | function to call for Jacobian calculation                  |
+    +------------------+----------------+------------------------------------------------------------+
+
+
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
+
+.. method:: scalar_minimize(method='Nelder-Mead', params=None, hess=None, tol=None, **kws)
+
+   perform fit with any of the scalar minimization algorithms supported by
+   :scipydoc:`optimize.minimize`.
+
+    +-------------------------+-----------------+-----------------------------------------------------+
+    | :meth:`scalar_minimize` | Default Value   | Description                                         |
+    | arg                     |                 |                                                     |
+    +=========================+=================+=====================================================+
+    |   method                | ``Nelder-Mead`` | fitting method                                      |
+    +-------------------------+-----------------+-----------------------------------------------------+
+    |   tol                   | 1.e-7           | fitting and parameter tolerance                     |
+    +-------------------------+-----------------+-----------------------------------------------------+
+    |   hess                  | None            | Hessian of objective function                       |
+    +-------------------------+-----------------+-----------------------------------------------------+
+
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
+
+.. method:: prepare_fit(**kws)
+
+   prepares and initializes model and Parameters for subsequent
+   fitting. This routine prepares the conversion of :class:`Parameters`
+   into fit variables, organizes parameter bounds, and parses, "compiles"
+   and checks constraint expressions.  The method also creates and returns
+   a new instance of a :class:`MinimizerResult` object that contains the
+   copy of the Parameters that will actually be varied in the fit.
+
+   This method is called directly by the fitting methods, and it is
+   generally not necessary to call this function explicitly.
+
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
+
+
+
+.. method:: emcee(params=None, steps=1000, nwalkers=100, burn=0, thin=1, ntemps=1, pos=None, reuse_sampler=False, workers=1, float_behavior='posterior', is_weighted=True, seed=None)
+
+  Bayesian sampling of the posterior distribution for the parameters using the `emcee`
+  Markov Chain Monte Carlo package. The method assumes that the prior is uniform. You need
+  to have `emcee` installed to use this method.
+
+  :param params: a :class:`Parameters` dictionary for starting values
+  :type  params: :class:`Parameters` or `None`
+  :param steps: How many samples to draw from the posterior distribution
+                 for each of the walkers.
+  :type  steps: int
+  :param nwalkers: Should be set so :math:`nwalkers >> nvarys`, where `nvarys`
+                    is the number of parameters being varied during the fit.
+                    "Walkers are the members of the ensemble. They are almost
+                    like separate Metropolis-Hastings chains but, of course,
+                    the proposal distribution for a given walker depends on the
+                    positions of all the other walkers in the ensemble." - from
+                    [1]_.
+  :type  nwalkers: int
+  :param burn: Discard this many samples from the start of the sampling regime.
+  :type  burn: int
+  :param thin: Only accept 1 in every `thin` samples.
+  :type  thin: int
+  :param ntemps: If `ntemps > 1`, perform Parallel Tempering.
+  :type ntemps: int
+  :param pos: Specify the initial positions for the sampler.  If `ntemps == 1`
+              then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
+              `(ntemps, nwalkers, nvarys)`. You can also initialise using a
+              previous chain that had the same `ntemps`, `nwalkers` and `nvarys`.
+  :type pos: np.ndarray
+  :param reuse_sampler:  If you have already run :meth:`emcee` on a given
+            :class:`Minimizer` object then it possesses an internal sampler
+            attribute. You can continue to draw from the same sampler (retaining
+            the chain history) if you set this option to `True`. Otherwise a new
+            sampler is created. The `nwalkers`, `ntemps` and `params` keywords
+            are ignored with this option.
+            **Important**: the :class:`Parameters` used to create the sampler
+            must not change in-between calls to :meth:`emcee`. Alteration of
+            :class:`Parameters` would include changed ``min``, ``max``,
+            ``vary`` and ``expr`` attributes. This may happen, for example, if
+            you use an altered :class:`Parameters` object and call the
+            :meth:`minimize` method in-between calls to :meth:`emcee` .
+  :type  reuse_sampler:  bool
+  :param workers: For parallelization of sampling.  It can be any Pool-like object
+            with a map method that follows the same calling sequence as the
+            built-in map function. If int is given as the argument, then a
+            multiprocessing-based pool is spawned internally with the
+            corresponding number of parallel processes. 'mpi4py'-based
+            parallelization and 'joblib'-based parallelization pools can also
+            be used here. **Note**: because of multiprocessing overhead it may
+            only be worth parallelising if the objective function is expensive
+            to calculate, or if there are a large number of objective
+            evaluations per step (`ntemps * nwalkers * nvarys`).
+  :type workers: int or Pool-like
+  :type float_behavior: str
+  :param float_behavior: Specifies the meaning of the objective function if it
+   returns a float. One of:
+
+    'posterior' - the objective function returns a log-posterior probability
+
+    'chi2' - the objective function returns :math:`\chi^2`.
+
+   See Notes for further details.
+  :param is_weighted: Whether the objective function has been weighted by
+            measurement uncertainties. If `is_weighted is True` then your
+            objective function is assumed to return residuals that have been
+            divided by the true measurement uncertainty,
+            `(data - model) / sigma`. If `is_weighted is False` then the
+            objective function is assumed to return unweighted residuals,
+            `data - model`. In this case `emcee` will employ a positive
+            measurement uncertainty during the sampling. This measurement
+            uncertainty will be present in the output params and output chain
+            with the name `__lnsigma`. A side effect of this is that you
+            cannot use this parameter name yourself.
+            **Important**: this parameter only has an effect if your objective
+            function returns an array. If your objective function returns a
+            float, then this parameter is ignored. See Notes for more details.
+  :type is_weighted: bool
+  :param seed: If `seed` is an int, a new `np.random.RandomState` instance is used,
+            seeded with `seed`.
+            If `seed` is already a `np.random.RandomState` instance, then that
+            `np.random.RandomState` instance is used.
+            Specify `seed` for repeatable sampling.
+  :type seed: int or np.random.RandomState
+
+  :return: :class:`MinimizerResult` object containing updated params, statistics,
+            etc. The :class:`MinimizerResult` also contains the ``chain``,
+            ``flatchain`` and ``lnprob`` attributes. The ``chain``
+            and ``flatchain`` attributes contain the samples and have the shape
+            `(nwalkers, (steps - burn) // thin, nvarys)` or
+            `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
+            depending on whether parallel tempering was used.
+            `nvarys` is the number of parameters that are allowed to vary.
+            The ``flatchain`` attribute is a :class:`pandas.DataFrame` of the
+            flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
+            chain values for a particular parameter use
+            `result.flatchain[parname]`. The ``lnprob`` attribute contains the
+            log probability for each sample in ``chain``. The sample with the
+            highest probability corresponds to the maximum likelihood estimate.
+
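+  For example, given a fitted ``result`` returned by this method, the chain
+  and the flattened chain can be inspected directly (a minimal sketch; the
+  parameter name ``t1`` is illustrative)::
+
+    >>> print(result.chain.shape)            # (nwalkers, (steps - burn) // thin, nvarys)
+    >>> print(result.flatchain['t1'].std())  # posterior spread of one parameter
+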
+  This method samples the posterior distribution of the parameters using
+  Markov Chain Monte Carlo.  To do so it needs to calculate the
+  log-posterior probability of the model parameters, `F`, given the data,
+  `D`, :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
+  calculated as:
+
+  .. math::
+
+    \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+
+  where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
+  :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
+  encodes prior information already known about the model. This method
+  assumes that the log-prior probability is `-np.inf` (impossible) if one
+  of the parameters is outside its limits. The log-prior probability
+  term is zero if all the parameters are inside their bounds (known as a
+  uniform prior). The log-likelihood function is given by [1]_:
+
+  .. math::
+
+    \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{\left(g_n(F_{true}) - D_n \right)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
+
+  The first summand in the square brackets represents the residual for a
+  given datapoint (:math:`g` being the generative model). This term
+  represents :math:`\chi^2` when summed over all datapoints.
+  Ideally the objective function used to create :class:`lmfit.Minimizer` should
+  return the log-posterior probability, :math:`\ln p(F_{true} | D)`.
+  However, since the in-built log-prior term is zero, the objective
+  function can also just return the log-likelihood, unless you wish to
+  create a non-uniform prior.
+
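+  As an illustrative sketch of a non-uniform prior, a Gaussian log-prior on a
+  single parameter could be added to an existing log-likelihood function
+  (``lnlike``, ``t1`` and the numbers here are hypothetical)::
+
+    >>> def lnpost(p):
+    ...     return lnlike(p) - 0.5 * ((p['t1'].value - 1.3) / 0.5)**2
+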
+  If a float value is returned by the objective function then this value
+  is assumed by default to be the log-posterior probability, i.e.
+  `float_behavior is 'posterior'`. If your objective function returns
+  :math:`\chi^2`, then you should use a value of `'chi2'` for
+  `float_behavior`. `emcee` will then multiply your :math:`\chi^2` value
+  by -0.5 to obtain the log-posterior probability.
+
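+  As a minimal sketch (``x``, ``data``, ``sigma`` and the exponential model
+  here are purely illustrative), a scalar :math:`\chi^2` objective might be
+  sampled like this::
+
+    >>> def chi2_objective(params, x, data, sigma):
+    ...     model = params['amp'].value * np.exp(-x / params['tau'].value)
+    ...     return np.sum(((data - model) / sigma)**2)
+
+    >>> mini = lmfit.Minimizer(chi2_objective, params, fcn_args=(x, data, sigma))
+    >>> out = mini.emcee(float_behavior='chi2')
+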
+  However, the default behaviour of many objective functions is to return
+  a vector of (possibly weighted) residuals. Therefore, if your objective
+  function returns a vector, `res`, then the vector is assumed to contain
+  the residuals. If `is_weighted is True` then your residuals are assumed
+  to be correctly weighted by the standard deviation of the data points
+  (`res = (data - model) / sigma`) and the log-likelihood (and
+  log-posterior probability) is calculated as: `-0.5 * np.sum(res**2)`.
+  This ignores the second summand in the square brackets. Consequently, in
+  order to calculate a fully correct log-posterior probability value your
+  objective function should return a single value. If `is_weighted is False`
+  then the data uncertainty, :math:`s_n`, will be treated as a nuisance
+  parameter and will be marginalised out. This is achieved by employing a
+  strictly positive uncertainty (homoscedasticity) for each data point,
+  :math:`s_n = \exp(\_\_lnsigma)`. `__lnsigma` will be present in
+  `MinimizerResult.params` as well as `Minimizer.chain`, and `nvarys` will
+  be increased by one.
+
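+  As a sketch, sampling with an unweighted residual (an objective returning
+  `data - model`) could look like::
+
+    >>> out = lmfit.Minimizer(residual, params).emcee(is_weighted=False)
+    >>> print(out.params['__lnsigma'].value)   # marginalised noise scale
+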
+  .. [1] http://dan.iel.fm/emcee/current/user/line/
+
+
+.. _label-emcee:
+
+:meth:`emcee` - calculating the posterior probability distribution of parameters
+==============================================================================================
+
+:meth:`emcee` can be used to obtain the posterior probability distribution of
+parameters, given a set of experimental data. An example problem is a double
+exponential decay. A small amount of Gaussian noise is also added in::
+
+    >>> import numpy as np
+    >>> import lmfit
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(1, 10, 250)
+    >>> np.random.seed(0)
+    >>> y = 3.0 * np.exp(-x / 2) - 5.0 * np.exp(-(x - 0.1) / 10.) + 0.1 * np.random.randn(len(x))
+    >>> plt.plot(x, y)
+    >>> plt.show()
+
+.. image:: _images/emcee_dbl_exp.png
+
+Create a Parameter set for the initial guesses::
+
+    >>> p = lmfit.Parameters()
+    >>> p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3., True))
+
+    >>> def residual(p):
+    ...     v = p.valuesdict()
+    ...     return v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(-(x - 0.1) / v['t2']) - y
+
+Solving with :func:`minimize` gives the Maximum Likelihood solution.::
+
+    >>> mi = lmfit.minimize(residual, p, method='Nelder')
+    >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+    [[Variables]]
+        a1:   2.98623688 (init= 4)
+        a2:  -4.33525596 (init= 4)
+        t1:   1.30993185 (init= 3)
+        t2:   11.8240752 (init= 3)
+    [[Correlations]] (unreported correlations are <  0.500)
+    >>> plt.plot(x, y)
+    >>> plt.plot(x, residual(mi.params) + y, 'r')
+    >>> plt.show()
+
+.. image:: _images/emcee_dbl_exp2.png
+
+However, this doesn't give a probability distribution for the parameters.
+Furthermore, we wish to deal with the data uncertainty. This is called
+marginalisation of a nuisance parameter. emcee requires a function that returns
+the log-posterior probability. The log-posterior probability is a sum of the
+log-prior probability and log-likelihood functions. The log-prior probability is
+assumed to be zero if all the parameters are within their bounds and `-np.inf`
+if any of the parameters are outside their bounds.::
+
+    >>> # add a noise parameter
+    >>> mi.params.add('f', value=1, min=0.001, max=2)
+
+    >>> # This is the log-likelihood probability for the sampling. We're going to estimate the
+    >>> # size of the uncertainties on the data as well.
+    >>> def lnprob(p):
+    ...    resid = residual(p)
+    ...    s = p['f']
+    ...    resid *= 1 / s
+    ...    resid *= resid
+    ...    resid += np.log(2 * np.pi * s**2)
+    ...    return -0.5 * np.sum(resid)
+
+Now we have to set up the minimizer and do the sampling.::
+
+    >>> mini = lmfit.Minimizer(lnprob, mi.params)
+    >>> res = mini.emcee(burn=300, steps=600, thin=10, params=mi.params)
+
+Let's have a look at those posterior distributions for the parameters.  This
+requires installation of the `corner` package.::
+
+    >>> import corner
+    >>> corner.corner(res.flatchain, labels=res.var_names, truths=list(res.params.valuesdict().values()))
+
+.. image:: _images/emcee_triangle.png
+
+The values reported in the :class:`MinimizerResult` are the medians of the
+probability distributions and a 1 sigma quantile, estimated as half the
+difference between the 15.8 and 84.2 percentiles. The median value is not
+necessarily the same as the Maximum Likelihood Estimate. We'll get that as well.
+You can see that we recovered the right uncertainty level on the data.::
+
+    >>> print("median of posterior probability distribution")
+    >>> print('------------------------------------------')
+    >>> lmfit.report_fit(res.params)
+    median of posterior probability distribution
+    ------------------------------------------
+    [[Variables]]
+        a1:   3.00975345 +/- 0.151034 (5.02%) (init= 2.986237)
+        a2:  -4.35419204 +/- 0.127505 (2.93%) (init=-4.335256)
+        t1:   1.32726415 +/- 0.142995 (10.77%) (init= 1.309932)
+        t2:   11.7911935 +/- 0.495583 (4.20%) (init= 11.82408)
+        f:    0.09805494 +/- 0.004256 (4.34%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(a2, t2)                    =  0.981
+        C(a2, t1)                    = -0.927
+        C(t1, t2)                    = -0.880
+        C(a1, t1)                    = -0.519
+        C(a1, a2)                    =  0.195
+        C(a1, t2)                    =  0.146
+
+    >>> # find the maximum likelihood solution
+    >>> highest_prob = np.argmax(res.lnprob)
+    >>> hp_loc = np.unravel_index(highest_prob, res.lnprob.shape)
+    >>> mle_soln = res.chain[hp_loc]
+    >>> for i, par in enumerate(p):
+    ...     p[par].value = mle_soln[i]
+
+    >>> print("\nMaximum likelihood Estimation")
+    >>> print('-----------------------------')
+    >>> print(p)
+    Maximum likelihood Estimation
+    -----------------------------
+    Parameters([('a1', <Parameter 'a1', 2.9943337359308981, bounds=[-inf:inf]>),
+    ('a2', <Parameter 'a2', -4.3364489105166593, bounds=[-inf:inf]>),
+    ('t1', <Parameter 't1', 1.3124544105342462, bounds=[-inf:inf]>),
+    ('t2', <Parameter 't2', 11.80612160586597, bounds=[-inf:inf]>)])
+
+    >>> # Finally, let's work out a 1 and 2-sigma error estimate for 't1'
+    >>> quantiles = np.percentile(res.flatchain['t1'], [2.28, 15.9, 50, 84.2, 97.7])
+    >>> print("2 sigma spread", 0.5 * (quantiles[-1] - quantiles[0]))
+    2 sigma spread 0.298878202908
+
+Getting and Printing Fit Reports
+===========================================
+
+.. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1)
+
+   generate and return a text report of best-fit values, uncertainties,
+   and correlations from a fit.
+
+   :param result:       :class:`MinimizerResult` object as returned by :func:`minimize`.
+   :param modelpars:    Parameters with "Known Values" (optional, default None)
+   :param show_correl:  whether to show list of sorted correlations [``True``]
+   :param min_correl:   smallest correlation absolute value to show [0.1]
+
+   If the first argument is a :class:`Parameters` object,
+   goodness-of-fit statistics will not be included.
+
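+   For example (a minimal sketch, reusing a ``result`` returned by
+   :func:`minimize`)::
+
+       >>> from lmfit import fit_report
+       >>> print(fit_report(result, min_correl=0.3))
+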
+.. function:: report_fit(result, modelpars=None, show_correl=True, min_correl=0.1)
+
+   print text of report from :func:`fit_report`.
+
+An example fit with report would be
+
+.. literalinclude:: ../examples/doc_withreport.py
+
+which would write out::
+
+    [[Fit Statistics]]
+        # function evals   = 85
+        # data points      = 1001
+        # variables        = 4
+        chi-square         = 498.812
+        reduced chi-square = 0.500
+        Akaike info crit   = -685.215
+        Bayesian info crit = -665.579
+    [[Variables]]
+        amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
+        period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
+        shift:    0.16203676 +/- 0.014056 (8.67%) (init= 0)
+        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(period, shift)             =  0.797
+        C(amp, decay)                =  0.582
+        C(amp, shift)                = -0.297
+        C(amp, period)               = -0.243
+        C(shift, decay)              = -0.182
+        C(period, decay)             = -0.150
diff --git a/doc/index.rst b/doc/index.rst
index 8430f2b..5a2205a 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -1,68 +1,68 @@
-.. lmfit documentation master file,
-
-Non-Linear Least-Square Minimization and Curve-Fitting for Python
-===========================================================================
-
-.. _Levenberg-Marquardt:     http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
-.. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
-
-
-.. warning::
-
-  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
-
-
-Lmfit provides a high-level interface to non-linear optimization and curve
-fitting problems for Python. Lmfit builds on and extends many of the
-optimizatin algorithm of :mod:`scipy.optimize`, especially the
-`Levenberg-Marquardt`_ method from :func:`scipy.optimize.leastsq`.
-
-Lmfit provides a number of useful enhancements to optimization and data
-fitting problems, including:
-
-  * Using :class:`Parameter` objects instead of plain floats as variables.
-    A :class:`Parameter` has a value that can be varied in the fit, have a
-    fixed value, or have upper and/or lower bounds.  A Parameter can even
-    have a value that is constrained by an algebraic expression of other
-    Parameter values.
-
-  * Ease of changing fitting algorithms.  Once a fitting model is set up,
-    one can change the fitting algorithm used to find the optimal solution
-    without changing the objective function.
-
-  * Improved estimation of confidence intervals.  While
-    :func:`scipy.optimize.leastsq` will automatically calculate
-    uncertainties and correlations from the covariance matrix, the accuracy
-    of these estimates are often questionable.  To help address this, lmfit
-    has functions to explicitly explore parameter space to determine
-    confidence levels even for the most difficult cases.
-
-  * Improved curve-fitting with the :class:`Model` class.  This
-    extends the capabilities of :func:`scipy.optimize.curve_fit`, allowing
-    you to turn a function that models for your data into a python class
-    that helps you parametrize and fit data with that model.
-
-  * Many :ref:`pre-built models <builtin_models_chapter>` for common
-    lineshapes are included and ready to use.
-
-.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
-
-The lmfit package is Free software, using an MIT license.  The software and
-this document are works in progress.  If you are interested in
-participating in this effort please use the `lmfit github repository`_.
-
-
-.. toctree::
-   :maxdepth: 2
-
-   intro
-   installation
-   support
-   faq
-   parameters
-   fitting
-   model
-   builtin_models
-   confidence
-   bounds
-   constraints
+.. lmfit documentation master file,
+
+Non-Linear Least-Square Minimization and Curve-Fitting for Python
+===========================================================================
+
+.. _Levenberg-Marquardt:     http://en.wikipedia.org/wiki/Levenberg-Marquardt_algorithm
+.. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
+
+
+.. warning::
+
+  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
+
+
+Lmfit provides a high-level interface to non-linear optimization and curve
+fitting problems for Python. Lmfit builds on and extends many of the
+optimization algorithms of :mod:`scipy.optimize`, especially the
+`Levenberg-Marquardt`_ method from :scipydoc:`optimize.leastsq`.
+
+Lmfit provides a number of useful enhancements to optimization and data
+fitting problems, including:
+
+  * Using :class:`Parameter` objects instead of plain floats as variables.
+    A :class:`Parameter` has a value that can be varied in the fit, held
+    fixed, or bounded above and/or below.  A Parameter can even
+    have a value that is constrained by an algebraic expression of other
+    Parameter values.
+
+  * Ease of changing fitting algorithms.  Once a fitting model is set up,
+    one can change the fitting algorithm used to find the optimal solution
+    without changing the objective function.
+
+  * Improved estimation of confidence intervals.  While
+    :scipydoc:`optimize.leastsq` will automatically calculate
+    uncertainties and correlations from the covariance matrix, the accuracy
+    of these estimates is often questionable.  To help address this, lmfit
+    has functions to explicitly explore parameter space to determine
+    confidence levels even for the most difficult cases.
+
+  * Improved curve-fitting with the :class:`Model` class.  This
+    extends the capabilities of :scipydoc:`optimize.curve_fit`, allowing
+    you to turn a function that models your data into a Python class
+    that helps you parametrize and fit data with that model.
+
+  * Many :ref:`pre-built models <builtin_models_chapter>` for common
+    lineshapes are included and ready to use.
+
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
+
+The lmfit package is Free software, using an MIT license.  The software and
+this document are works in progress.  If you are interested in
+participating in this effort please use the `lmfit github repository`_.
+
+
+.. toctree::
+   :maxdepth: 2
+
+   intro
+   installation
+   support
+   faq
+   parameters
+   fitting
+   model
+   builtin_models
+   confidence
+   bounds
+   constraints
diff --git a/doc/installation.rst b/doc/installation.rst
index 9112fc6..f9adadd 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -1,82 +1,82 @@
-====================================
-Downloading and Installation
-====================================
-
-.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
-.. _Python Setup Tools:        http://pypi.python.org/pypi/setuptools
-.. _pip:  https://pip.pypa.io/
-.. _nose: http://nose.readthedocs.org/
-
-Prerequisites
-~~~~~~~~~~~~~~~
-
-The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
-higher is recommended, but extensive testing on compatibility with various
-versions of scipy has not been done.  Lmfit works with Python 2.7, 3.3 and
-3.4.  No testing has been done with Python 3.5, but as the package is pure
-Python, relying only on scipy and numpy, no significant troubles are
-expected.  The `nose`_ framework is required for running the test suite,
-and IPython and matplotib are recommended.  If Pandas is available, it will
-be used in portions of lmfit.
-
-
-Downloads
-~~~~~~~~~~~~~
-
-
-The latest stable version of lmfit is  available from `PyPi <http://pypi.python.org/pypi/lmfit/>`_.
-
-Installation
-~~~~~~~~~~~~~~~~~
-
-If you have `pip`_  installed, you can install lmfit with::
-
-    pip install lmfit
-
-or, if  you have `Python Setup Tools`_  installed, you install lmfit with::
-
-   easy_install -U lmfit
-
-
-or, you can download the source kit, unpack it and install with::
-
-   python setup.py install
-
-
-Development Version
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-To get the latest development version, use::
-
-   git clone http://github.com/lmfit/lmfit-py.git
-
-
-and install using::
-
-   python setup.py install
-
-
-Testing
-~~~~~~~~~~
-
-A battery of tests scripts that can be run with the `nose`_ testing
-framework is distributed with lmfit in the ``tests`` folder.  These are
-routinely run on the development version.  Running ``nosetests`` should run
-all of these tests to completion without errors or failures.
-
-Many of the examples in this documentation are distributed with lmfit in
-the ``examples`` folder, and should also run for you.  Many of these require
-
-
-Acknowledgements
-~~~~~~~~~~~~~~~~~~
-
-.. literalinclude:: ../THANKS.txt
-
-
-License
-~~~~~~~~~~~~~
-
-The LMFIT-py code is distribution under the following license:
-
-.. literalinclude:: ../LICENSE
+====================================
+Downloading and Installation
+====================================
+
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
+.. _Python Setup Tools:        http://pypi.python.org/pypi/setuptools
+.. _pip:  https://pip.pypa.io/
+.. _nose: http://nose.readthedocs.org/
+
+Prerequisites
+~~~~~~~~~~~~~~~
+
+The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
+higher is recommended, but extensive testing on compatibility with various
+versions of scipy has not been done.  Lmfit works with Python 2.7, 3.3 and
+3.4.  No testing has been done with Python 3.5, but as the package is pure
+Python, relying only on scipy and numpy, no significant troubles are
+expected.  The `nose`_ framework is required for running the test suite,
+and IPython and matplotlib are recommended.  If Pandas is available, it will
+be used in portions of lmfit.
+
+
+Downloads
+~~~~~~~~~~~~~
+
+
+The latest stable version of lmfit is available from `PyPI <http://pypi.python.org/pypi/lmfit/>`_.
+
+Installation
+~~~~~~~~~~~~~~~~~
+
+If you have `pip`_  installed, you can install lmfit with::
+
+    pip install lmfit
+
+or, if you have `Python Setup Tools`_ installed, you can install lmfit with::
+
+   easy_install -U lmfit
+
+
+or, you can download the source kit, unpack it and install with::
+
+   python setup.py install
+
+
+Development Version
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To get the latest development version, use::
+
+   git clone http://github.com/lmfit/lmfit-py.git
+
+
+and install using::
+
+   python setup.py install
+
+
+Testing
+~~~~~~~~~~
+
+A battery of test scripts that can be run with the `nose`_ testing
+framework is distributed with lmfit in the ``tests`` folder.  These are
+routinely run on the development version.  Running ``nosetests`` should run
+all of these tests to completion without errors or failures.
+
+Many of the examples in this documentation are distributed with lmfit in
+the ``examples`` folder, and should also run for you.  Many of these
+require matplotlib.
+
+
+Acknowledgements
+~~~~~~~~~~~~~~~~~~
+
+.. literalinclude:: ../THANKS.txt
+
+
+License
+~~~~~~~~~~~~~
+
+The LMFIT-py code is distributed under the following license:
+
+.. literalinclude:: ../LICENSE
diff --git a/doc/intro.rst b/doc/intro.rst
index c480ae6..bc854f4 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -1,150 +1,150 @@
-.. _intro_chapter:
-
-===========================================================
-Getting started with Non-Linear Least-Squares Fitting
-===========================================================
-
-The lmfit package is designed to provide simple tools to help you build
-complex fitting models for non-linear least-squares problems and apply
-these models to real data.  This section gives an overview of the concepts
-and describes how to set up and perform simple fits.  Some basic knowledge
-of Python, numpy, and modeling data are assumed.
-
-To do a non-linear least-squares fit of a model to data or for a variety of other
-optimization problems, the main task is to write an *objective function*
-that takes the values of the fitting variables and calculates either a
-scalar value to be minimized or an array of values that is to be minimized
-in the least-squares sense.   For many data fitting processes, the
-least-squares approach is used, and the objective function should
-return an array of (data-model), perhaps scaled by some weighting factor
-such as the inverse of the uncertainty in the data.  For such a problem,
-the chi-square (:math:`\chi^2`) statistic is often defined as:
-
-.. math::
-
- \chi^2 =  \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2}
-
-where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm
-model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
-variables in the model to be optimized in the fit, and :math:`\epsilon_i`
-is the estimated uncertainty in the data.
-
-In a traditional non-linear fit, one writes an objective function that takes the
-variable values and calculates the residual :math:`y^{\rm meas}_i -
-y_i^{\rm model}({\bf{v}})`, or the residual scaled by the data
-uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
-model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.  As a
-simple example, one might write an objective function like this::
-
-    def residual(vars, x, data, eps_data):
-        amp = vars[0]
-        phaseshift = vars[1]
-	freq = vars[2]
-        decay = vars[3]
-
-	model = amp * sin(x * freq  + phaseshift) * exp(-x*x*decay)
-
-        return (data-model)/eps_data
-
-To perform the minimization with :mod:`scipy.optimize`, one would do::
-
-    from scipy.optimize import leastsq
-    vars = [10.0, 0.2, 3.0, 0.007]
-    out = leastsq(residual, vars, args=(x, data, eps_data))
-
-Though it is wonderful to be able to use python for such optimization
-problems, and the scipy library is robust and easy to use, the approach
-here is not terribly different from how one would do the same fit in C or
-Fortran.  There are several practical challenges to using this approach,
-including:
-
-  a) The user has to keep track of the order of the variables, and their
-     meaning -- vars[0] is the amplitude, vars[2] is the frequency, and so
-     on, although there is no intrinsic meaning to this order.
-
-  b) If the user wants to fix a particular variable (*not* vary it in the
-     fit), the residual function has to be altered to have fewer variables,
-     and have the corresponding constant value passed in some other way.
-     While reasonable for simple cases, this quickly becomes a significant
-     work for more complex models, and greatly complicates modeling for
-     people not intimately familiar with the details of the fitting code.
-
-  c) There is no simple, robust way to put bounds on values for the
-     variables, or enforce mathematical relationships between the
-     variables.  In fact, those optimization methods that do provide
-     bounds, require bounds to be set for all variables with separate
-     arrays that are in the same arbitrary order as variable values.
-     Again, this is acceptable for small or one-off cases, but becomes
-     painful if the fitting model needs to change.
-
-These shortcomings are really do solely to the use of traditional arrays of
-variables, as matches closely the implementation of the Fortran code.  The
-lmfit module overcomes these shortcomings by using objects -- a core reason for wokring with
-Python.  The key concept for lmfit is to use :class:`Parameter`
-objects instead of plain floating point numbers as the variables for the
-fit.  By using :class:`Parameter` objects (or the closely related
-:class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can
-
-   a) forget about the order of variables and refer to Parameters
-      by meaningful names.
-   b) place bounds on Parameters as attributes, without worrying about order.
-   c) fix Parameters, without having to rewrite the objective function.
-   d) place algebraic constraints on Parameters.
-
-To illustrate the value of this approach, we can rewrite the above example
-as::
-
-    from lmfit import minimize, Parameters
-
-    def residual(params, x, data, eps_data):
-        amp = params['amp'].value
-        pshift = params['phase'].value
-	freq = params['frequency'].value
-        decay = params['decay'].value
-
-	model = amp * sin(x * freq  + pshift) * exp(-x*x*decay)
-
-        return (data-model)/eps_data
-
-    params = Parameters()
-    params.add('amp', value=10)
-    params.add('decay', value=0.007)
-    params.add('phase', value=0.2)
-    params.add('frequency', value=3.0)
-
-    out = minimize(residual, params, args=(x, data, eps_data))
-
-
-At first look, we simply replaced a list of values with a dictionary,
-accessed by name -- not a huge improvement.  But each of the named
-:class:`Parameter` in the :class:`Parameters` object holds additional
-attributes to modify the value during the fit.  For example, Parameters can
-be fixed or bounded.  This can be done during definition::
-
-    params = Parameters()
-    params.add('amp', value=10, vary=False)
-    params.add('decay', value=0.007, min=0.0)
-    params.add('phase', value=0.2)
-    params.add('frequency', value=3.0, max=10)
-
-where ``vary=False`` will prevent the value from changing in the fit, and
-``min=0.0`` will set a lower bound on that parameters value). It can also be done
-later by setting the corresponding attributes after they have been
-created::
-
-    params['amp'].vary = False
-    params['decay'].min = 0.10
-
-Importantly, our objective function remains unchanged.
-
-The `params` object can be copied and modified to make many user-level
-changes to the model and fitting process.  Of course, most of the
-information about how your data is modeled goes into the objective
-function, but the approach here allows some external control; that is, control by
-the **user** performing the fit, instead of by the author of the
-objective function.
-
-Finally, in addition to the :class:`Parameters` approach to fitting data,
-lmfit allows switching optimization methods without changing
-the objective function, provides tools for writing fitting reports, and
-provides better determination of Parameters confidence levels.
+.. _intro_chapter:
+
+===========================================================
+Getting started with Non-Linear Least-Squares Fitting
+===========================================================
+
+The lmfit package is designed to provide simple tools to help you build
+complex fitting models for non-linear least-squares problems and apply
+these models to real data.  This section gives an overview of the concepts
+and describes how to set up and perform simple fits.  Some basic knowledge
+of Python, numpy, and modeling data are assumed.
+
+To do a non-linear least-squares fit of a model to data or for a variety of other
+optimization problems, the main task is to write an *objective function*
+that takes the values of the fitting variables and calculates either a
+scalar value to be minimized or an array of values that is to be minimized
+in the least-squares sense.   For many data fitting processes, the
+least-squares approach is used, and the objective function should
+return an array of (data-model), perhaps scaled by some weighting factor
+such as the inverse of the uncertainty in the data.  For such a problem,
+the chi-square (:math:`\chi^2`) statistic is often defined as:
+
+.. math::
+
+ \chi^2 =  \sum_i^{N} \frac{[y^{\rm meas}_i - y_i^{\rm model}({\bf{v}})]^2}{\epsilon_i^2}
+
+where :math:`y_i^{\rm meas}` is the set of measured data, :math:`y_i^{\rm
+model}({\bf{v}})` is the model calculation, :math:`{\bf{v}}` is the set of
+variables in the model to be optimized in the fit, and :math:`\epsilon_i`
+is the estimated uncertainty in the data.
+
+In a traditional non-linear fit, one writes an objective function that takes the
+variable values and calculates the residual :math:`y^{\rm meas}_i -
+y_i^{\rm model}({\bf{v}})`, or the residual scaled by the data
+uncertainties, :math:`[y^{\rm meas}_i - y_i^{\rm
+model}({\bf{v}})]/{\epsilon_i}`, or some other weighting factor.  As a
+simple example, one might write an objective function like this::
+
+    def residual(vars, x, data, eps_data):
+        amp = vars[0]
+        phaseshift = vars[1]
+        freq = vars[2]
+        decay = vars[3]
+
+        model = amp * sin(x * freq + phaseshift) * exp(-x*x*decay)
+
+        return (data - model) / eps_data
+
+To perform the minimization with :mod:`scipy.optimize`, one would do::
+
+    from scipy.optimize import leastsq
+    vars = [10.0, 0.2, 3.0, 0.007]
+    out = leastsq(residual, vars, args=(x, data, eps_data))
+
+Though it is wonderful to be able to use python for such optimization
+problems, and the scipy library is robust and easy to use, the approach
+here is not terribly different from how one would do the same fit in C or
+Fortran.  There are several practical challenges to using this approach,
+including:
+
+  a) The user has to keep track of the order of the variables, and their
+     meaning -- vars[0] is the amplitude, vars[2] is the frequency, and so
+     on, although there is no intrinsic meaning to this order.
+
+  b) If the user wants to fix a particular variable (*not* vary it in the
+     fit), the residual function has to be altered to have fewer variables,
+     and have the corresponding constant value passed in some other way.
+     While reasonable for simple cases, this quickly becomes a significant
+     amount of work for more complex models, and greatly complicates modeling
+     for people not intimately familiar with the details of the fitting code.
+
+  c) There is no simple, robust way to put bounds on values for the
+     variables, or enforce mathematical relationships between the
+     variables.  In fact, those optimization methods that do provide
+     bounds require bounds to be set for all variables, with separate
+     arrays that are in the same arbitrary order as the variable values.
+     Again, this is acceptable for small or one-off cases, but becomes
+     painful if the fitting model needs to change.
+
+These shortcomings are due solely to the use of traditional arrays of
+variables, which closely matches the implementation of the underlying
+Fortran code.  The lmfit module overcomes these shortcomings by using
+objects -- a core reason for working with Python.  The key concept for
+lmfit is to use :class:`Parameter`
+objects instead of plain floating point numbers as the variables for the
+fit.  By using :class:`Parameter` objects (or the closely related
+:class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can
+
+   a) forget about the order of variables and refer to Parameters
+      by meaningful names.
+   b) place bounds on Parameters as attributes, without worrying about order.
+   c) fix Parameters, without having to rewrite the objective function.
+   d) place algebraic constraints on Parameters.
+
+To illustrate the value of this approach, we can rewrite the above example
+as::
+
+    from lmfit import minimize, Parameters
+
+    def residual(params, x, data, eps_data):
+        amp = params['amp'].value
+        pshift = params['phase'].value
+        freq = params['frequency'].value
+        decay = params['decay'].value
+
+        model = amp * sin(x * freq + pshift) * exp(-x*x*decay)
+
+        return (data - model) / eps_data
+
+    params = Parameters()
+    params.add('amp', value=10)
+    params.add('decay', value=0.007)
+    params.add('phase', value=0.2)
+    params.add('frequency', value=3.0)
+
+    out = minimize(residual, params, args=(x, data, eps_data))
+
+
+At first look, we simply replaced a list of values with a dictionary,
+accessed by name -- not a huge improvement.  But each named
+:class:`Parameter` in the :class:`Parameters` object holds additional
+attributes that control its value during the fit.  For example, Parameters can
+be fixed or bounded.  This can be done during definition::
+
+    params = Parameters()
+    params.add('amp', value=10, vary=False)
+    params.add('decay', value=0.007, min=0.0)
+    params.add('phase', value=0.2)
+    params.add('frequency', value=3.0, max=10)
+
+where ``vary=False`` will prevent the value from changing in the fit, and
+``min=0.0`` will set a lower bound on that parameter's value.  It can also
+be done later by setting the corresponding attributes after they have been
+created::
+
+    params['amp'].vary = False
+    params['decay'].min = 0.10
+
+Importantly, our objective function remains unchanged.
+
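+As a brief sketch (assuming ``x``, ``data``, and ``eps_data`` are defined as
+before), the fit can simply be re-run with the modified parameters::
+
+    out = minimize(residual, params, args=(x, data, eps_data))
+    print(out.params['decay'].value, out.params['decay'].vary)
+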
+The `params` object can be copied and modified to make many user-level
+changes to the model and fitting process.  Of course, most of the
+information about how your data is modeled goes into the objective
+function, but the approach here allows some external control; that is, control by
+the **user** performing the fit, instead of by the author of the
+objective function.
+
+Finally, in addition to the :class:`Parameters` approach to fitting data,
+lmfit allows switching optimization methods without changing
+the objective function, provides tools for writing fitting reports, and
+provides better determination of Parameters confidence levels.
diff --git a/doc/model.rst b/doc/model.rst
index 1e1054b..9b0b595 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -1,1140 +1,1150 @@
-.. _model_chapter:
-
-=================================================
-Modeling Data and Curve Fitting
-=================================================
-
-.. module:: model
-
-A common use of least-squares minimization is *curve fitting*, where one
-has a parametrized model function meant to explain some phenomena and wants
-to adjust the numerical values for the model to most closely match some
-data.  With :mod:`scipy`, such problems are commonly solved with
-:func:`scipy.optimize.curve_fit`, which is a wrapper around
-:func:`scipy.optimize.leastsq`.  Since Lmfit's :func:`minimize` is also a
-high-level wrapper around :func:`scipy.optimize.leastsq` it can be used for
-curve-fitting problems, but requires more effort than using
-:func:`scipy.optimize.curve_fit`.
-
-Here we discuss lmfit's :class:`Model` class.  This takes a model function
--- a function that calculates a model for some data -- and provides methods
-to create parameters for that model and to fit data using that model
-function.  This is closer in spirit to :func:`scipy.optimize.curve_fit`,
-but with the advantages of using :class:`Parameters` and lmfit.
-
-In addition to allowing you to turn any model function into a curve-fitting
-method, Lmfit also provides canonical definitions for many known line shapes
-such as Gaussian or Lorentzian peaks and Exponential decays that are widely
-used in many scientific domains.  These are available in the :mod:`models`
-module that will be discussed in more detail in the next chapter
-(:ref:`builtin_models_chapter`).  We mention it here as you may want to
-consult that list before writing your own model.  For now, we focus on
-turning python functions into high-level fitting models with the
-:class:`Model` class, and using these to fit data.
-
-
-Example: Fit data to Gaussian profile
-================================================
-
-Let's start with a simple and common example of fitting data to a Gaussian
-peak.  As we will see, there is a built-in :class:`GaussianModel` class that
-provides a model function for a Gaussian profile, but here we'll build our
-own.  We start with a simple definition of the model function:
-
-    >>> from numpy import sqrt, pi, exp, linspace
-    >>>
-    >>> def gaussian(x, amp, cen, wid):
-    ...    return amp * exp(-(x-cen)**2 /wid)
-    ...
-
-We want to fit this objective function to data :math:`y(x)` represented by the
-arrays ``y`` and ``x``.  This can be done easily with :func:`scipy.optimize.curve_fit`::
-
-    >>> import numpy as np
-    >>> from scipy.optimize import curve_fit
-    >>>
-    >>> x = linspace(-10, 10)
-    >>> y = gaussian(x, 2.33, 0.21, 1.51) + np.random.normal(0, 0.2, len(x))
-    >>>
-    >>> init_vals = [1, 0, 1]     # for [amp, cen, wid]
-    >>> best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
-    >>> print best_vals
-
-
-We sample random data points, make an initial guess of the model
-values, and run :func:`scipy.optimize.curve_fit` with the model function,
-data arrays, and initial guesses.  The results returned are the optimal
-values for the parameters and the covariance matrix.  It's simple and very
-useful.  But it misses the benefits of lmfit.
-
-
-To solve this with lmfit we would have to write an objective function. But
-such a function would be fairly simple (essentially, ``data - model``,
-possibly with some weighting), and we would need to define and use
-appropriately named parameters.  Though convenient, it is somewhat of a
-burden to keep the named parameters straight (on the other hand, with
-:func:`scipy.optimize.curve_fit` you are required to remember the parameter
-order).  After doing this a few times it appears as a recurring pattern,
-and we can imagine automating this process.  That's where the
-:class:`Model` class comes in.
-
-:class:`Model` allows us to easily wrap a model function such as the
-``gaussian`` function.  This automatically generates the appropriate
-residual function, and determines the corresponding parameter names from
-the function signature itself::
-
-    >>> from lmfit import Model
-    >>> gmod = Model(gaussian)
-    >>> gmod.param_names
-    set(['amp', 'wid', 'cen'])
-    >>> gmod.independent_vars
-    ['x']
-
-The Model ``gmod`` knows the names of the parameters and the independent
-variables.  By default, the first argument of the function is taken as the
-independent variable, held in :attr:`independent_vars`, and the rest of the
-function's positional arguments (and, in certain cases, keyword arguments --
-see below) are used for Parameter names.  Thus, for the ``gaussian``
-function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
-``x`` is the independent variable -- all taken directly from the signature
-of the model function. As we will see below, you can specify what the
-independent variable is, and you can add or alter parameters, too.
-
-The parameters are *not* created when the model is created. The model knows
-what the parameters should be named, but not anything about the scale and
-range of your data.  You will normally have to make these parameters and
-assign initial values and other attributes.  To help you do this, each
-model has a :meth:`make_params` method that will generate parameters with
-the expected names:
-
-    >>> params = gmod.make_params()
-
-This creates the :class:`Parameters` but doesn't necessarily give them
-initial values -- again, the model has no idea what the scale should be.
-You can set initial values for parameters with keyword arguments to
-:meth:`make_params`:
-
-
-    >>> params = gmod.make_params(cen=5, amp=200, wid=1)
-
-or assign them (and other parameter properties) after the
-:class:`Parameters` has been created.
-
-A :class:`Model` has several methods associated with it.  For example, one
-can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
-method to fit data to this model with a :class:`Parameter` object.  Both of
-these methods can take explicit keyword arguments for the parameter values.
-For example, one could use :meth:`eval` to calculate the predicted
-function::
-
-    >>> x = linspace(0, 10, 201)
-    >>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
-
-Admittedly, this is a slightly long-winded way to calculate a Gaussian
-function.   But now that the model is set up, we can also use its
-:meth:`fit` method to fit this model to data, as with::
-
-    >>> result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
-
-Putting everything together, the script to do such a fit (included in the
-``examples`` folder with the source code) is:
-
-.. literalinclude:: ../examples/doc_model1.py
-
-which is pretty compact and to the point.  The returned ``result`` will be
-a :class:`ModelResult` object.  As we will see below, this has many
-components, including a :meth:`fit_report` method, which will show::
-
-    [[Model]]
-        gaussian
-    [[Fit Statistics]]
-        # function evals   = 33
-        # data points      = 101
-        # variables        = 3
-        chi-square         = 3.409
-        reduced chi-square = 0.035
-    [[Variables]]
-        amp:   8.88021829 +/- 0.113594 (1.28%) (init= 5)
-        cen:   5.65866102 +/- 0.010304 (0.18%) (init= 5)
-        wid:   0.69765468 +/- 0.010304 (1.48%) (init= 1)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(amp, wid)                  =  0.577
-
-The result will also have :attr:`init_fit` for the fit with the initial
-parameter values and a :attr:`best_fit` for the fit with the best fit
-parameter values.  These can be used to generate the following plot:
-
-
-.. image:: _images/model_fit1.png
-   :target: _images/model_fit1.png
-   :width: 50%
-
-which shows the data in blue dots, the best fit as a solid red line, and
-the initial fit as a dashed black line.
-
-Note that the model fitting was really performed with 2 lines of code::
-
-    gmod = Model(gaussian)
-    result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
-
-These lines clearly express that we want to turn the ``gaussian`` function
-into a fitting model, and then fit the :math:`y(x)` data to this model,
-starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``.
-This is much more expressive than :func:`scipy.optimize.curve_fit`::
-
-    best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
-
-In addition, all the other features of lmfit are included:
-:class:`Parameters` can have bounds and constraints and the result is a
-rich object that can be reused to explore the model fit in detail.
-
-
-The :class:`Model` class
-=======================================
-
-The :class:`Model` class provides a general way to wrap a pre-defined
-function as a fitting model.
-
-.. class::  Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix=''[, name=None[, **kws]]]]]])
-
-    Create a model based on the user-supplied function.  This uses
-    introspection to automatically converting argument names of the
-    function to Parameter names.
-
-    :param func: model function to be wrapped
-    :type func: callable
-    :param independent_vars: list of argument names to ``func`` that are independent variables.
-    :type independent_vars: ``None`` (default) or list of strings.
-    :param param_names: list of argument names to ``func`` that should be made into Parameters.
-    :type param_names: ``None`` (default) or list of strings
-    :param missing: how to handle missing values.
-    :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
-    :param prefix: prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`.
-    :type prefix: string
-    :param name: name for the model. When ``None`` (default) the name is the same  as the model function (``func``).
-    :type name: ``None`` or string.
-    :param kws:   additional keyword arguments to pass to model function.
-
-
-Of course, the model function will have to return an array that will be the
-same size as the data being modeled.  Generally this is handled by also
-specifying one or more independent variables.
-
-
-:class:`Model` class Methods
----------------------------------
-
-.. method:: Model.eval(params=None[, **kws])
-
-   evaluate the model function for a set of parameters and inputs.
-
-   :param params: parameters to use for fit.
-   :type params: ``None`` (default) or Parameters
-   :param kws:    additional keyword arguments to pass to model function.
-   :return:       ndarray for model given the parameters and other arguments.
-
-   If ``params`` is ``None``, the values for all parameters are expected to
-   be provided as keyword arguments.  If ``params`` is given, and a keyword
-   argument for a parameter value is also given, the keyword argument will
-   be used.
-
-   Note that all non-parameter arguments for the model function --
-   **including all the independent variables!** -- will need to be passed
-   in using keyword arguments.
-
-
-.. method:: Model.fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
-
-   perform a fit of the model to the ``data`` array with a set of
-   parameters.
-
-   :param data: array of data to be fitted.
-   :type data: ndarray-like
-   :param params: parameters to use for fit.
-   :type params: ``None`` (default) or Parameters
-   :param weights: weights to use for residual calculation in fit.
-   :type weights: ``None`` (default) or ndarray-like.
-   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
-   :type  method:  string (default ``leastsq``)
-   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
-   :type  scale_covar:  bool (default ``True``)
-   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
-   :type  iter_cb:  callable or ``None``
-   :param verbose:  print a message when a new parameter is created due to a *hint*
-   :type  verbose:  bool (default ``True``)
-   :param kws:      additional keyword arguments to pass to model function.
-   :return:         :class:`ModelResult` object.
-
-   If ``params`` is ``None``, the internal ``params`` will be used. If it
-   is supplied, these will replace the internal ones.   If supplied,
-   ``weights`` will be used to weight the calculated residual so that the
-   quantity minimized in the least-squares sense is ``weights*(data -
-   fit)``.  ``weights`` must be an ndarray-like object of same size and
-   shape as ``data``.
-
-   Note that other arguments for the model function (including all the
-   independent variables!) will need to be passed in using keyword
-   arguments.
-
-
-.. method:: Model.guess(data, **kws)
-
-   Guess starting values for model parameters.
-
-    :param data: data array used to guess parameter values
-    :type data:  ndarray
-    :param kws:  additional options to pass to model function.
-    :return: :class:`Parameters` with guessed initial values for each parameter.
-
-   By default this is left to raise a ``NotImplementedError``, but may be
-   overridden by subclasses.  Generally, this method should take some
-   values for ``data`` and use it to construct reasonable starting values for
-   the parameters.
-
-
-.. method:: Model.make_params(**kws)
-
-   Create a set of parameters for model.
-
-    :param kws:  optional keyword/value pairs to set initial values for parameters.
-    :return: :class:`Parameters`.
-
-    The parameters may or may not have decent initial values for each
-    parameter.
-
-
-.. method:: Model.set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
-
-   set *hints* to use when creating parameters with :meth:`Model.make_params` for
-   the named parameter.  This is especially convenient for setting initial
-   values.  The ``name`` can include the model's ``prefix`` or not.
-
-   :param name: parameter name.
-   :type name: string
-   :param value: value for parameter
-   :type value: float
-   :param min:  lower bound for parameter value
-   :type min: ``None`` or float
-   :param max:  upper bound for parameter value
-   :type max: ``None`` or float
-   :param vary:  whether to vary parameter in fit.
-   :type vary: boolean
-   :param expr:  mathematical expression for constraint
-   :type expr: string
-
-   See :ref:`model_param_hints_section`.
-
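-   For example (a minimal sketch, assuming the ``gaussian`` model ``gmod``
-   from above)::
-
-       >>> gmod.set_param_hint('wid', value=1.0, min=0)
-       >>> params = gmod.make_params()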
-
-:class:`Model` class Attributes
----------------------------------
-
-.. attribute:: func
-
-   The model function used to calculate the model.
-
-.. attribute:: independent_vars
-
-   list of strings for names of the independent variables.
-
-.. attribute:: missing
-
-   describes what to do for missing values.  The choices are
-
-    * ``None``: Do not check for null or missing values (default)
-    * ``'none'``: Do not check for null or missing values.
-    * ``'drop'``: Drop null or missing observations in data.  If pandas is
-                installed, ``pandas.isnull`` is used, otherwise :attr:`numpy.isnan` is used.
-    * ``'raise'``: Raise a (more helpful) exception when data contains null
-                  or missing values.
-
-.. attribute:: name
-
-   name of the model, used only in the string representation of the
-   model. By default this will be taken from the model function.
-
-.. attribute:: opts
-
-   extra keyword arguments to pass to model function.  Normally this will
-   be determined internally and should not be changed.
-
-.. attribute:: param_hints
-
-   Dictionary of parameter hints.  See :ref:`model_param_hints_section`.
-
-.. attribute:: param_names
-
-   list of strings of parameter names.
-
-.. attribute:: prefix
-
-   prefix used for name-mangling of parameter names.  The default is ''.
-   If a particular :class:`Model` has arguments ``amplitude``,
-   ``center``, and ``sigma``, these would become the parameter names.
-   Using a prefix of ``g1_`` would convert these parameter names to
-   ``g1_amplitude``, ``g1_center``, and ``g1_sigma``.   This can be
-   essential to avoid name collision in composite models.
-
-
-Determining parameter names and independent variables for a function
------------------------------------------------------------------------
-
-The :class:`Model` created from the supplied function ``func`` will create
-a :class:`Parameters` object, with parameter names inferred from the function
-arguments, and a residual function is automatically constructed.
-
-
-By default, the independent variable is taken as the first argument to the
-function.  You can explicitly set this, of course, and will need to if the
-independent variable is not first in the list, or if there is actually more
-than one independent variable.
-
-If not specified, Parameters are constructed from all positional arguments
-and all keyword arguments that have a default value that is numerical, except
-the independent variable, of course.   Importantly, the Parameters can be
-modified after creation.  In fact, you'll have to do this because none of the
-parameters have valid initial values.  You can place bounds and constraints
-on Parameters, or fix their values.
-
-
-
-Explicitly specifying ``independent_vars``
--------------------------------------------------
-
-As we saw for the Gaussian example above, creating a :class:`Model` from a
-function is fairly easy. Let's try another::
-
-    >>> def decay(t, tau, N):
-    ...    return N*np.exp(-t/tau)
-    ...
-    >>> decay_model = Model(decay)
-    >>> print decay_model.independent_vars
-    ['t']
-    >>> for pname, par in decay_model.params.items():
-    ...     print pname, par
-    ...
-    tau <Parameter 'tau', None, bounds=[None:None]>
-    N <Parameter 'N', None, bounds=[None:None]>
-
-Here, ``t`` is assumed to be the independent variable because it is the
-first argument to the function.  The other function arguments are used to
-create parameters for the model.
-
-If you want ``tau`` to be the independent variable in the above example,
-you can say so::
-
-    >>> decay_model = Model(decay, independent_vars=['tau'])
-    >>> print decay_model.independent_vars
-    ['tau']
-    >>> for pname, par in decay_model.params.items():
-    ...     print pname, par
-    ...
-    t <Parameter 't', None, bounds=[None:None]>
-    N <Parameter 'N', None, bounds=[None:None]>
-
-
-You can also supply multiple values for multi-dimensional functions with
-multiple independent variables.  In fact, the meaning of *independent
-variable* here is simple, and based on how it treats arguments of the
-function you are modeling:
-
-independent variable
-    a function argument that is not a parameter or otherwise part of the
-    model, and that will be required to be explicitly provided as a
-    keyword argument for each fit with :meth:`Model.fit` or evaluation
-    with :meth:`Model.eval`.
-
-Note that independent variables are not required to be arrays, or even
-floating point numbers.
-
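-For example (a sketch), a string-valued independent variable could select a
-model branch::
-
-    >>> from numpy import sin, cos
-    >>> def mysine(x, kind, amp=1):
-    ...    return amp * (sin(x) if kind == 'sine' else cos(x))
-    ...
-    >>> mod = Model(mysine, independent_vars=['x', 'kind'])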
-
-Functions with keyword arguments
------------------------------------------
-
-If the model function had keyword parameters, these would be turned into
-Parameters if the supplied default value was a valid number (but not
-``None``, ``True``, or ``False``).
-
-    >>> def decay2(t, tau, N=10, check_positive=False):
-    ...    if check_positive:
-    ...        arg = abs(t)/max(1.e-9, abs(tau))
-    ...    else:
-    ...        arg = t/tau
-    ...    return N*np.exp(-arg)
-    ...
-    >>> mod = Model(decay2)
-    >>> for pname, par in mod.params.items():
-    ...     print pname, par
-    ...
-    tau <Parameter 'tau', None, bounds=[None:None]>
-    N <Parameter 'N', 10, bounds=[None:None]>
-
-Here, even though ``N`` is a keyword argument to the function, it is turned
-into a parameter, with the default numerical value as its initial value.
-By default, it is permitted to be varied in the fit -- the 10 is taken as
-an initial value, not a fixed value.  On the other hand, the
-``check_positive`` keyword argument was not converted to a parameter
-because it has a boolean default value.  In some sense,
-``check_positive`` becomes like an independent variable to the model.
-However, because it has a default value it is not required to be given for
-each model evaluation or fit, as independent variables are.
-
-Defining a ``prefix`` for the Parameters
---------------------------------------------
-
-As we will see in the next chapter when combining models, it is sometimes
-necessary to decorate the parameter names in the model, but still have them
-be correctly used in the underlying model function.  This would be
-necessary, for example, if two parameters in a composite model (see
-:ref:`composite_models_section` or examples in the next chapter) would have
-the same name.  To avoid this, we can add a ``prefix`` to the
-:class:`Model` which will automatically do this mapping for us::
-
-    >>> def myfunc(x, amplitude=1, center=0, sigma=1):
-    ...
-
-    >>> mod = Model(myfunc, prefix='f1_')
-    >>> for pname, par in mod.params.items():
-    ...     print pname, par
-    ...
-    f1_amplitude <Parameter 'f1_amplitude', None, bounds=[None:None]>
-    f1_center <Parameter 'f1_center', None, bounds=[None:None]>
-    f1_sigma <Parameter 'f1_sigma', None, bounds=[None:None]>
-
-You would refer to these parameters as ``f1_amplitude`` and so forth, and
-the model will know to map these to the ``amplitude`` argument of ``myfunc``.
-
-
-Initializing model parameters
------------------------------------------
-
-As mentioned above, the parameters created by :meth:`Model.make_params` are
-generally created with invalid initial values of ``None``.  These values
-**must** be initialized in order for the model to be evaluated or used in a
-fit.  There are four different ways to do this initialization that can be
-used in any combination:
-
-  1. You can supply initial values in the definition of the model function.
-  2. You can initialize the parameters when creating parameters with :meth:`Model.make_params`.
-  3. You can give parameter hints with :meth:`Model.set_param_hint`.
-  4. You can supply initial values for the parameters when you use the
-     :meth:`Model.eval` or :meth:`Model.fit` methods.
-
-Of course these methods can be mixed, allowing you to overwrite initial
-values at any point in the process of defining and using the model.
-
-Initializing values in the function definition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To supply initial values for parameters in the definition of the model
-function, you can simply supply a default value::
-
-    >>> def myfunc(x, a=1, b=0):
-    >>>     ...
-
-instead of using::
-
-    >>> def myfunc(x, a, b):
-    >>>     ...
-
-This has the advantage of working at the function level -- all parameters
-with keywords can be treated as options.  It also means that some default
-initial value will always be available for the parameter.
-
-
-Initializing values with :meth:`Model.make_params`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When creating parameters with :meth:`Model.make_params` you can specify initial
-values.  To do this, use keyword arguments for the parameter names and
-initial values::
-
-    >>> mod = Model(myfunc)
-    >>> pars = mod.make_params(a=3, b=0.5)
-
-
-Initializing values by setting parameter hints
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-After a model has been created, but prior to creating parameters with
-:meth:`Model.make_params`, you can set parameter hints.  These allow you to
-set not only a default initial value but also other parameter attributes
-controlling bounds, whether it is varied in the fit, or a constraint
-expression.  To set a parameter hint, you can use :meth:`Model.set_param_hint`,
-as with::
-
-    >>> mod = Model(myfunc)
-    >>> mod.set_param_hint('a', value = 1.0)
-    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
-    >>> pars = mod.make_params()
-
-Parameter hints are discussed in more detail in section
-:ref:`model_param_hints_section`.
-
-
-Initializing values when using a model
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Finally, you can explicitly supply initial values when using a model.  That
-is, as with :meth:`Model.make_params`, you can include values
-as keyword arguments to either the :meth:`Model.eval` or :meth:`Model.fit` methods::
-
-   >>> y1 = mod.eval(x=x, a=7.0, b=-2.0)
-
-   >>> out = mod.fit(y, pars, x=x, a=3.0, b=-0.0)
-
-These approaches to initialization provide many opportunities for setting
-initial values for parameters.  The methods can be combined, so that you
-can set parameter hints but then change the initial value explicitly with
-:meth:`Model.fit`.
-
-.. _model_param_hints_section:
-
-Using parameter hints
---------------------------------
-
-
-After a model has been created, you can give it hints for how to create
-parameters with :meth:`Model.make_params`.  This allows you to set not only a
-default initial value but also other parameter attributes
-controlling bounds, whether it is varied in the fit, or a constraint
-expression.   To set a parameter hint, you can use :meth:`Model.set_param_hint`,
-as with::
-
-    >>> mod = Model(myfunc)
-    >>> mod.set_param_hint('a', value = 1.0)
-    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
-
-Parameter hints are stored in a model's :attr:`param_hints` attribute,
-which is simply a nested dictionary::
-
-    >>> print mod.param_hints
-    {'a': {'value': 1}, 'b': {'max': 1.0, 'value': 0.3, 'min': 0}}
-
-
-You can change this dictionary directly, or with the :meth:`Model.set_param_hint`
-method.  Either way, these parameter hints are used by :meth:`Model.make_params`
-when making parameters.
-
-An important feature of parameter hints is that you can force the creation
-of new parameters with parameter hints.  This can be useful to make derived
-parameters with constraint expressions.  For example, to get the full-width
-at half maximum of a Gaussian model, one could use a parameter hint of::
-
-    >>> mod = Model(gaussian)
-    >>> mod.set_param_hint('fwhm', expr='2.3548*sigma')
-
-
-
-The :class:`ModelResult` class
-=======================================
-
-A :class:`ModelResult` (which had been called `ModelFit` prior to version
-0.9) is the object returned by :meth:`Model.fit`.  It is a subclass of
-:class:`Minimizer`, and so contains many of the fit results.  Of course, it
-knows the :class:`Model` and the set of :class:`Parameters` used in the
-fit, and it has methods to evaluate the model, to fit the data (or re-fit
-the data with changes to the parameters, or fit with different or modified
-data) and to print out a report for that fit.
-
-While a :class:`Model` encapsulates your model function, it is fairly
-abstract and does not contain the parameters or data used in a particular
-fit.  A :class:`ModelResult` *does* contain parameters and data as well as
-methods to alter and re-do fits.  Thus the :class:`Model` is the idealized
-model while the :class:`ModelResult` is the messier, more complex (but perhaps
-more useful) object that represents a fit with a set of parameters to data
-with a model.
-
-
-A :class:`ModelResult` has several attributes holding values for fit results,
-and several methods for working with fits.  These include statistics
-inherited from :class:`Minimizer` useful for comparing different models,
-including `chisqr`, `redchi`, `aic`, and `bic`.
-
-.. class:: ModelResult()
-
-    A :class:`ModelResult` is intended to be created and returned by :meth:`Model.fit`.
-
-
-
-:class:`ModelResult` methods
----------------------------------
-
-These methods are all inherited from :class:`Minimizer` or from
-:class:`Model`.
-
-.. method:: ModelResult.eval(**kwargs)
-
-   evaluate the model using the best-fit parameters and supplied
-   independent variables.  The ``**kwargs`` arguments can be used to update
-   parameter values and/or independent variables.
-
-
-.. method:: ModelResult.eval_components(**kwargs)
-
-   evaluate each component of a :class:`CompositeModel`, returning an
-   ordered dictionary with the values for each component model.  The
-   returned dictionary will have keys of the model prefix or (if no prefix
-   is given), the model name.  The ``**kwargs`` arguments can be used to
-   update parameter values and/or independent variables.
-
-.. method:: ModelResult.fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
-
-   fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
-   or ``method``, or changing the independent variable(s) with the
-   ``**kwargs`` argument.  See :meth:`Model.fit` for argument
-   descriptions, and note that any value of ``None`` defaults to the last
-   used value.
-
-.. method:: ModelResult.fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
-
-   return a printable fit report for the fit, with fit statistics, best-fit
-   values with uncertainties, and correlations, as with :func:`fit_report`.
-
-   :param modelpars:    Parameters with "Known Values" (optional, default None)
-   :param show_correl:  whether to show list of sorted correlations [``True``]
-   :param min_correl:   smallest correlation absolute value to show [0.1]
-
-
-.. method:: ModelResult.conf_interval(**kwargs)
-
-   calculate the confidence intervals for the variable parameters using
-   :func:`confidence.conf_interval() <lmfit.conf_interval>`.  All keyword
-   arguments are passed to that function.  The result is stored in
-   :attr:`ci_out`, so the intervals can be accessed again without recalculating them.
-
-.. method:: ModelResult.ci_report(with_offset=True)
-
-   return a nicely formatted text report of the confidence intervals, as
-   from :func:`ci_report() <lmfit.ci_report>`.
-
-
-.. method:: ModelResult.plot(datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None)
-
-   Plot the fit results and residuals using matplotlib, if available.  The
-   plot will include two panels, one showing the fit residual, and the
-   other with the data points, the initial fit curve, and the best-fit
-   curve. If the fit model included weights or if ``yerr`` is specified,
-   errorbars will also be plotted.
-
-   :param datafmt: matplotlib format string for data curve.
-   :type  datafmt: ``None`` or string.
-   :param fitfmt:  matplotlib format string for best-fit curve.
-   :type fitfmt: ``None`` or string.
-   :param initfmt:  matplotlib format string for initial curve.
-   :type initfmt: ``None`` or string.
-   :param yerr:  array of uncertainties for data array.
-   :type  yerr: ``None`` or ndarray.
-   :param numpoints:  number of points to display
-   :type numpoints: ``None`` or integer
-   :param fig: matplotlib Figure to plot on.
-   :type fig:  ``None`` or matplotlib.figure.Figure
-   :param data_kws:  keyword arguments passed to plot for data curve.
-   :type data_kws: ``None`` or dictionary
-   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
-   :type fit_kws: ``None`` or dictionary
-   :param init_kws:  keyword arguments passed to plot for initial curve.
-   :type init_kws: ``None`` or dictionary
-   :param ax_res_kws:  keyword arguments passed to creation of matplotlib axes for the residual plot.
-   :type ax_res_kws: ``None`` or dictionary
-   :param ax_fit_kws:  keyword arguments passed to creation of matplotlib axes for the fit plot.
-   :type ax_fit_kws: ``None`` or dictionary
-   :param fig_kws:  keyword arguments passed to creation of matplotlib figure.
-   :type fig_kws: ``None`` or dictionary
-   :returns:     matplotlib.figure.Figure
-
-   This combines :meth:`ModelResult.plot_fit` and :meth:`ModelResult.plot_residuals`.
-
-   If ``yerr`` is specified or if the fit model included weights, then
-   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
-   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
-
-   If ``fig`` is None then ``matplotlib.pyplot.figure(**fig_kws)`` is called.
-
-.. method:: ModelResult.plot_fit(ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None,  data_kws=None, fit_kws=None, init_kws=None, ax_kws=None)
-
-   Plot the fit results using matplotlib, if available.  The plot will include
-   the data points, the initial fit curve, and the best-fit curve. If the fit
-   model included weights or if ``yerr`` is specified, errorbars will also
-   be plotted.
-
-   :param ax: matplotlib axes to plot on.
-   :type ax:  ``None`` or matplotlib.axes.Axes.
-   :param datafmt: matplotlib format string for data curve.
-   :type  datafmt: ``None`` or string.
-   :param fitfmt:  matplotlib format string for best-fit curve.
-   :type fitfmt: ``None`` or string.
-   :param initfmt:  matplotlib format string for initial curve.
-   :type initfmt: ``None`` or string.
-   :param yerr:  array of uncertainties for data array.
-   :type  yerr: ``None`` or ndarray.
-   :param numpoints:  number of points to display
-   :type numpoints: ``None`` or integer
-   :param data_kws:  keyword arguments passed to plot for data curve.
-   :type data_kws: ``None`` or dictionary
-   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
-   :type fit_kws: ``None`` or dictionary
-   :param init_kws:  keyword arguments passed to plot for initial curve.
-   :type init_kws: ``None`` or dictionary
-   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
-   :type ax_kws: ``None`` or dictionary
-   :returns:     matplotlib.axes.Axes
-
-   For details about plot format strings and keyword arguments see
-   documentation of :func:`matplotlib.axes.Axes.plot`.
-
-   If ``yerr`` is specified or if the fit model included weights, then
-   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
-   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
-
-   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
-
-.. method:: ModelResult.plot_residuals(ax=None, datafmt='o', yerr=None, data_kws=None, fit_kws=None, ax_kws=None)
-
-   Plot the fit residuals (data - fit) using matplotlib.  If ``yerr`` is
-   supplied or if the model included weights, errorbars will also be plotted.
-
-   :param ax: matplotlib axes to plot on.
-   :type ax:  ``None`` or matplotlib.axes.Axes.
-   :param datafmt: matplotlib format string for data curve.
-   :type  datafmt: ``None`` or string.
-   :param yerr:  array of uncertainties for data array.
-   :type  yerr: ``None`` or ndarray.
-   :param data_kws:  keyword arguments passed to plot for data curve.
-   :type data_kws: ``None`` or dictionary
-   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
-   :type fit_kws: ``None`` or dictionary
-   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
-   :type ax_kws: ``None`` or dictionary
-   :returns:     matplotlib.axes.Axes
-
-   For details about plot format strings and keyword arguments see
-   documentation of :func:`matplotlib.axes.Axes.plot`.
-
-   If ``yerr`` is specified or if the fit model included weights, then
-   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
-   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
-
-   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
-
-
-
-
-:class:`ModelResult` attributes
----------------------------------
-
-.. attribute:: aic
-
-   floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
-
-.. attribute:: best_fit
-
-   ndarray result of model function, evaluated at provided
-   independent variables and with best-fit parameters.
-
-.. attribute:: best_values
-
-   dictionary with  parameter names as keys, and best-fit values as values.
-
-.. attribute:: bic
-
-   floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
-
-.. attribute:: chisqr
-
-   floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
-
-.. attribute:: ci_out
-
-   confidence interval data (see :ref:`confidence_chapter`) or `None`  if
-   the confidence intervals have not been calculated.
-
-.. attribute:: covar
-
-   ndarray (square) covariance matrix returned from fit.
-
-.. attribute:: data
-
-   ndarray of data to compare to model.
-
-.. attribute:: errorbars
-
-   boolean for whether error bars were estimated by fit.
-
-.. attribute::  ier
-
-   integer returned code from :func:`scipy.optimize.leastsq`.
-
-.. attribute:: init_fit
-
-   ndarray result of model function, evaluated at provided
-   independent variables and with initial parameters.
-
-.. attribute:: init_params
-
-   initial parameters.
-
-.. attribute:: init_values
-
-   dictionary with  parameter names as keys, and initial values as values.
-
-.. attribute:: iter_cb
-
-   optional callable function, to be called at each fit iteration.  This
-   must take arguments of ``params, iter, resid, *args, **kws``, where
-   ``params`` will have the current parameter values, ``iter`` the
-   iteration, ``resid`` the current residual array, and ``*args`` and
-   ``**kws`` as passed to the objective function.  See :ref:`fit-itercb-label`.
-
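-   As a minimal sketch (the function name and what it prints are purely
-   illustrative, and the parameter name ``'amp'`` is hypothetical), such a
-   callback might look like::
-
-       def per_iteration(params, iter, resid, *args, **kws):
-           # print progress every 10th iteration
-           if iter % 10 == 0:
-               print(iter, params['amp'].value, (resid**2).sum())
-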
-.. attribute:: jacfcn
-
-   optional callable function, to be called to calculate the Jacobian array.
-
-.. attribute::  lmdif_message
-
-   string message returned from :func:`scipy.optimize.leastsq`.
-
-.. attribute::  message
-
-   string message returned from :func:`minimize`.
-
-.. attribute::  method
-
-   string naming fitting method for :func:`minimize`.
-
-.. attribute::  model
-
-   instance of :class:`Model` used for model.
-
-.. attribute::  ndata
-
-    integer number of data points.
-
-.. attribute::  nfev
-
-    integer number of function evaluations used for fit.
-
-.. attribute::  nfree
-
-    integer number of free parameters in fit.
-
-.. attribute::  nvarys
-
-    integer number of independent, freely varying variables in fit.
-
-.. attribute::  params
-
-    Parameters used in fit.  Will have best-fit values.
-
-.. attribute::  redchi
-
-    floating point reduced chi-square statistic (see :ref:`fit-results-label`).
-
-.. attribute::  residual
-
-   ndarray for residual.
-
-.. attribute::  scale_covar
-
-   boolean flag for whether to automatically scale covariance matrix.
-
-.. attribute:: success
-
-   boolean value of whether fit succeeded.
-
-.. attribute:: weights
-
-   ndarray (or ``None``) of weighting values to be used in fit.  If not
-   ``None``, it will be used as a multiplicative factor of the residual
-   array, so that ``weights*(data - fit)`` is minimized in the
-   least-squares sense.
-
-.. index:: Composite models
-
-.. _composite_models_section:
-
-
-Composite Models : adding (or multiplying) Models
-==============================================================
-
-One of the more interesting features of the :class:`Model` class is that
-Models can be added together or combined with basic algebraic operations
-(add, subtract, multiply, and divide) to give a composite model.  The
-composite model will have parameters from each of the component models,
-with all parameters being available to influence the whole model.  This
-ability to combine models will become even more useful in the next chapter,
-when pre-built subclasses of :class:`Model` are discussed.  For now, we'll
-consider a simple example, and build a model of a Gaussian plus a line to
-model a peak with a background.  For such a simple problem, we could just
-build a model that includes both components::
-
-    def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
-        "line + 1-d gaussian"
-
-        gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
-        line = slope * x + intercept
-        return gauss + line
-
-and use that with::
-
-    mod = Model(gaussian_plus_line)
-
-But we already have a function for a Gaussian, and we may discover that a
-linear background isn't sufficient, which would mean the model function
-would have to be changed.  As an alternative we could define a linear
-function::
-
-    def line(x, slope, intercept):
-        "a line"
-        return slope * x + intercept
-
-and build a composite model with just::
-
-    mod = Model(gaussian) + Model(line)
-
-This model has parameters for both component models, and can be used as:
-
-.. literalinclude:: ../examples/doc_model2.py
-
-which prints out the results::
-
-    [[Model]]
-        (Model(gaussian) + Model(line))
-    [[Fit Statistics]]
-        # function evals   = 44
-        # data points      = 101
-        # variables        = 5
-        chi-square         = 2.579
-        reduced chi-square = 0.027
-    [[Variables]]
-        amp:         8.45931061 +/- 0.124145 (1.47%) (init= 5)
-        cen:         5.65547872 +/- 0.009176 (0.16%) (init= 5)
-        intercept:  -0.96860201 +/- 0.033522 (3.46%) (init= 1)
-        slope:       0.26484403 +/- 0.005748 (2.17%) (init= 0)
-        wid:         0.67545523 +/- 0.009916 (1.47%) (init= 1)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(amp, wid)                  =  0.666
-        C(cen, intercept)            =  0.129
-
-
-and shows the plot on the left.
-
-.. _figModel2:
-
-  .. image:: _images/model_fit2.png
-     :target: _images/model_fit2.png
-     :width: 48%
-  .. image:: _images/model_fit2a.png
-     :target: _images/model_fit2a.png
-     :width: 48%
-
-
-On the left, data is shown as blue dots, the total fit as a solid red
-line, and the initial fit as a black dashed line.  In the figure on the
-right, the data is again shown as blue dots, with the Gaussian component
-as a black dashed line and the linear component as a red dashed line.
-These components were generated after the fit using the
-:meth:`ModelResult.eval_components` method of `result`::
-
-    comps = result.eval_components()
-
-which returns a dictionary of the components, using keys of the model name
-(or `prefix` if that is set).  This will use the parameter values in
-``result.params`` and the independent variables (``x``) used during the
-fit.  Note that while the :class:`ModelResult` held in `result` does store the
-best parameters and the best estimate of the model in ``result.best_fit``,
-the original model and parameters in ``pars`` are left unaltered.
-
-You can apply this composite model to other data sets, or evaluate the
-model at other values of ``x``.  You may want to do this to give a finer or
-coarser spacing of data points, or to extrapolate the model outside the
-fitting range.  This can be done with::
-
-    xwide = np.linspace(-5, 25, 3001)
-    predicted = mod.eval(result.params, x=xwide)
-
-In this example, the argument names for the model functions do not overlap.
-If they had, the ``prefix`` argument to :class:`Model` would have allowed
-us to identify which parameter went with which component model.  As we will
-see in the next chapter, using composite models with the built-in models
-provides a simple way to build up complex models.
-
-.. class::  CompositeModel(left, right, op[, **kws])
-
-    Create a composite model from two models (`left` and `right`) and a
-    binary operator (`op`).  Additional keywords are passed to
-    :class:`Model`.
-
-    :param left: left-hand side Model
-    :type left: :class:`Model`
-    :param right: right-hand side Model
-    :type right: :class:`Model`
-    :param op: binary operator
-    :type op: callable, and taking 2 arguments (`left` and `right`).
-
-Normally, one does not have to explicitly create a :class:`CompositeModel`,
-as doing::
-
-     mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
-
-will automatically create a :class:`CompositeModel`.  In this example,
-`mod.left` will be `Model(fcn1)`, `mod.op` will be :func:`operator.add`,
-and `mod.right` will be another CompositeModel that has a `left` attribute
-of `Model(fcn2)`, an `op` of :func:`operator.mul`, and a `right` of
-`Model(fcn3)`.
-
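-A short sketch (the functions here are purely illustrative) showing how
-you might inspect that tree::
-
-    import operator
-    from lmfit import Model
-
-    def fcn1(x, a):
-        return a * x
-
-    def fcn2(x, b):
-        return x + b
-
-    def fcn3(x, c):
-        return c * x**2
-
-    mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
-    print(mod.op is operator.add)         # True
-    print(mod.right.op is operator.mul)   # True
-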
-If you want to use a binary operator other than the add, subtract, multiply,
-or divide operators supported through normal Python syntax, you'll need to
-explicitly create a :class:`CompositeModel` with the appropriate binary
-operator.  For example, to convolve two models, you could define a simple
-convolution function, perhaps as::
-
-    import numpy as np
-    def convolve(dat, kernel):
-        # simple convolution
-        npts = min(len(dat), len(kernel))
-        pad  = np.ones(npts)
-        tmp  = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
-        out  = np.convolve(tmp, kernel, mode='valid')
-        noff = int((len(out) - npts)/2)
-        return (out[noff:])[:npts]
-
-which extends the data in both directions so that the convolving kernel
-function gives a valid result over the data range.  Because this function
-takes two array arguments and returns an array, it can be used as the
-binary operator.  A full script using this technique is here:
-
-.. literalinclude:: ../examples/doc_model3.py
-
-which prints out the results::
-
-    [[Model]]
-        (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
-    [[Fit Statistics]]
-        # function evals   = 25
-        # data points      = 201
-        # variables        = 3
-        chi-square         = 21.692
-        reduced chi-square = 0.110
-    [[Variables]]
-        amplitude:   0.62106099 +/- 0.001783 (0.29%) (init= 1)
-        center:      4.49913218 +/- 0.009373 (0.21%) (init= 3.5)
-        mid:         5 (fixed)
-        sigma:       0.61936067 +/- 0.012977 (2.10%) (init= 1)
-    [[Correlations]] (unreported correlations are <  0.100)
-        C(amplitude, center)         =  0.336
-        C(amplitude, sigma)          =  0.274
-
-and shows the plots:
-
-.. _figModel3:
-
-  .. image:: _images/model_fit3a.png
-     :target: _images/model_fit3a.png
-     :width: 48%
-  .. image:: _images/model_fit3b.png
-     :target: _images/model_fit3b.png
-     :width: 48%
-
-Using composite models with built-in or custom operators allows you to
-build complex models from testable sub-components.
+.. _model_chapter:
+
+=================================================
+Modeling Data and Curve Fitting
+=================================================
+
+.. module:: model
+
+A common use of least-squares minimization is *curve fitting*, where one
+has a parametrized model function meant to explain some phenomena and wants
+to adjust the numerical values for the model to most closely match some
+data.  With :mod:`scipy`, such problems are commonly solved with
+:scipydoc:`scipy.optimize.curve_fit`, which is a wrapper around
+:scipydoc:`scipy.optimize.leastsq`.  Since Lmfit's :func:`minimize` is also
+a high-level wrapper around :scipydoc:`scipy.optimize.leastsq`, it can be
+used for curve-fitting problems, though it requires more effort than
+:scipydoc:`scipy.optimize.curve_fit`.
+
+
+Here we discuss lmfit's :class:`Model` class.  This takes a model function
+-- a function that calculates a model for some data -- and provides methods
+to create parameters for that model and to fit data using that model
+function.  This is closer in spirit to :scipydoc:`scipy.optimize.curve_fit`,
+but with the advantages of using :class:`Parameters` and lmfit.
+
+In addition to allowing you to turn any model function into a curve-fitting
+method, Lmfit also provides canonical definitions for many known line shapes
+such as Gaussian or Lorentzian peaks and Exponential decays that are widely
+used in many scientific domains.  These are available in the :mod:`models`
+module that will be discussed in more detail in the next chapter
+(:ref:`builtin_models_chapter`).  We mention it here as you may want to
+consult that list before writing your own model.  For now, we focus on
+turning Python functions into high-level fitting models with the
+:class:`Model` class, and using these to fit data.
+
+
+Example: Fit data to Gaussian profile
+================================================
+
+Let's start with a simple and common example of fitting data to a Gaussian
+peak.  As we will see, there is a built-in :class:`GaussianModel` class that
+provides a model function for a Gaussian profile, but here we'll build our
+own.  We start with a simple definition of the model function:
+
+    >>> from numpy import sqrt, pi, exp, linspace
+    >>>
+    >>> def gaussian(x, amp, cen, wid):
+    ...    return amp * exp(-(x-cen)**2 /wid)
+    ...
+
+We want to fit this model function to data :math:`y(x)` represented by the
+arrays ``y`` and ``x``.  This can be done easily with :scipydoc:`optimize.curve_fit`::
+
+    >>> import numpy as np
+    >>> from scipy.optimize import curve_fit
+    >>>
+    >>> x = linspace(-10, 10)
+    >>> y = gaussian(x, 2.33, 0.21, 1.51) + np.random.normal(0, 0.2, len(x))
+    >>>
+    >>> init_vals = [1, 0, 1]     # for [amp, cen, wid]
+    >>> best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
+    >>> print best_vals
+
+
+We generate noisy data points, make an initial guess of the model
+values, and run :scipydoc:`optimize.curve_fit` with the model function,
+data arrays, and initial guesses.  The results returned are the optimal
+values for the parameters and the covariance matrix.  It's simple and very
+useful.  But it misses the benefits of lmfit.
+
+
+To solve this with lmfit we would have to write an objective function. But
+such a function would be fairly simple (essentially, ``data - model``,
+possibly with some weighting), and we would need to define and use
+appropriately named parameters.  Though convenient, it is somewhat of a
+burden to keep the named parameters straight (on the other hand, with
+:scipydoc:`optimize.curve_fit` you are required to remember the parameter
+order).  After doing this a few times it appears as a recurring pattern,
+and we can imagine automating this process.  That's where the
+:class:`Model` class comes in.
+
+:class:`Model` allows us to easily wrap a model function such as the
+``gaussian`` function.  This automatically generates the appropriate
+residual function, and determines the corresponding parameter names from
+the function signature itself::
+
+    >>> from lmfit import Model
+    >>> gmod = Model(gaussian)
+    >>> gmod.param_names
+    set(['amp', 'wid', 'cen'])
+    >>> gmod.independent_vars
+    ['x']
+
+The Model ``gmod`` knows the names of the parameters and the independent
+variables.  By default, the first argument of the function is taken as the
+independent variable, held in :attr:`independent_vars`, and the rest of the
+function's positional arguments (and, in certain cases, keyword arguments --
+see below) are used for Parameter names.  Thus, for the ``gaussian``
+function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
+``x`` is the independent variable -- all taken directly from the signature
+of the model function. As we will see below, you can specify what the
+independent variable is, and you can add or alter parameters, too.
+
+The parameters are *not* created when the model is created. The model knows
+what the parameters should be named, but not anything about the scale and
+range of your data.  You will normally have to make these parameters and
+assign initial values and other attributes.  To help you do this, each
+model has a :meth:`make_params` method that will generate parameters with
+the expected names:
+
+    >>> params = gmod.make_params()
+
+This creates the :class:`Parameters` but doesn't necessarily give them
+initial values -- again, the model has no idea what the scale should be.
+You can set initial values for parameters with keyword arguments to
+:meth:`make_params`:
+
+
+    >>> params = gmod.make_params(cen=5, amp=200, wid=1)
+
+or assign them (and other parameter properties) after the
+:class:`Parameters` has been created.
+
+A :class:`Model` has several methods associated with it.  For example, one
+can use the :meth:`eval` method to evaluate the model or the :meth:`fit`
+method to fit data to this model with a :class:`Parameters` object.  Both of
+these methods can take explicit keyword arguments for the parameter values.
+For example, one could use :meth:`eval` to calculate the predicted
+function::
+
+    >>> x = linspace(0, 10, 201)
+    >>> y = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)
+
+Admittedly, this is a slightly long-winded way to calculate a Gaussian
+function.   But now that the model is set up, we can also use its
+:meth:`fit` method to fit this model to data, as with::
+
+    >>> result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+Putting everything together, the script to do such a fit (included in the
+``examples`` folder with the source code) is:
+
+.. literalinclude:: ../examples/doc_model1.py
+
+which is pretty compact and to the point.  The returned ``result`` will be
+a :class:`ModelResult` object.  As we will see below, this has many
+components, including a :meth:`fit_report` method, which will show::
+
+    [[Model]]
+        gaussian
+    [[Fit Statistics]]
+        # function evals   = 33
+        # data points      = 101
+        # variables        = 3
+        chi-square         = 3.409
+        reduced chi-square = 0.035
+        Akaike info crit   = -333.218
+        Bayesian info crit = -325.373
+    [[Variables]]
+        amp:   8.88021829 +/- 0.113594 (1.28%) (init= 5)
+        cen:   5.65866102 +/- 0.010304 (0.18%) (init= 5)
+        wid:   0.69765468 +/- 0.010304 (1.48%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amp, wid)                  =  0.577
+
+The result will also have :attr:`init_fit` for the fit with the initial
+parameter values and a :attr:`best_fit` for the fit with the best fit
+parameter values.  These can be used to generate the following plot:
+
+
+.. image:: _images/model_fit1.png
+   :target: _images/model_fit1.png
+   :width: 50%
+
+which shows the data in blue dots, the best fit as a solid red line, and
+the initial fit as a dashed black line.
+
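+A minimal matplotlib sketch (assuming the ``x``, ``y``, and ``result``
+from the script above) that produces such a plot might be::
+
+    import matplotlib.pyplot as plt
+
+    plt.plot(x, y, 'bo')                   # data
+    plt.plot(x, result.init_fit, 'k--')    # initial fit
+    plt.plot(x, result.best_fit, 'r-')     # best fit
+    plt.show()
+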
+Note that the model fitting was really performed with 2 lines of code::
+
+    gmod = Model(gaussian)
+    result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+These lines clearly express that we want to turn the ``gaussian`` function
+into a fitting model, and then fit the :math:`y(x)` data to this model,
+starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``.
+This is much more expressive than :scipydoc:`optimize.curve_fit`::
+
+    best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
+
+In addition, all the other features of lmfit are included:
+:class:`Parameters` can have bounds and constraints and the result is a
+rich object that can be reused to explore the model fit in detail.
+
+
+The :class:`Model` class
+=======================================
+
+The :class:`Model` class provides a general way to wrap a pre-defined
+function as a fitting model.
+
+.. class::  Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix=''[, name=None[, **kws]]]]]])
+
+    Create a model based on the user-supplied function.  This uses
+    introspection to automatically convert argument names of the
+    function to Parameter names.
+
+    :param func: model function to be wrapped
+    :type func: callable
+    :param independent_vars: list of argument names to ``func`` that are independent variables.
+    :type independent_vars: ``None`` (default) or list of strings.
+    :param param_names: list of argument names to ``func`` that should be made into Parameters.
+    :type param_names: ``None`` (default) or list of strings
+    :param missing: how to handle missing values.
+    :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
+    :param prefix: prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`.
+    :type prefix: string
+    :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
+    :type name: ``None`` or string.
+    :param kws:   additional keyword arguments to pass to model function.
+
+
+Of course, the model function will have to return an array that will be the
+same size as the data being modeled.  Generally this is handled by also
+specifying one or more independent variables.
+
+
+:class:`Model` class Methods
+---------------------------------
+
+.. method:: Model.eval(params=None[, **kws])
+
+   evaluate the model function for a set of parameters and inputs.
+
+   :param params: parameters to use for fit.
+   :type params: ``None`` (default) or Parameters
+   :param kws:    additional keyword arguments to pass to model function.
+   :return:       ndarray for model given the parameters and other arguments.
+
+   If ``params`` is ``None``, the values for all parameters are expected to
+   be provided as keyword arguments.  If ``params`` is given, and a keyword
+   argument for a parameter value is also given, the keyword argument will
+   be used.
+
+   Note that all non-parameter arguments for the model function --
+   **including all the independent variables!** -- will need to be passed
+   in using keyword arguments.
+
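+   As a brief sketch (re-using the ``gmod`` Gaussian model from above, with
+   ``params`` assumed to come from :meth:`make_params`)::
+
+       y1 = gmod.eval(x=x, amp=10, cen=6.2, wid=0.75)  # all values as keywords
+       y2 = gmod.eval(params, x=x, cen=6.5)            # keyword overrides params value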
+
+.. method:: Model.fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, verbose=True[, **kws]]]]]]])
+
+   perform a fit of the model to the ``data`` array with a set of
+   parameters.
+
+   :param data: array of data to be fitted.
+   :type data: ndarray-like
+   :param params: parameters to use for fit.
+   :type params: ``None`` (default) or Parameters
+   :param weights: weights to use for residual calculation in fit.
+   :type weights: ``None`` (default) or ndarray-like.
+   :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
+   :type  method:  string (default ``leastsq``)
+   :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
+   :type  scale_covar:  bool (default ``True``)
+   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
+   :type  iter_cb:  callable or ``None``
+   :param verbose:  print a message when a new parameter is created due to a *hint*
+   :type  verbose:  bool (default ``True``)
+   :param kws:      additional keyword arguments to pass to model function.
+   :return:         :class:`ModelResult` object.
+
+   If ``params`` is ``None``, the internal ``params`` will be used. If it
+   is supplied, these will replace the internal ones.   If supplied,
+   ``weights`` will be used to weight the calculated residual so that the
+   quantity minimized in the least-squares sense is ``weights*(data -
+   fit)``.  ``weights`` must be an ndarray-like object of same size and
+   shape as ``data``.
+
+   Note that other arguments for the model function (including all the
+   independent variables!) will need to be passed in using keyword
+   arguments.
+
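+   As a sketch of a weighted fit (``x``, ``y``, and ``yerr`` are assumed to
+   be same-sized ndarrays, with ``yerr`` holding the uncertainty in ``y``)::
+
+       result = gmod.fit(y, params, x=x, weights=1.0/yerr)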
+
+.. method:: Model.guess(data, **kws)
+
+   Guess starting values for model parameters.
+
+   :param data: data array used to guess parameter values
+   :type data:  ndarray
+   :param kws:  additional options to pass to model function.
+   :return: :class:`Parameters` with guessed initial values for each parameter.
+
+   By default this is left to raise a ``NotImplementedError``, but it may be
+   overwritten by subclasses.  Generally, this method should take some
+   values for ``data`` and use them to construct reasonable starting values
+   for the parameters.
+
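+   A minimal sketch of a subclass providing a guess, using the ``decay``
+   function shown later in this chapter (the starting-value heuristics here
+   are purely illustrative)::
+
+       class DecayModel(Model):
+           def __init__(self, *args, **kws):
+               super(DecayModel, self).__init__(decay, *args, **kws)
+
+           def guess(self, data, t=None, **kws):
+               # crude estimates: amplitude from the first data point,
+               # decay time from the span of the independent variable
+               return self.make_params(N=data[0], tau=(t.max() - t.min())/2.0)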
+
+.. method:: Model.make_params(**kws)
+
+   Create a set of parameters for model.
+
+   :param kws:  optional keyword/value pairs to set initial values for parameters.
+   :return: :class:`Parameters`.
+
+   The parameters may or may not have sensible initial values.
+
+
+.. method:: Model.set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
+
+   set *hints* to use when creating parameters with :meth:`Model.make_params` for
+   the named parameter.  This is especially convenient for setting initial
+   values.  The ``name`` can include the model's ``prefix`` or not.
+
+   :param name: parameter name.
+   :type name: string
+   :param value: value for parameter
+   :type value: float
+   :param min:  lower bound for parameter value
+   :type min: ``None`` or float
+   :param max:  upper bound for parameter value
+   :type max: ``None`` or float
+   :param vary:  whether to vary parameter in fit.
+   :type vary: boolean
+   :param expr:  mathematical expression for constraint
+   :type expr: string
+
+   See :ref:`model_param_hints_section`.
+
+
+.. automethod:: lmfit.model.Model.print_param_hints
+
+
+:class:`Model` class Attributes
+---------------------------------
+
+.. attribute:: func
+
+   The model function used to calculate the model.
+
+.. attribute:: independent_vars
+
+   list of strings for names of the independent variables.
+
+.. attribute:: missing
+
+   describes what to do for missing values.  The choices are
+
+    * ``None``: Do not check for null or missing values (default)
+    * ``'none'``: Do not check for null or missing values.
+    * ``'drop'``: Drop null or missing observations in data.  If pandas is
+                installed, :func:`pandas.isnull` is used, otherwise :func:`numpy.isnan` is used.
+    * ``'raise'``: Raise a (more helpful) exception when data contains null
+                  or missing values.
+
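+   For example, to silently drop NaNs from the data before fitting (a
+   sketch, using the ``decay`` function from this chapter)::
+
+       mod = Model(decay, missing='drop')
+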
+.. attribute:: name
+
+   name of the model, used only in the string representation of the
+   model. By default this will be taken from the model function.
+
+.. attribute:: opts
+
+   extra keyword arguments to pass to model function.  Normally this will
+   be determined internally and should not be changed.
+
+.. attribute:: param_hints
+
+   Dictionary of parameter hints.  See :ref:`model_param_hints_section`.
+
+.. attribute:: param_names
+
+   list of strings of parameter names.
+
+.. attribute:: prefix
+
+   prefix used for name-mangling of parameter names.  The default is ''.
+   If a particular :class:`Model` has arguments ``amplitude``,
+   ``center``, and ``sigma``, these would become the parameter names.
+   Using a prefix of ``g1_`` would convert these parameter names to
+   ``g1_amplitude``, ``g1_center``, and ``g1_sigma``.   This can be
+   essential to avoid name collision in composite models.
+
+
+Determining parameter names and independent variables for a function
+-----------------------------------------------------------------------
+
+A :class:`Model` created from the supplied function ``func`` will create
+a :class:`Parameters` object with parameter names inferred from the function
+arguments; a residual function is constructed automatically.
+
+
+By default, the independent variable is taken as the first argument to the
+function.  You can explicitly set this, of course, and will need to if the
+independent variable is not first in the list, or if there is more than
+one independent variable.
+
+If not specified, Parameters are constructed from all positional arguments
+and all keyword arguments that have a default value that is numerical, except
+the independent variable, of course.   Importantly, the Parameters can be
+modified after creation.  In fact, you'll have to do this because none of the
+parameters have valid initial values.  You can place bounds and constraints
+on Parameters, or fix their values.
+
+
+
+Explicitly specifying ``independent_vars``
+-------------------------------------------------
+
+As we saw for the Gaussian example above, creating a :class:`Model` from a
+function is fairly easy. Let's try another::
+
+    >>> def decay(t, tau, N):
+    ...    return N*np.exp(-t/tau)
+    ...
+    >>> decay_model = Model(decay)
+    >>> print decay_model.independent_vars
+    ['t']
+    >>> for pname, par in decay_model.params.items():
+    ...     print pname, par
+    ...
+    tau <Parameter 'tau', None, bounds=[None:None]>
+    N <Parameter 'N', None, bounds=[None:None]>
+
+Here, ``t`` is assumed to be the independent variable because it is the
+first argument to the function.  The other function arguments are used to
+create parameters for the model.
+
+If you want ``tau`` to be the independent variable in the above example,
+you can say so::
+
+    >>> decay_model = Model(decay, independent_vars=['tau'])
+    >>> print decay_model.independent_vars
+    ['tau']
+    >>> for pname, par in decay_model.params.items():
+    ...     print pname, par
+    ...
+    t <Parameter 't', None, bounds=[None:None]>
+    N <Parameter 'N', None, bounds=[None:None]>
+
+
+You can also supply multiple values for multi-dimensional functions with
+multiple independent variables.  In fact, the meaning of *independent
+variable* here is simple, and is based on how the :class:`Model` treats the
+arguments of the function you are modeling:
+
+independent variable
+    a function argument that is not a parameter or otherwise part of the
+    model, and that will be required to be explicitly provided as a
+    keyword argument for each fit with :meth:`Model.fit` or evaluation
+    with :meth:`Model.eval`.
+
+Note that independent variables are not required to be arrays, or even
+floating point numbers.
+
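+As a sketch of a model with two independent variables (the function and
+variable names here are purely illustrative)::
+
+    def plane(x, y, mx, my, c):
+        "a 2-d linear model"
+        return mx*x + my*y + c
+
+    mod = Model(plane, independent_vars=['x', 'y'])
+    # both x and y must now be passed to fit() and eval(), e.g.
+    #   result = mod.fit(data, params, x=x, y=y)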
+
+Functions with keyword arguments
+-----------------------------------------
+
+If the model function has keyword arguments, these are turned into
+Parameters if the supplied default value is a valid number (but not
+``None``, ``True``, or ``False``)::
+
+    >>> def decay2(t, tau, N=10, check_positive=False):
+    ...    if check_positive:
+    ...        arg = abs(t)/max(1.e-9, abs(tau))
+    ...    else:
+    ...        arg = t/tau
+    ...    return N*np.exp(-arg)
+    ...
+    >>> mod = Model(decay2)
+    >>> for pname, par in mod.params.items():
+    ...     print pname, par
+    ...
+    t <Parameter 't', None, bounds=[None:None]>
+    N <Parameter 'N', 10, bounds=[None:None]>
+
+Here, even though ``N`` is a keyword argument to the function, it is turned
+into a parameter, with the default numerical value as its initial value.
+By default, it is permitted to be varied in the fit -- the 10 is taken as
+an initial value, not a fixed value.  On the other hand, the
+``check_positive`` keyword argument was not converted to a parameter
+because it has a boolean default value.  In some sense,
+``check_positive`` becomes like an independent variable to the model.
+However, because it has a default value it is not required to be given for
+each model evaluation or fit, as independent variables are.
+
+Defining a ``prefix`` for the Parameters
+--------------------------------------------
+
+As we will see in the next chapter when combining models, it is sometimes
+necessary to decorate the parameter names in the model, but still have them
+be correctly used in the underlying model function.  This would be
+necessary, for example, if two parameters in a composite model (see
+:ref:`composite_models_section` or examples in the next chapter) would have
+the same name.  To avoid this, we can add a ``prefix`` to the
+:class:`Model` which will automatically do this mapping for us::
+
+    >>> def myfunc(x, amplitude=1, center=0, sigma=1):
+    ...
+
+    >>> mod = Model(myfunc, prefix='f1_')
+    >>> for pname, par in mod.params.items():
+    ...     print pname, par
+    ...
+    f1_amplitude <Parameter 'f1_amplitude', None, bounds=[None:None]>
+    f1_center <Parameter 'f1_center', None, bounds=[None:None]>
+    f1_sigma <Parameter 'f1_sigma', None, bounds=[None:None]>
+
+You would refer to these parameters as ``f1_amplitude`` and so forth, and
+the model will know to map these to the ``amplitude`` argument of ``myfunc``.
+
+
+Initializing model parameters
+-----------------------------------------
+
+As mentioned above, the parameters created by :meth:`Model.make_params` are
+generally created with invalid initial values of ``None``.  These values
+**must** be initialized in order for the model to be evaluated or used in a
+fit.  There are four different ways to do this initialization that can be
+used in any combination:
+
+  1. You can supply initial values in the definition of the model function.
+  2. You can initialize the parameters when creating parameters with :meth:`Model.make_params`.
+  3. You can give parameter hints with :meth:`Model.set_param_hint`.
+  4. You can supply initial values for the parameters when you use the
+     :meth:`Model.eval` or :meth:`Model.fit` methods.
+
+Of course these methods can be mixed, allowing you to overwrite initial
+values at any point in the process of defining and using the model.
+
+Initializing values in the function definition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To supply initial values for parameters in the definition of the model
+function, you can simply supply a default value::
+
+    >>> def myfunc(x, a=1, b=0):
+    >>>     ...
+
+instead of using::
+
+    >>> def myfunc(x, a, b):
+    >>>     ...
+
+This has the advantage of working at the function level -- all parameters
+with keywords can be treated as options.  It also means that some default
+initial value will always be available for the parameter.
+
+
+Initializing values with :meth:`Model.make_params`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When creating parameters with :meth:`Model.make_params` you can specify initial
+values.  To do this, use keyword arguments for the parameter names and
+initial values::
+
+    >>> mod = Model(myfunc)
+    >>> pars = mod.make_params(a=3, b=0.5)
+
+
+Initializing values by setting parameter hints
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+After a model has been created, but prior to creating parameters with
+:meth:`Model.make_params`, you can set parameter hints.  These allow you to
+set not only a default initial value but also other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression.  To set a parameter hint, you can use :meth:`Model.set_param_hint`,
+as with::
+
+    >>> mod = Model(myfunc)
+    >>> mod.set_param_hint('a', value = 1.0)
+    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+    >>> pars = mod.make_params()
+
+Parameter hints are discussed in more detail in section
+:ref:`model_param_hints_section`.
+
+
+Initializing values when using a model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally, you can explicitly supply initial values when using a model.  That
+is, as with :meth:`Model.make_params`, you can include values
+as keyword arguments to either the :meth:`Model.eval` or :meth:`Model.fit` methods::
+
+   >>> y1 = mod.eval(x=x, a=7.0, b=-2.0)
+
+   >>> out = mod.fit(y, pars, x=x, a=3.0, b=-0.0)
+
+These approaches to initialization provide many opportunities for setting
+initial values for parameters.  The methods can be combined, so that you
+can set parameter hints but then change the initial value explicitly with
+:meth:`Model.fit`.
+
+.. _model_param_hints_section:
+
+Using parameter hints
+--------------------------------
+
+
+After a model has been created, you can give it hints for how to create
+parameters with :meth:`Model.make_params`.  This allows you to set not only a
+default initial value but also other parameter attributes
+controlling bounds, whether it is varied in the fit, or a constraint
+expression.   To set a parameter hint, you can use :meth:`Model.set_param_hint`,
+as with::
+
+    >>> mod = Model(myfunc)
+    >>> mod.set_param_hint('a', value = 1.0)
+    >>> mod.set_param_hint('b', value = 0.3, min=0, max=1.0)
+
+Parameter hints are stored in a model's :attr:`param_hints` attribute,
+which is simply a nested dictionary::
+
+    >>> print mod.param_hints
+    {'a': {'value': 1}, 'b': {'max': 1.0, 'value': 0.3, 'min': 0}}
+
+
+You can change this dictionary directly, or with the :meth:`Model.set_param_hint`
+method.  Either way, these parameter hints are used by :meth:`Model.make_params`
+when making parameters.
+
+An important feature of parameter hints is that you can force the creation
+of new parameters with parameter hints.  This can be useful to make derived
+parameters with constraint expressions.  For example, to get the full-width
+at half maximum of a Gaussian model, one could use a parameter hint of::
+
+    >>> mod = Model(gaussian)
+    >>> mod.set_param_hint('fwhm', expr='2.3548*sigma')
+
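+With this hint in place, :meth:`Model.make_params` will also create the
+``fwhm`` parameter, tied to ``sigma`` by the constraint expression, so its
+value is reported along with the fit results::
+
+    >>> pars = mod.make_params()
+    >>> 'fwhm' in pars
+    True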
+
+
+The :class:`ModelResult` class
+=======================================
+
+A :class:`ModelResult` (which had been called `ModelFit` prior to version
+0.9) is the object returned by :meth:`Model.fit`.  It is a subclass of
+:class:`Minimizer`, and so contains many of the fit results.  Of course, it
+knows the :class:`Model` and the set of :class:`Parameters` used in the
+fit, and it has methods to evaluate the model, to fit the data (or re-fit
+the data with changes to the parameters, or fit with different or modified
+data) and to print out a report for that fit.
+
+While a :class:`Model` encapsulates your model function, it is fairly
+abstract and does not contain the parameters or data used in a particular
+fit.  A :class:`ModelResult` *does* contain parameters and data as well as
+methods to alter and re-do fits.  Thus the :class:`Model` is the idealized
+model while the :class:`ModelResult` is the messier, more complex (but perhaps
+more useful) object that represents a fit with a set of parameters to data
+with a model.
+
+
+A :class:`ModelResult` has several attributes holding values for fit results,
+and several methods for working with fits.  These include statistics
+inherited from :class:`Minimizer` useful for comparing different models,
+including `chisqr`, `redchi`, `aic`, and `bic`.
+
+.. class:: ModelResult()
+
+    A :class:`ModelResult` is intended to be created and returned by :meth:`Model.fit`.
+
+
+
+:class:`ModelResult` methods
+---------------------------------
+
+These methods are all inherited from :class:`Minimizer` or from
+:class:`Model`.
+
+.. method:: ModelResult.eval(**kwargs)
+
+   evaluate the model using the best-fit parameters and supplied
+   independent variables.  The ``**kwargs`` arguments can be used to update
+   parameter values and/or independent variables.
+
+
+.. method:: ModelResult.eval_components(**kwargs)
+
+   evaluate each component of a :class:`CompositeModel`, returning an
+   ordered dictionary with the values for each component model.  The
+   returned dictionary will have keys of the model prefix or (if no prefix
+   is given), the model name.  The ``**kwargs`` arguments can be used to
+   update parameter values and/or independent variables.
+
+.. method:: ModelResult.fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
+
+   fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
+   or ``method``, or changing the independent variable(s) with the
+   ``**kwargs`` argument.  See :meth:`Model.fit` for argument
+   descriptions, and note that any value of ``None`` defaults to the last
+   used value.
+
+.. method:: ModelResult.fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+
+   return a printable fit report for the fit, with fit statistics, best-fit
+   values with uncertainties, and correlations, as with :func:`fit_report`.
+
+   :param modelpars:    Parameters with "Known Values" (optional, default None)
+   :param show_correl:  whether to show list of sorted correlations [``True``]
+   :param min_correl:   smallest correlation absolute value to show [0.1]
+
+
+.. method:: ModelResult.conf_interval(**kwargs)
+
+   calculate the confidence intervals for the variable parameters using
+   :func:`confidence.conf_interval() <lmfit.conf_interval>`.  All keyword
+   arguments are passed to that function.  The result is stored in
+   :attr:`ci_out`, so the intervals can be accessed again without recalculating them.
+
+.. method:: ModelResult.ci_report(with_offset=True)
+
+   return a nicely formatted text report of the confidence intervals, as
+   from :func:`ci_report() <lmfit.ci_report>`.
+
+
+.. method:: ModelResult.plot(datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None)
+
+   Plot the fit results and residuals using matplotlib, if available.  The
+   plot will include two panels, one showing the fit residual, and the
+   other with the data points, the initial fit curve, and the best-fit
+   curve. If the fit model included weights or if ``yerr`` is specified,
+   errorbars will also be plotted.
+
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param fitfmt:  matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string.
+   :param initfmt:  matplotlib format string for initial curve.
+   :type initfmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param numpoints:  number of points to display
+   :type numpoints: ``None`` or integer
+   :param fig: matplotlib Figure to plot on.
+   :type fig:  ``None`` or matplotlib.figure.Figure
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param init_kws:  keyword arguments passed to plot for initial curve.
+   :type init_kws: ``None`` or dictionary
+   :param ax_res_kws:  keyword arguments passed to creation of matplotlib axes for the residual plot.
+   :type ax_res_kws: ``None`` or dictionary
+   :param ax_fit_kws:  keyword arguments passed to creation of matplotlib axes for the fit plot.
+   :type ax_fit_kws: ``None`` or dictionary
+   :param fig_kws:  keyword arguments passed to creation of matplotlib figure.
+   :type fig_kws: ``None`` or dictionary
+   :returns:     matplotlib.figure.Figure
+
+   This combines :meth:`ModelResult.plot_fit` and :meth:`ModelResult.plot_residuals`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
+
+   If ``fig`` is None then ``matplotlib.pyplot.figure(**fig_kws)`` is called.
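+
+   For example, a minimal sketch (assuming matplotlib is installed and
+   ``result`` is a fitted :class:`ModelResult`)::
+
+       fig = result.plot()
+       fig.savefig('fit.png')   # hypothetical output filename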
+
+.. method:: ModelResult.plot_fit(ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None,  data_kws=None, fit_kws=None, init_kws=None, ax_kws=None)
+
+   Plot the fit results using matplotlib, if available.  The plot will include
+   the data points, the initial fit curve, and the best-fit curve. If the fit
+   model included weights or if ``yerr`` is specified, errorbars will also
+   be plotted.
+
+   :param ax: matplotlib axes to plot on.
+   :type ax:  ``None`` or matplotlib.axes.Axes.
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param fitfmt:  matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string.
+   :param initfmt:  matplotlib format string for initial curve.
+   :type initfmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param numpoints:  number of points to display
+   :type numpoints: ``None`` or integer
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param init_kws:  keyword arguments passed to plot for initial curve.
+   :type init_kws: ``None`` or dictionary
+   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
+   :type ax_kws: ``None`` or dictionary
+   :returns:     matplotlib.axes.Axes
+
+   For details about plot format strings and keyword arguments see
+   documentation of :func:`matplotlib.axes.Axes.plot`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
+
+   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
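+
+   For example, a minimal sketch drawing the fit onto an existing axes
+   (assuming matplotlib and a fitted ``result``)::
+
+       import matplotlib.pyplot as plt
+       fig, ax = plt.subplots()
+       result.plot_fit(ax=ax, datafmt='k.')
+       plt.show()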
+
+.. method:: ModelResult.plot_residuals(ax=None, datafmt='o', yerr=None, data_kws=None, fit_kws=None, ax_kws=None)
+
+   Plot the fit residuals (data - fit) using matplotlib.  If ``yerr`` is
+   supplied or if the model included weights, errorbars will also be plotted.
+
+   :param ax: matplotlib axes to plot on.
+   :type ax:  ``None`` or matplotlib.axes.Axes.
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
+   :type ax_kws: ``None`` or dictionary
+   :returns:     matplotlib.axes.Axes
+
+   For details about plot format strings and keyword arguments see
+   documentation of :func:`matplotlib.axes.Axes.plot`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``.
+
+   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
+
+
+
+
+:class:`ModelResult` attributes
+---------------------------------
+
+.. attribute:: aic
+
+   floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
+
+.. attribute:: best_fit
+
+   ndarray result of model function, evaluated at provided
+   independent variables and with best-fit parameters.
+
+.. attribute:: best_values
+
+   dictionary with parameter names as keys and best-fit values as values.
+
+.. attribute:: bic
+
+   floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
+
+.. attribute:: chisqr
+
+   floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
+
+.. attribute:: ci_out
+
+   confidence interval data (see :ref:`confidence_chapter`), or ``None`` if
+   the confidence intervals have not been calculated.
+
+.. attribute:: covar
+
+   ndarray (square) covariance matrix returned from fit.
+
+.. attribute:: data
+
+   ndarray of data to compare to model.
+
+.. attribute:: errorbars
+
+   boolean for whether error bars were estimated by fit.
+
+.. attribute::  ier
+
+   integer returned code from :scipydoc:`optimize.leastsq`.
+
+.. attribute:: init_fit
+
+   ndarray result of model function, evaluated at provided
+   independent variables and with initial parameters.
+
+.. attribute:: init_params
+
+   initial parameters.
+
+.. attribute:: init_values
+
+   dictionary with parameter names as keys and initial values as values.
+
+.. attribute:: iter_cb
+
+   optional callable function, to be called at each fit iteration.  This
+   must take arguments of ``params, iter, resid, *args, **kws``, where
+   ``params`` will have the current parameter values, ``iter`` the
+   iteration, ``resid`` the current residual array, and ``*args`` and
+   ``**kws`` as passed to the objective function.  See :ref:`fit-itercb-label`.
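+
+   For example, a minimal sketch of such a callback (a hypothetical
+   ``watch`` function that could be passed as ``iter_cb`` to
+   :meth:`Model.fit`)::
+
+       def watch(params, iter, resid, *args, **kws):
+           # print iteration number and current sum of squared residuals
+           print(iter, (resid**2).sum())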
+
+.. attribute:: jacfcn
+
+   optional callable function, to be called to calculate the Jacobian array.
+
+.. attribute::  lmdif_message
+
+   string message returned from :scipydoc:`optimize.leastsq`.
+
+.. attribute::  message
+
+   string message returned from :func:`minimize`.
+
+.. attribute::  method
+
+   string naming fitting method for :func:`minimize`.
+
+.. attribute::  model
+
+   instance of :class:`Model` used for the fit.
+
+.. attribute::  ndata
+
+    integer number of data points.
+
+.. attribute::  nfev
+
+    integer number of function evaluations used for fit.
+
+.. attribute::  nfree
+
+    integer number of free parameters in fit.
+
+.. attribute::  nvarys
+
+    integer number of independent, freely varying variables in fit.
+
+.. attribute::  params
+
+    Parameters used in fit.  Will have best-fit values.
+
+.. attribute::  redchi
+
+    floating point reduced chi-square statistic (see :ref:`fit-results-label`).
+
+.. attribute::  residual
+
+   ndarray for residual.
+
+.. attribute::  scale_covar
+
+   boolean flag for whether to automatically scale covariance matrix.
+
+.. attribute:: success
+
+   boolean value of whether fit succeeded.
+
+.. attribute:: weights
+
+   ndarray (or ``None``) of weighting values to be used in fit.  If not
+   ``None``, it will be used as a multiplicative factor of the residual
+   array, so that ``weights*(data - fit)`` is minimized in the
+   least-squares sense.
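+
+   For example, a hedged sketch of a weighted fit (assuming a model
+   ``mod`` with independent variable ``x``, data ``y``, and hypothetical
+   per-point uncertainties ``dy``)::
+
+       result = mod.fit(y, params, x=x, weights=1.0/dy)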
+
+.. index:: Composite models
+
+.. _composite_models_section:
+
+
+Composite Models : adding (or multiplying) Models
+==============================================================
+
+One of the more interesting features of the :class:`Model` class is that
+Models can be added together or combined with basic algebraic operations
+(add, subtract, multiply, and divide) to give a composite model.  The
+composite model will have parameters from each of the component models,
+with all parameters being available to influence the whole model.  This
+ability to combine models will become even more useful in the next chapter,
+when pre-built subclasses of :class:`Model` are discussed.  For now, we'll
+consider a simple example, and build a model of a Gaussian plus a line,
+to model a peak with a background.  For such a simple problem, we could just
+build a model that included both components::
+
+    def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
+        "line + 1-d gaussian"
+
+        gauss = (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+        line = slope * x + intercept
+        return gauss + line
+
+and use that with::
+
+    mod = Model(gaussian_plus_line)
+
+But we already have a function for a Gaussian, and maybe we'll discover
+that a linear background isn't sufficient, which would mean the model
+function would have to be changed.  As an alternative we could define
+a linear function::
+
+    def line(x, slope, intercept):
+        "a line"
+        return slope * x + intercept
+
+and build a composite model with just::
+
+    mod = Model(gaussian) + Model(line)
+
+This model has parameters for both component models, and can be used as:
+
+.. literalinclude:: ../examples/doc_model2.py
+
+which prints out the results::
+
+    [[Model]]
+        (Model(gaussian) + Model(line))
+    [[Fit Statistics]]
+        # function evals   = 44
+        # data points      = 101
+        # variables        = 5
+        chi-square         = 2.579
+        reduced chi-square = 0.027
+        Akaike info crit   = -355.329
+        Bayesian info crit = -342.253
+    [[Variables]]
+        amp:         8.45931061 +/- 0.124145 (1.47%) (init= 5)
+        cen:         5.65547872 +/- 0.009176 (0.16%) (init= 5)
+        intercept:  -0.96860201 +/- 0.033522 (3.46%) (init= 1)
+        slope:       0.26484403 +/- 0.005748 (2.17%) (init= 0)
+        wid:         0.67545523 +/- 0.009916 (1.47%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amp, wid)                  =  0.666
+        C(cen, intercept)            =  0.129
+
+
+and shows the plot on the left.
+
+.. _figModel2:
+
+  .. image:: _images/model_fit2.png
+     :target: _images/model_fit2.png
+     :width: 48%
+  .. image:: _images/model_fit2a.png
+     :target: _images/model_fit2a.png
+     :width: 48%
+
+
+On the left, the data is shown as blue dots, the total fit as a solid
+red line, and the initial fit as a black dashed line.  In the figure on
+the right, the data is again shown as blue dots, with the Gaussian
+component shown as a black dashed line and the linear component as a red
+dashed line.  These components were generated after the fit using the
+:meth:`ModelResult.eval_components` method of the `result`::
+
+    comps = result.eval_components()
+
+which returns a dictionary of the components, using keys of the model name
+(or `prefix` if that is set).  This will use the parameter values in
+``result.params`` and the independent variables (``x``) used during the
+fit.  Note that while the :class:`ModelResult` held in `result` does store the
+best parameters and the best estimate of the model in ``result.best_fit``,
+the original model and parameters in ``pars`` are left unaltered.
+
+You can apply this composite model to other data sets, or evaluate the
+model at other values of ``x``.  You may want to do this to give a finer or
+coarser spacing of data points, or to extrapolate the model outside the
+fitting range.  This can be done with::
+
+    xwide = np.linspace(-5, 25, 3001)
+    predicted = mod.eval(x=xwide)
+
+In this example, the argument names for the model functions do not overlap.
+If they had, the ``prefix`` argument to :class:`Model` would have allowed
+us to identify which parameter went with which component model.  As we will
+see in the next chapter, using composite models with the built-in models
+provides a simple way to build up complex models.
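+
+For example, a minimal sketch using prefixes to keep two Gaussian
+components distinct (reusing the ``gaussian`` function from above)::
+
+    mod = Model(gaussian, prefix='g1_') + Model(gaussian, prefix='g2_')
+    print(mod.param_names)   # the parameter names now carry their prefixes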
+
+.. class::  CompositeModel(left, right, op[, **kws])
+
+    Create a composite model from two models (`left` and `right`) and a
+    binary operator (`op`).  Additional keywords are passed to
+    :class:`Model`.
+
+    :param left: left-hand side Model
+    :type left: :class:`Model`
+    :param right: right-hand side Model
+    :type right: :class:`Model`
+    :param op: binary operator
+    :type op: callable, and taking 2 arguments (`left` and `right`).
+
+Normally, one does not have to explicitly create a :class:`CompositeModel`,
+as doing::
+
+     mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
+
+will automatically create a :class:`CompositeModel`.  In this example,
+`mod.left` will be `Model(fcn1)`, `mod.op` will be :meth:`operator.add`,
+and `mod.right` will be another CompositeModel that has a `left` attribute
+of `Model(fcn2)`, an `op` of :meth:`operator.mul`, and a `right` of
+`Model(fcn3)`.
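+
+For example, a minimal sketch inspecting that structure (with ``fcn1``,
+``fcn2``, and ``fcn3`` standing in for any suitable model functions)::
+
+    import operator
+    mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
+    print(mod.op is operator.add)   # True
+    print(mod.left)                 # Model(fcn1)
+    print(mod.right)                # CompositeModel of fcn2 and fcn3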
+
+If you want to use a binary operator other than addition, subtraction,
+multiplication, or division (the operations supported through normal
+Python syntax), you'll need to
+explicitly create a :class:`CompositeModel` with the appropriate binary
+operator.  For example, to convolve two models, you could define a simple
+convolution function, perhaps as::
+
+    import numpy as np
+    def convolve(dat, kernel):
+        # simple convolution
+        npts = min(len(dat), len(kernel))
+        pad  = np.ones(npts)
+        tmp  = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
+        out  = np.convolve(tmp, kernel, mode='valid')
+        noff = int((len(out) - npts)/2)
+        return (out[noff:])[:npts]
+
+which extends the data in both directions so that the convolving kernel
+function gives a valid result over the data range.  Because this function
+takes two array arguments and returns an array, it can be used as the
+binary operator.  A full script using this technique is here:
+
+.. literalinclude:: ../examples/doc_model3.py
+
+which prints out the results::
+
+    [[Model]]
+        (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
+    [[Fit Statistics]]
+        # function evals   = 23
+        # data points      = 201
+        # variables        = 3
+        chi-square         = 25.789
+        reduced chi-square = 0.130
+        Akaike info crit   = -403.702
+        Bayesian info crit = -393.793
+    [[Variables]]
+        mid:         5 (fixed)
+        amplitude:   0.62249894 +/- 0.001946 (0.31%) (init= 1)
+        sigma:       0.61438887 +/- 0.014057 (2.29%) (init= 1.5)
+        center:      4.51710256 +/- 0.010152 (0.22%) (init= 3.5)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amplitude, center)         =  0.335
+        C(amplitude, sigma)          =  0.273
+
+and shows the plots:
+
+.. _figModel3:
+
+  .. image:: _images/model_fit3a.png
+     :target: _images/model_fit3a.png
+     :width: 48%
+  .. image:: _images/model_fit3b.png
+     :target: _images/model_fit3b.png
+     :width: 48%
+
+Using composite models with built-in or custom operators allows you to
+build complex models from testable sub-components.
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 980d809..e9b53d7 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -1,240 +1,237 @@
-.. _parameters_chapter:
-
-================================================
-:class:`Parameter`  and :class:`Parameters`
-================================================
-
-This chapter describes :class:`Parameter` objects which is the key concept
-of lmfit.
-
-A :class:`Parameter` is the quantity to be optimized in all minimization
-problems, replacing the plain floating point number used in the
-optimization routines from :mod:`scipy.optimize`.  A :class:`Parameter` has
-a value that can be varied in the fit or have a fixed value, have upper
-and/or lower bounds.  It can even have a value that is constrained by an
-algebraic expression of other Parameter values.  Since :class:`Parameters`
-live outside the core optimization routines, they can be used in **all**
-optimization routines from :mod:`scipy.optimize`.  By using
-:class:`Parameter` objects instead of plain variables, the objective
-function does not have to be modified to reflect every change of what is
-varied in the fit.  This simplifies the writing of models, allowing general
-models that describe the phenomenon to be written, and gives the user more
-flexibility in using and testing variations of that model.
-
-Whereas a :class:`Parameter` expands on an individual floating point
-variable, the optimization methods need an ordered group of floating point
-variables.  In the :mod:`scipy.optimize` routines this is required to be a
-1-dimensional numpy ndarray.  For lmfit, where each :class:`Parameter` has
-a name, this is replaced by a :class:`Parameters` class, which works as an
-ordered dictionary of :class:`Parameter` objects, with a few additional
-features and methods.  That is, while the concept of a :class:`Parameter`
-is central to lmfit, one normally creates and interacts with a
-:class:`Parameters` instance that contains many :class:`Parameter` objects.
-The objective functions you write for lmfit will take an instance of
-:class:`Parameters` as its first argument.
-
-
-The :class:`Parameter` class
-========================================
-
-.. class:: Parameter(name=None[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
-
-   create a Parameter object.
-
-   :param name: parameter name
-   :type name: ``None`` or string -- will be overwritten during fit if ``None``.
-   :param value: the numerical value for the parameter
-   :param vary:  whether to vary the parameter or not.
-   :type vary:  boolean (``True``/``False``) [default ``True``]
-   :param min:  lower bound for value (``None`` = no lower bound).
-   :param max:  upper bound for value (``None`` = no upper bound).
-   :param expr:  mathematical expression to use to evaluate value during fit.
-   :type expr: ``None`` or string
-
-
-Each of these inputs is turned into an attribute of the same name.
-
-After a fit, a Parameter for a fitted variable (that is with ``vary =
-True``) may have its :attr:`value` attribute to hold the best-fit value.
-Depending on the success of the fit and fitting algorithm used, it may also
-have attributes :attr:`stderr` and :attr:`correl`.
-
-.. attribute:: stderr
-
-   the estimated standard error for the best-fit value.
-
-.. attribute:: correl
-
-   a dictionary of the correlation with the other fitted variables in the
-   fit, of the form::
-
-   {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
-
-See :ref:`bounds_chapter` for details on the math used to implement the
-bounds with :attr:`min` and :attr:`max`.
-
-The :attr:`expr` attribute can contain a mathematical expression that will
-be used to compute the value for the Parameter at each step in the fit.
-See :ref:`constraints_chapter` for more details and examples of this
-feature.
-
-.. index:: Removing a Constraint Expression
-
-.. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]])
-
-   set or update a Parameters value or other attributes.
-
-   :param name:  parameter name
-   :param value: the numerical value for the parameter
-   :param vary:  whether to vary the parameter or not.
-   :param min:   lower bound for value
-   :param max:   upper bound for value
-   :param expr:  mathematical expression to use to evaluate value during fit.
-
-Each argument of :meth:`set` has a default value of ``None``, and will
-be set only if the provided value is not ``None``.  You can use this to
-update some Parameter attribute without affecting others, for example::
-
-       p1 = Parameter('a', value=2.0)
-       p2 = Parameter('b', value=0.0)
-       p1.set(min=0)
-       p2.set(vary=False)
-
-   to set a lower bound, or to set a Parameter as have a fixed value.
-
-   Note that to use this approach to lift a lower or upper bound, doing::
-
-       p1.set(min=0)
-       .....
-       # now lift the lower bound
-       p1.set(min=None)   # won't work!  lower bound NOT changed
-
-   won't work -- this will not change the current lower bound.  Instead
-   you'll have to use ``np.inf`` to remove a lower or upper bound::
-
-       # now lift the lower bound
-       p1.set(min=-np.inf)   # will work!
-
-   Similarly, to clear an expression of a parameter, you need to pass an
-   empty string, not ``None``.  You also need to give a value and
-   explicitly tell it to vary::
-
-       p3 = Parameter('c', expr='(a+b)/2')
-       p3.set(expr=None)     # won't work!  expression NOT changed
-
-       # remove constraint expression
-       p3.set(value=1.0, vary=True, expr='')  # will work!  parameter now unconstrained
-
-
-The :class:`Parameters` class
-========================================
-
-.. class:: Parameters()
-
-   create a Parameters object.  This is little more than a fancy ordered
-   dictionary, with the restrictions that:
-
-   1. keys must be valid Python symbol names, so that they can be used in
-      expressions of mathematical constraints.  This means the names must
-      match ``[a-z_][a-z0-9_]*``  and cannot be a Python reserved word.
-
-   2. values must be valid :class:`Parameter` objects.
-
-
-   Two methods are for provided for convenient initialization of a :class:`Parameters`,
-   and one for extracting :class:`Parameter` values into a plain dictionary.
-
-.. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
-
-   add a named parameter.  This creates a :class:`Parameter`
-   object associated with the key `name`, with optional arguments
-   passed to :class:`Parameter`::
-
-     p = Parameters()
-     p.add('myvar', value=1, vary=True)
-
-.. method:: add_many(self, paramlist)
-
-   add a list of named parameters.  Each entry must be a tuple
-   with the following entries::
-
-        name, value, vary, min, max, expr
-
-   This method is somewhat rigid and verbose (no default values), but can
-   be useful when initially defining a parameter list so that it looks
-   table-like::
-
-     p = Parameters()
-     #           (Name,  Value,  Vary,   Min,  Max,  Expr)
-     p.add_many(('amp1',    10,  True, None, None,  None),
-                ('cen1',   1.2,  True,  0.5,  2.0,  None),
-                ('wid1',   0.8,  True,  0.1, None,  None),
-                ('amp2',   7.5,  True, None, None,  None),
-                ('cen2',   1.9,  True,  1.0,  3.0,  None),
-                ('wid2',  None, False, None, None, '2*wid1/3'))
-
-
-.. method:: pretty_print(oneline=False)
-
-   prints a clean representation on the Parameters. If `oneline` is
-   `True`, the result will be printed to a single (long) line.
-
-.. method:: valuesdict()
-
-   return an ordered dictionary of name:value pairs with the
-   Paramater name as the key and Parameter value as value.
-
-   This is distinct from the :class:`Parameters` itself, as the dictionary
-   values are not :class:`Parameter` objects, just the :attr:`value`.
-   Using :method:`valuesdict` can be a very convenient way to get updated
-   values in a objective function.
-
-.. method:: dumps(**kws):
-
-   return a JSON string representation of the :class:`Parameter` object.
-   This can be saved or used to re-create or re-set parameters, using the
-   :meth:`loads` method.
-
-   Optional keywords are sent :py:func:`json.dumps`.
-
-.. method:: dump(file, **kws):
-
-   write a JSON representation of the :class:`Parameter` object to a file
-   or file-like object in `file` -- really any object with a :meth:`write`
-   method.  Optional keywords are sent :py:func:`json.dumps`.
-
-.. method:: loads(sval, **kws):
-
-   use a JSON string representation of the :class:`Parameter` object in
-   `sval` to set all parameter settins. Optional keywords are sent
-   :py:func:`json.loads`.
-
-.. method:: load(file, **kws):
-
-   read and use a JSON string representation of the :class:`Parameter`
-   object from a file or file-like object in `file` -- really any object
-   with a :meth:`read` method.  Optional keywords are sent
-   :py:func:`json.loads`.
-
-
-Simple Example
-==================
-
-Using :class:`Parameters`` and :func:`minimize` function (discussed in the
-next chapter) might look like this:
-
-.. literalinclude:: ../examples/doc_basic.py
-
-
-Here, the objective function explicitly unpacks each Parameter value.  This
-can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
-which would make the objective function ``fcn2min`` above look like::
-
-    def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        v = params.valuesdict()
-
-        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
-        return model - data
-
-The results are identical, and the difference is a stylistic choice.
+.. _parameters_chapter:
+
+================================================
+:class:`Parameter`  and :class:`Parameters`
+================================================
+
+This chapter describes :class:`Parameter` objects, which are the key concept
+of lmfit.
+
+A :class:`Parameter` is the quantity to be optimized in all minimization
+problems, replacing the plain floating point number used in the
+optimization routines from :mod:`scipy.optimize`.  A :class:`Parameter` has
+a value that can be varied in the fit or held fixed, and can have upper
+and/or lower bounds.  It can even have a value that is constrained by an
+algebraic expression of other Parameter values.  Since :class:`Parameters`
+live outside the core optimization routines, they can be used in **all**
+optimization routines from :mod:`scipy.optimize`.  By using
+:class:`Parameter` objects instead of plain variables, the objective
+function does not have to be modified to reflect every change of what is
+varied in the fit.  This simplifies the writing of models, allowing general
+models that describe the phenomenon to be written, and gives the user more
+flexibility in using and testing variations of that model.
+
+Whereas a :class:`Parameter` expands on an individual floating point
+variable, the optimization methods need an ordered group of floating point
+variables.  In the :mod:`scipy.optimize` routines this is required to be a
+1-dimensional numpy ndarray.  For lmfit, where each :class:`Parameter` has
+a name, this is replaced by a :class:`Parameters` class, which works as an
+ordered dictionary of :class:`Parameter` objects, with a few additional
+features and methods.  That is, while the concept of a :class:`Parameter`
+is central to lmfit, one normally creates and interacts with a
+:class:`Parameters` instance that contains many :class:`Parameter` objects.
+The objective functions you write for lmfit will take an instance of
+:class:`Parameters` as its first argument.
+
+
+The :class:`Parameter` class
+========================================
+
+.. class:: Parameter(name=None[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
+
+    create a Parameter object.
+
+    :param name: parameter name
+    :type name: ``None`` or string -- will be overwritten during fit if ``None``.
+    :param value: the numerical value for the parameter
+    :param vary:  whether to vary the parameter or not.
+    :type vary:  boolean (``True``/``False``) [default ``True``]
+    :param min:  lower bound for value (``None`` = no lower bound).
+    :param max:  upper bound for value (``None`` = no upper bound).
+    :param expr:  mathematical expression to use to evaluate value during fit.
+    :type expr: ``None`` or string
+
+    Each of these inputs is turned into an attribute of the same name.
+
+    After a fit, a Parameter for a fitted variable (that is, with ``vary =
+    True``) will have its :attr:`value` attribute set to the best-fit value.
+    Depending on the success of the fit and fitting algorithm used, it may also
+    have attributes :attr:`stderr` and :attr:`correl`.
+
+    .. attribute:: stderr
+
+       the estimated standard error for the best-fit value.
+
+    .. attribute:: correl
+
+       a dictionary of the correlation with the other fitted variables in the
+       fit, of the form::
+
+           {'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
+
+    See :ref:`bounds_chapter` for details on the math used to implement the
+    bounds with :attr:`min` and :attr:`max`.
+
+    The :attr:`expr` attribute can contain a mathematical expression that will
+    be used to compute the value for the Parameter at each step in the fit.
+    See :ref:`constraints_chapter` for more details and examples of this
+    feature.
+
+    .. index:: Removing a Constraint Expression
+
+    .. method:: set(value=None[, vary=None[, min=None[, max=None[, expr=None]]]])
+
+       set or update a Parameter's value or other attributes.
+
+       :param value: the numerical value for the parameter
+       :param vary:  whether to vary the parameter or not.
+       :param min:   lower bound for value
+       :param max:   upper bound for value
+       :param expr:  mathematical expression to use to evaluate value during fit.
+
+    Each argument of :meth:`set` has a default value of ``None``, and will
+    be set only if the provided value is not ``None``.  You can use this to
+    update some Parameter attribute without affecting others, for example::
+
+        p1 = Parameter('a', value=2.0)
+        p2 = Parameter('b', value=0.0)
+        p1.set(min=0)
+        p2.set(vary=False)
+
+    to set a lower bound, or to set a Parameter to have a fixed value.
+
+    Note that to use this approach to lift a lower or upper bound, doing::
+
+        p1.set(min=0)
+        .....
+        # now lift the lower bound
+        p1.set(min=None)   # won't work!  lower bound NOT changed
+
+    won't work -- this will not change the current lower bound.  Instead
+    you'll have to use ``np.inf`` to remove a lower or upper bound::
+
+        # now lift the lower bound
+        p1.set(min=-np.inf)   # will work!
+
+    Similarly, to clear an expression of a parameter, you need to pass an
+    empty string, not ``None``.  You also need to give a value and
+    explicitly tell it to vary::
+
+        p3 = Parameter('c', expr='(a+b)/2')
+        p3.set(expr=None)     # won't work!  expression NOT changed
+
+        # remove constraint expression
+        p3.set(value=1.0, vary=True, expr='')  # will work!  parameter now unconstrained
+
+
+The :class:`Parameters` class
+========================================
+
+.. currentmodule:: lmfit.parameter
+
+.. class:: Parameters()
+
+    create a Parameters object.  This is little more than a fancy ordered
+    dictionary, with the restrictions that:
+
+    1. keys must be valid Python symbol names, so that they can be used in
+       expressions of mathematical constraints.  This means the names must
+       match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word.
+
+    2. values must be valid :class:`Parameter` objects.
+
+    Two methods are provided for convenient initialization of a :class:`Parameters`,
+    and one for extracting :class:`Parameter` values into a plain dictionary.
+
+    .. method:: add(name[, value=None[, vary=True[, min=None[, max=None[, expr=None]]]]])
+
+       add a named parameter.  This creates a :class:`Parameter`
+       object associated with the key `name`, with optional arguments
+       passed to :class:`Parameter`::
+
+         p = Parameters()
+         p.add('myvar', value=1, vary=True)
+
+    .. method:: add_many(self, paramlist)
+
+       add a list of named parameters.  Each entry must be a tuple
+       with the following entries::
+
+            name, value, vary, min, max, expr
+
+       This method is somewhat rigid and verbose (no default values), but can
+       be useful when initially defining a parameter list so that it looks
+       table-like::
+
+         p = Parameters()
+         #           (Name,  Value,  Vary,   Min,  Max,  Expr)
+         p.add_many(('amp1',    10,  True, None, None,  None),
+                    ('cen1',   1.2,  True,  0.5,  2.0,  None),
+                    ('wid1',   0.8,  True,  0.1, None,  None),
+                    ('amp2',   7.5,  True, None, None,  None),
+                    ('cen2',   1.9,  True,  1.0,  3.0,  None),
+                    ('wid2',  None, False, None, None, '2*wid1/3'))
+
+
+    .. automethod:: Parameters.pretty_print
+
+    .. method:: valuesdict()
+
+       return an ordered dictionary of name:value pairs, with the
+       Parameter name as the key and the Parameter value as the value.
+
+       This is distinct from the :class:`Parameters` object itself, as the
+       dictionary values are not :class:`Parameter` objects, just the
+       :attr:`value`.  Using :meth:`valuesdict` can be a very convenient
+       way to get updated values in an objective function.
+
+    .. method:: dumps(**kws)
+
+       return a JSON string representation of the :class:`Parameters`
+       object.  This can be saved or used to re-create or re-set
+       parameters, using the :meth:`loads` method.
+
+       Optional keywords are sent to :py:func:`json.dumps`.
+
+    .. method:: dump(file, **kws)
+
+       write a JSON representation of the :class:`Parameters` object to a
+       file or file-like object in `file` -- really any object with a
+       :meth:`write` method.  Optional keywords are sent to :py:func:`json.dumps`.
+
+    .. method:: loads(sval, **kws)
+
+       use a JSON string representation of the :class:`Parameters` object
+       in `sval` to set all parameter settings. Optional keywords are sent
+       to :py:func:`json.loads`.
+
+    .. method:: load(file, **kws)
+
+       read and use a JSON string representation of the :class:`Parameters`
+       object from a file or file-like object in `file` -- really any
+       object with a :meth:`read` method.  Optional keywords are sent to
+       :py:func:`json.loads`.
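+
+    For example, a minimal round-trip sketch (assuming a populated
+    :class:`Parameters` instance ``p``)::
+
+        saved = p.dumps()
+        p2 = Parameters()
+        p2.loads(saved)   # p2 now holds the same parameters as p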
+
+
+Simple Example
+==================
+
+Using :class:`Parameters` and the :func:`minimize` function (discussed in the
+next chapter) might look like this:
+
+.. literalinclude:: ../examples/doc_basic.py
+
+
+Here, the objective function explicitly unpacks each Parameter value.  This
+can be simplified using the :class:`Parameters` :meth:`valuesdict` method,
+which would make the objective function ``fcn2min`` above look like::
+
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        v = params.valuesdict()
+
+        model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
+        return model - data
+
+The results are identical, and the difference is a stylistic choice.
diff --git a/doc/sphinx/ext_mathjax.py b/doc/sphinx/ext_mathjax.py
index 3e54c82..40de659 100644
--- a/doc/sphinx/ext_mathjax.py
+++ b/doc/sphinx/ext_mathjax.py
@@ -1,10 +1,10 @@
-# sphinx extensions for mathjax
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(mathjax)
+# sphinx extensions for mathjax
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(mathjax)
diff --git a/doc/sphinx/ext_pngmath.py b/doc/sphinx/ext_pngmath.py
index 10997b0..cf153fe 100644
--- a/doc/sphinx/ext_pngmath.py
+++ b/doc/sphinx/ext_pngmath.py
@@ -1,10 +1,10 @@
-# sphinx extensions for pngmath
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.todo',
-              'sphinx.ext.coverage',
-              'sphinx.ext.intersphinx',
-              'numpydoc']
-mathjax = 'sphinx.ext.mathjax'
-pngmath = 'sphinx.ext.pngmath'
-
-extensions.append(pngmath)
+# sphinx extensions for pngmath
+extensions = ['sphinx.ext.autodoc',
+              'sphinx.ext.todo',
+              'sphinx.ext.coverage',
+              'sphinx.ext.intersphinx',
+              'numpydoc']
+mathjax = 'sphinx.ext.mathjax'
+pngmath = 'sphinx.ext.pngmath'
+
+extensions.append(pngmath)
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
index 42409a1..6a31b9d 100644
--- a/doc/sphinx/theme/lmfitdoc/layout.html
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -1,66 +1,66 @@
-{#
-    sphinxdoc/layout.html
-    ~~~~~~~~~~~~~~~~~~~~~
-
-    Sphinx layout template for the sphinxdoc theme.
-
-    :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-#}
-{%- extends "basic/layout.html" %}
-
-{%- block extrahead %}
-  <script type="text/x-mathjax-config">
-     MathJax.Hub.Config({
-        "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
-        "HTML-CSS": {scale: 90}
-  });</script>
-{% endblock %}
-
-
-
-{% block rootrellink %}
-   <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
-   <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
-   <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
-   <li><a href="{{ pathto('model') }}"> model</a>|</li>
-   <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
-   <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
-   <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
-   <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
-{% endblock %}
-
-{% block relbar1 %}
-<div>
-<table border=0>
-  <tr><td></td><td width=85% padding=5 align=left>
-       <a href="index.html" style="color: #157"> <font size=+3>LMFIT</font></a>
-     </td>
-     <td width=7% align=left>
-         <a href="contents.html" style="color: #882222">
-         <font size+=1>Contents</font></a> </td>
-     <td width=7% align=left>
-          <a href="installation.html" style="color: #882222">
-          <font size+=1>Download</font></a></td>
-     <td></td>
-  </tr>
-  <tr><td></td><td width=75% padding=5 align=left>
-        <a href="index.html" style="color: #157"> <font size=+2>
-	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
-     </td>
-     <td width=7% align=left>
-         <a href="faq.html" style="color: #882222">
-         <font size+=1>FAQ</font></a> </td>
-     <td width=7% align=left>
-        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
-         <font size+=1>Develop</font></a></td>
-     <td></td>
-  </tr>
-</table>
-</div>
-{{ super() }}
-{% endblock %}
-
-{# put the sidebar before the body #}
-{% block sidebar1 %}{{ sidebar() }}{% endblock %}
-{% block sidebar2 %}{% endblock %}
+{#
+    sphinxdoc/layout.html
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Sphinx layout template for the sphinxdoc theme.
+
+    :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{%- block extrahead %}
+  <script type="text/x-mathjax-config">
+     MathJax.Hub.Config({
+        "TeX": {Macros: {AA : "{\\unicode{x212B}}"}},
+        "HTML-CSS": {scale: 90}
+  });</script>
+{% endblock %}
+
+
+
+{% block rootrellink %}
+   <li>[<a href="{{ pathto('intro') }}">intro</a>|</li>
+   <li><a href="{{ pathto('parameters') }}">parameters</a>|</li>
+   <li><a href="{{ pathto('fitting') }}"> minimize</a>|</li>
+   <li><a href="{{ pathto('model') }}"> model</a>|</li>
+   <li><a href="{{ pathto('builtin_models') }}"> builtin models</a>|</li>
+   <li><a href="{{ pathto('confidence') }}">confidence intervals</a>|</li>
+   <li><a href="{{ pathto('bounds') }}">bounds</a>|</li>
+   <li><a href="{{ pathto('constraints') }}">constraints</a>]</li>
+{% endblock %}
+
+{% block relbar1 %}
+<div>
+<table border=0>
+  <tr><td></td><td width=85% padding=5 align=left>
+       <a href="index.html" style="color: #157"> <font size=+3>LMFIT</font></a>
+     </td>
+     <td width=7% align=left>
+         <a href="contents.html" style="color: #882222">
+         <font size+=1>Contents</font></a> </td>
+     <td width=7% align=left>
+          <a href="installation.html" style="color: #882222">
+          <font size+=1>Download</font></a></td>
+     <td></td>
+  </tr>
+  <tr><td></td><td width=75% padding=5 align=left>
+        <a href="index.html" style="color: #157"> <font size=+2>
+	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
+     </td>
+     <td width=7% align=left>
+         <a href="faq.html" style="color: #882222">
+         <font size+=1>FAQ</font></a> </td>
+     <td width=7% align=left>
+        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
+         <font size+=1>Develop</font></a></td>
+     <td></td>
+  </tr>
+</table>
+</div>
+{{ super() }}
+{% endblock %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
diff --git a/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t b/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
index 89bd30c..92b6913 100644
--- a/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
+++ b/doc/sphinx/theme/lmfitdoc/static/lmfitdoc.css_t
@@ -1,348 +1,348 @@
-/*
- * lmfitdoc.css_t
- * minor riff on sphinxdoc.css_t
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Sphinx stylesheet -- sphinxdoc theme.  Originally created by
- * Armin Ronacher for Werkzeug.
- *
- * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
- * :license: BSD, see LICENSE for details.
- *
- */
-
- at import url("basic.css");
-
-/* -- page layout ----------------------------------------------------------- */
-
-body {
-    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
-    font-size: 14px;
-    letter-spacing: -0.01em;
-    line-height: 150%;
-    text-align: center;
-    background-color: #D6DAC4;
-    color: black;
-    padding: 0;
-    border: 0px solid #D0D0C0;
-    margin: 15px 15px 15px 15px;
-    min-width: 740px;
-}
-
-div.document {
-    background-color: white;
-    text-align: left;
-    background-image: url(contents.png);
-    background-repeat: repeat-x;
-}
-
-div.bodywrapper {
-    margin: 0 {{ theme_sidebarwidth|toint + 10 }}px 0 0;
-    border-right: 1px solid #ccc;
-}
-
-div.body {
-    margin: 0;
-    padding: 0.5em 20px 20px 20px;
-}
-
-div.related {
-    font-size: 1em;
-    background-color: #0D0;
-}
-
-div.related ul {
-    height: 2em;
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    background-color: #F0EFE4;
-    color: #157;
-}
-
-div.related ul li {
-    margin: 0;
-    padding: 0;
-    height: 2em;
-    float: left;
-    background-color: #D0000;
-}
-
-div.related ul li.right {
-    float: right;
-    margin-right: 5px;
-}
-
-div.related ul li a {
-    margin: 0;
-    padding: 0 5px 0 5px;
-    line-height: 1.75em;
-    color: #EE9816;
-    color: #157;
-}
-
-div.related ul li a:hover {
-    color: #822;
-}
-
-div.sphinxsidebarwrapper {
-    padding: 0;
-}
-
-div.sphinxsidebar {
-    margin: 0;
-    padding: 0.5em 15px 15px 0;
-    width: {{ theme_sidebarwidth|toint - 20 }}px;
-    float: right;
-    font-size: 1em;
-    text-align: left;
-}
-
-div.sphinxsidebar h3, div.sphinxsidebar h4 {
-    margin: 1em 0 0.5em 0;
-    font-size: 1em;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: #157;
-    border: 1px solid #A0A090;
-    background-color: #D0D0C4;
-}
-
-div.sphinxsidebar h3 a {
-    color: #157;
-    background-color: #D0D0C4;
-}
-
-div.sphinxsidebar ul {
-    padding-left: 1.5em;
-    margin-top: 7px;
-    padding: 0;
-    line-height: 130%;
-}
-
-div.sphinxsidebar ul ul {
-    margin-left: 20px;
-}
-
-div.footer {
-    background-color: #E0E8D4;
-    color: #86989B;
-    padding: 3px 8px 3px 0;
-    clear: both;
-    font-size: 0.8em;
-    text-align: right;
-}
-
-div.footer a {
-    color: #86989B;
-    text-decoration: underline;
-}
-
-/* -- body styles ----------------------------------------------------------- */
-
-p {
-    margin: 0.8em 0 0.5em 0;
-}
-
-a {
-    color: #CA7900;
-    text-decoration: none;
-}
-
-a:hover {
-    color: #2491CF;
-}
-
-div.body a {
-    text-decoration: underline;
-}
-
-h1 {
-    padding: 0.2em 0 0.2em 0;
-    margin: 0.7em 0 0.3em 0;
-    font-size: 1.5em;
-    color: #157;
-    background-color: #F0EFE4;
-}
-
-h2 {
-    padding: 0.2em 0 0.2em 0;
-    margin: 1.3em 0 0.2em 0;
-    font-size: 1.35em;
-    padding: 0;
-    background-color: #FAFAF0;
-}
-
-h3 {
-    padding: 0.2em 0 0.2em 0;
-    margin: 1em 0 -0.3em 0;
-    font-size: 1.2em;
-    background-color: #FBFBF3;
-}
-
-div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
-    color: black!important;
-}
-
-h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
-    display: none;
-    margin: 0 0 0 0.3em;
-    padding: 0 0.2em 0 0.2em;
-    color: #aaa!important;
-}
-
-h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
-h5:hover a.anchor, h6:hover a.anchor {
-    display: inline;
-}
-
-h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
-h5 a.anchor:hover, h6 a.anchor:hover {
-    color: #777;
-    background-color: #eee;
-}
-
-a.headerlink {
-    color: #c60f0f!important;
-    font-size: 1em;
-    margin-left: 6px;
-    padding: 0 4px 0 4px;
-    text-decoration: none!important;
-}
-
-a.headerlink:hover {
-    background-color: #ccc;
-    color: white!important;
-}
-
-cite, code, tt {
-    font-family: 'Consolas', 'Deja Vu Sans Mono',
-                 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.01em;
-}
-
-tt {
-    background-color: #f2f2f2;
-    border-bottom: 1px solid #ddd;
-    color: #333;
-}
-
-tt.descname, tt.descclassname, tt.xref {
-    border: 0;
-}
-
-hr {
-    border: 1px solid #abc;
-    margin: 2em;
-}
-
-a tt {
-    border: 0;
-    color: #CA7900;
-}
-
-a tt:hover {
-    color: #2491CF;
-}
-
-pre {
-    font-family: 'Consolas', 'Deja Vu Sans Mono',
-                 'Bitstream Vera Sans Mono', monospace;
-    font-size: 0.95em;
-    letter-spacing: 0.015em;
-    line-height: 120%;
-    padding: 0.5em;
-    border: 1px solid #ccc;
-    background-color: #f8f8f8;
-}
-
-pre a {
-    color: inherit;
-    text-decoration: underline;
-}
-
-td.linenos pre {
-    padding: 0.5em 0;
-}
-
-div.quotebar {
-    background-color: #f8f8f8;
-    max-width: 250px;
-    float: right;
-    padding: 2px 7px;
-    border: 1px solid #ccc;
-}
-
-div.topic {
-    background-color: #f8f8f8;
-}
-
-table {
-    border-collapse: collapse;
-    margin: 0 -0.5em 0 -0.5em;
-}
-
-table td, table th {
-    padding: 0.2em 0.5em 0.2em 0.5em;
-}
-
-div.admonition, div.warning {
-    font-size: 0.9em;
-    margin: 1em 0 1em 0;
-    border: 1px solid #86989B;
-    background-color: #f7f7f7;
-    padding: 0;
-}
-
-div.admonition p, div.warning p {
-    margin: 0.5em 1em 0.5em 1em;
-    padding: 0;
-}
-
-div.admonition pre, div.warning pre {
-    margin: 0.4em 1em 0.4em 1em;
-}
-
-div.admonition p.admonition-title,
-div.warning p.admonition-title {
-    margin: 0;
-    padding: 0.1em 0 0.1em 0.5em;
-    color: white;
-    border-bottom: 1px solid #86989B;
-    font-weight: bold;
-    background-color: #AFC1C4;
-}
-
-div.warning {
-    border: 1px solid #940000;
-}
-
-div.warning p.admonition-title {
-    background-color: #CF0000;
-    border-bottom-color: #940000;
-}
-
-div.admonition ul, div.admonition ol,
-div.warning ul, div.warning ol {
-    margin: 0.1em 0.5em 0.5em 3em;
-    padding: 0;
-}
-
-div.versioninfo {
-    margin: 1em 0 0 0;
-    border: 1px solid #ccc;
-    background-color: #DDEAF0;
-    padding: 8px;
-    line-height: 1.3em;
-    font-size: 0.9em;
-}
-
-.viewcode-back {
-    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
-                 'Verdana', sans-serif;
-}
-
-div.viewcode-block:target {
-    background-color: #f4debf;
-    border-top: 1px solid #ac9;
-    border-bottom: 1px solid #ac9;
-}
+/*
+ * lmfitdoc.css_t
+ * minor riff on sphinxdoc.css_t
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Sphinx stylesheet -- sphinxdoc theme.  Originally created by
+ * Armin Ronacher for Werkzeug.
+ *
+ * :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+ at import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif;
+    font-size: 14px;
+    letter-spacing: -0.01em;
+    line-height: 150%;
+    text-align: center;
+    background-color: #D6DAC4;
+    color: black;
+    padding: 0;
+    border: 0px solid #D0D0C0;
+    margin: 15px 15px 15px 15px;
+    min-width: 740px;
+}
+
+div.document {
+    background-color: white;
+    text-align: left;
+    background-image: url(contents.png);
+    background-repeat: repeat-x;
+}
+
+div.bodywrapper {
+    margin: 0 {{ theme_sidebarwidth|toint + 10 }}px 0 0;
+    border-right: 1px solid #ccc;
+}
+
+div.body {
+    margin: 0;
+    padding: 0.5em 20px 20px 20px;
+}
+
+div.related {
+    font-size: 1em;
+    background-color: #0D0;
+}
+
+div.related ul {
+    height: 2em;
+    border-top: 1px solid #ddd;
+    border-bottom: 1px solid #ddd;
+    background-color: #F0EFE4;
+    color: #157;
+}
+
+div.related ul li {
+    margin: 0;
+    padding: 0;
+    height: 2em;
+    float: left;
+    background-color: #D0000;
+}
+
+div.related ul li.right {
+    float: right;
+    margin-right: 5px;
+}
+
+div.related ul li a {
+    margin: 0;
+    padding: 0 5px 0 5px;
+    line-height: 1.75em;
+    color: #EE9816;
+    color: #157;
+}
+
+div.related ul li a:hover {
+    color: #822;
+}
+
+div.sphinxsidebarwrapper {
+    padding: 0;
+}
+
+div.sphinxsidebar {
+    margin: 0;
+    padding: 0.5em 15px 15px 0;
+    width: {{ theme_sidebarwidth|toint - 20 }}px;
+    float: right;
+    font-size: 1em;
+    text-align: left;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4 {
+    margin: 1em 0 0.5em 0;
+    font-size: 1em;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: #157;
+    border: 1px solid #A0A090;
+    background-color: #D0D0C4;
+}
+
+div.sphinxsidebar h3 a {
+    color: #157;
+    background-color: #D0D0C4;
+}
+
+div.sphinxsidebar ul {
+    padding-left: 1.5em;
+    margin-top: 7px;
+    padding: 0;
+    line-height: 130%;
+}
+
+div.sphinxsidebar ul ul {
+    margin-left: 20px;
+}
+
+div.footer {
+    background-color: #E0E8D4;
+    color: #86989B;
+    padding: 3px 8px 3px 0;
+    clear: both;
+    font-size: 0.8em;
+    text-align: right;
+}
+
+div.footer a {
+    color: #86989B;
+    text-decoration: underline;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+p {
+    margin: 0.8em 0 0.5em 0;
+}
+
+a {
+    color: #CA7900;
+    text-decoration: none;
+}
+
+a:hover {
+    color: #2491CF;
+}
+
+div.body a {
+    text-decoration: underline;
+}
+
+h1 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 0.7em 0 0.3em 0;
+    font-size: 1.5em;
+    color: #157;
+    background-color: #F0EFE4;
+}
+
+h2 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 1.3em 0 0.2em 0;
+    font-size: 1.35em;
+    padding: 0;
+    background-color: #FAFAF0;
+}
+
+h3 {
+    padding: 0.2em 0 0.2em 0;
+    margin: 1em 0 -0.3em 0;
+    font-size: 1.2em;
+    background-color: #FBFBF3;
+}
+
+div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a {
+    color: black!important;
+}
+
+h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor {
+    display: none;
+    margin: 0 0 0 0.3em;
+    padding: 0 0.2em 0 0.2em;
+    color: #aaa!important;
+}
+
+h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor,
+h5:hover a.anchor, h6:hover a.anchor {
+    display: inline;
+}
+
+h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover,
+h5 a.anchor:hover, h6 a.anchor:hover {
+    color: #777;
+    background-color: #eee;
+}
+
+a.headerlink {
+    color: #c60f0f!important;
+    font-size: 1em;
+    margin-left: 6px;
+    padding: 0 4px 0 4px;
+    text-decoration: none!important;
+}
+
+a.headerlink:hover {
+    background-color: #ccc;
+    color: white!important;
+}
+
+cite, code, tt {
+    font-family: 'Consolas', 'Deja Vu Sans Mono',
+                 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.01em;
+}
+
+tt {
+    background-color: #f2f2f2;
+    border-bottom: 1px solid #ddd;
+    color: #333;
+}
+
+tt.descname, tt.descclassname, tt.xref {
+    border: 0;
+}
+
+hr {
+    border: 1px solid #abc;
+    margin: 2em;
+}
+
+a tt {
+    border: 0;
+    color: #CA7900;
+}
+
+a tt:hover {
+    color: #2491CF;
+}
+
+pre {
+    font-family: 'Consolas', 'Deja Vu Sans Mono',
+                 'Bitstream Vera Sans Mono', monospace;
+    font-size: 0.95em;
+    letter-spacing: 0.015em;
+    line-height: 120%;
+    padding: 0.5em;
+    border: 1px solid #ccc;
+    background-color: #f8f8f8;
+}
+
+pre a {
+    color: inherit;
+    text-decoration: underline;
+}
+
+td.linenos pre {
+    padding: 0.5em 0;
+}
+
+div.quotebar {
+    background-color: #f8f8f8;
+    max-width: 250px;
+    float: right;
+    padding: 2px 7px;
+    border: 1px solid #ccc;
+}
+
+div.topic {
+    background-color: #f8f8f8;
+}
+
+table {
+    border-collapse: collapse;
+    margin: 0 -0.5em 0 -0.5em;
+}
+
+table td, table th {
+    padding: 0.2em 0.5em 0.2em 0.5em;
+}
+
+div.admonition, div.warning {
+    font-size: 0.9em;
+    margin: 1em 0 1em 0;
+    border: 1px solid #86989B;
+    background-color: #f7f7f7;
+    padding: 0;
+}
+
+div.admonition p, div.warning p {
+    margin: 0.5em 1em 0.5em 1em;
+    padding: 0;
+}
+
+div.admonition pre, div.warning pre {
+    margin: 0.4em 1em 0.4em 1em;
+}
+
+div.admonition p.admonition-title,
+div.warning p.admonition-title {
+    margin: 0;
+    padding: 0.1em 0 0.1em 0.5em;
+    color: white;
+    border-bottom: 1px solid #86989B;
+    font-weight: bold;
+    background-color: #AFC1C4;
+}
+
+div.warning {
+    border: 1px solid #940000;
+}
+
+div.warning p.admonition-title {
+    background-color: #CF0000;
+    border-bottom-color: #940000;
+}
+
+div.admonition ul, div.admonition ol,
+div.warning ul, div.warning ol {
+    margin: 0.1em 0.5em 0.5em 3em;
+    padding: 0;
+}
+
+div.versioninfo {
+    margin: 1em 0 0 0;
+    border: 1px solid #ccc;
+    background-color: #DDEAF0;
+    padding: 8px;
+    line-height: 1.3em;
+    font-size: 0.9em;
+}
+
+.viewcode-back {
+    font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+                 'Verdana', sans-serif;
+}
+
+div.viewcode-block:target {
+    background-color: #f4debf;
+    border-top: 1px solid #ac9;
+    border-bottom: 1px solid #ac9;
+}
diff --git a/doc/sphinx/theme/lmfitdoc/theme.conf b/doc/sphinx/theme/lmfitdoc/theme.conf
index 82db5fb..d3bfaad 100644
--- a/doc/sphinx/theme/lmfitdoc/theme.conf
+++ b/doc/sphinx/theme/lmfitdoc/theme.conf
@@ -1,4 +1,4 @@
-[theme]
-inherit = basic
-stylesheet = lmfitdoc.css
-pygments_style = friendly
+[theme]
+inherit = basic
+stylesheet = lmfitdoc.css
+pygments_style = friendly
diff --git a/doc/support.rst b/doc/support.rst
index 9a08328..4fe2c3e 100644
--- a/doc/support.rst
+++ b/doc/support.rst
@@ -1,30 +1,30 @@
-.. _support_chapter:
-
-===========================
-Getting Help
-===========================
-
-.. _mailing list:  https://groups.google.com/group/lmfit-py
-.. _github issues: https://github.com/lmfit/lmfit-py/issues
-
-If you have questions, comments, or suggestions for LMFIT, please use the
-`mailing list`_.  This provides an on-line conversation that is and
-archived well and can be searched well with standard web searches.  If you
-find a bug with the code or documentation, use the `github issues`_ Issue
-tracker to submit a report.  If you have an idea for how to solve the
-problem and are familiar with python and github, submitting a github Pull
-Request would be greatly appreciated.
-
-If you are unsure whether to use the mailing list or the Issue tracker,
-please start a conversation on the `mailing list`_.  That is, the problem
-you're having may or may not be due to a bug.  If it is due to a bug,
-creating an Issue from the conversation is easy.  If it is not a bug, the
-problem will be discussed and then the Issue will be closed.  While one
-*can* search through closed Issues on github, these are not so easily
-searched, and the conversation is not easily useful to others later.
-Starting the conversation on the mailing list with "How do I do this?" or
-"Why didn't this work?" instead of "This should work and doesn't" is
-generally preferred, and will better help others with similar questions.
-Of course, there is not always an obvious way to decide if something is a
-Question or an Issue, and we will try our best to engage in all
-discussions.
+.. _support_chapter:
+
+===========================
+Getting Help
+===========================
+
+.. _mailing list:  https://groups.google.com/group/lmfit-py
+.. _github issues: https://github.com/lmfit/lmfit-py/issues
+
+If you have questions, comments, or suggestions for LMFIT, please use the
+`mailing list`_.  This provides an on-line conversation that is archived
+and can be searched with standard web searches.  If you find a bug in the
+code or documentation, use the `github issues`_ Issue tracker to submit a
+report.  If you have an idea for how to solve the
+problem and are familiar with python and github, submitting a github Pull
+Request would be greatly appreciated.
+
+If you are unsure whether to use the mailing list or the Issue tracker,
+please start a conversation on the `mailing list`_.  The problem you're
+having may or may not be due to a bug.  If it is, creating an Issue from
+the conversation is easy.  If it is not, an Issue opened for it would
+simply be discussed and then closed.  While one *can* search through
+closed Issues on github, they are not as easily searched as the mailing
+list archive, and the conversation is less useful to others later.
+Starting the conversation on the mailing list with "How do I do this?" or
+"Why didn't this work?" instead of "This should work and doesn't" is
+generally preferred, and will better help others with similar questions.
+Of course, there is not always an obvious way to decide if something is a
+Question or an Issue, and we will try our best to engage in all
+discussions.
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
index 2d94a51..125f2c7 100644
--- a/doc/whatsnew.rst
+++ b/doc/whatsnew.rst
@@ -1,97 +1,97 @@
-.. _whatsnew_chapter:
-
-=====================
-Release Notes
-=====================
-
-.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
-
-This section discusses changes between versions, especially significant
-changes to the use and behavior of the library.  This is not meant to be a
-comprehensive list of changes.  For such a complete record, consult the
-`lmfit github repository`_.
-
-.. _whatsnew_090_label:
-
-Version 0.9.0 Release Notes
-==========================================
-
-This upgrade makes an important, non-backward-compatible change to the way
-many fitting scripts and programs will work.  Scripts that work with
-version 0.8.3 will not work with version 0.9.0 and vice versa.  The change
-was not made lightly or without ample discussion, and is really an
-improvement.  Modifying scripts that did work with 0.8.3 to work with 0.9.0
-is easy, but needs to be done.
-
-
-
-Summary
-~~~~~~~~~~~~
-
-The upgrade from 0.8.3 to 0.9.0 introduced the :class:`MinimizerResult`
-class (see :ref:`fit-results-label`) which is now used to hold the return
-value from :func:`minimize` and :meth:`Minimizer.minimize`.  This returned
-object contains many goodness of fit statistics, and holds the optimized
-parameters from the fit.  Importantly, the parameters passed into
-:func:`minimize` and :meth:`Minimizer.minimize` are no longer modified by
-the fit. Instead, a copy of the passed-in parameters is made which is
-changed and returns as the :attr:`params` attribute of the returned
-:class:`MinimizerResult`.
-
-
-Impact
-~~~~~~~~~~~~~
-
-This upgrade means that a script that does::
-
-    my_pars = Parameters()
-    my_pars.add('amp',    value=300.0, min=0)
-    my_pars.add('center', value=  5.0, min=0, max=10)
-    my_pars.add('decay',  value=  1.0, vary=False)
-
-    result = minimize(objfunc, my_pars)
-
-will still work, but that ``my_pars`` will **NOT** be changed by the fit.
-Instead, ``my_pars`` is copied to an internal set of parameters that is
-changed in the fit, and this copy is then put in ``result.params``.  To
-look at fit results, use ``result.params``, not ``my_pars``.
-
-This has the effect that ``my_pars`` will still hold the starting parameter
-values, while all of the results from the fit are held in the ``result``
-object returned by :func:`minimize`.
-
-If you want to do an initial fit, then refine that fit to, for example, do
-a pre-fit, then refine that result different fitting method, such as::
-
-    result1 = minimize(objfunc, my_pars, method='nelder')
-    result1.params['decay'].vary = True
-    result2 = minimize(objfunc, result1.params, method='leastsq')
-
-and have access to all of the starting parameters ``my_pars``, the result of the
-first fit ``result1``, and the result of the final fit ``result2``.
-
-
-
-Discussion
-~~~~~~~~~~~~~~
-
-The main goal for making this change were to
-
-   1. give a better return value to :func:`minimize` and
-      :meth:`Minimizer.minimize` that can hold all of the information
-      about a fit.  By having the return value be an instance of the
-      :class:`MinimizerResult` class, it can hold an arbitrary amount of
-      information that is easily accessed by attribute name, and even
-      be given methods.  Using objects is good!
-
-   2. To limit or even elimate the amount of "state information" a
-      :class:`Minimizer` holds.  By state information, we mean how much of
-      the previous fit is remembered after a fit is done.  Keeping (and
-      especially using) such information about a previous fit means that
-      a :class:`Minimizer` might give different results even for the same
-      problem if run a second time.  While it's desirable to be able to
-      adjust a set of :class:`Parameters` re-run a fit to get an improved
-      result, doing this by changing an *internal attribute
-      (:attr:`Minimizer.params`) has the undesirable side-effect of not
-      being able to "go back", and makes it somewhat cumbersome to keep
-      track of changes made while adjusting parameters and re-running fits.
+.. _whatsnew_chapter:
+
+=====================
+Release Notes
+=====================
+
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
+
+This section discusses changes between versions, especially significant
+changes to the use and behavior of the library.  This is not meant to be a
+comprehensive list of changes.  For such a complete record, consult the
+`lmfit github repository`_.
+
+.. _whatsnew_090_label:
+
+Version 0.9.0 Release Notes
+==========================================
+
+This upgrade makes an important, non-backward-compatible change to the way
+many fitting scripts and programs will work.  Scripts that work with
+version 0.8.3 will not work with version 0.9.0 and vice versa.  The change
+was not made lightly or without ample discussion, and is really an
+improvement.  Modifying scripts that did work with 0.8.3 to work with 0.9.0
+is easy, but needs to be done.
+
+
+
+Summary
+~~~~~~~~~~~~
+
+The upgrade from 0.8.3 to 0.9.0 introduced the :class:`MinimizerResult`
+class (see :ref:`fit-results-label`) which is now used to hold the return
+value from :func:`minimize` and :meth:`Minimizer.minimize`.  This returned
+object contains many goodness-of-fit statistics, and holds the optimized
+parameters from the fit.  Importantly, the parameters passed into
+:func:`minimize` and :meth:`Minimizer.minimize` are no longer modified by
+the fit.  Instead, a copy of the passed-in parameters is made; this copy
+is changed by the fit and returned as the :attr:`params` attribute of the
+returned :class:`MinimizerResult`.
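+
+For example, after a fit one can inspect the returned object directly (a
+minimal sketch; ``objfunc`` and ``my_pars`` are as in the example below)::
+
+    result = minimize(objfunc, my_pars)
+    print(result.success, result.nfev)    # fit status and evaluation count
+    print(result.chisqr, result.redchi)   # goodness-of-fit statistics
+    print(result.params['amp'].value, result.params['amp'].stderr)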
+
+
+Impact
+~~~~~~~~~~~~~
+
+This upgrade means that a script that does::
+
+    my_pars = Parameters()
+    my_pars.add('amp',    value=300.0, min=0)
+    my_pars.add('center', value=  5.0, min=0, max=10)
+    my_pars.add('decay',  value=  1.0, vary=False)
+
+    result = minimize(objfunc, my_pars)
+
+will still work, but that ``my_pars`` will **NOT** be changed by the fit.
+Instead, ``my_pars`` is copied to an internal set of parameters that is
+changed in the fit, and this copy is then put in ``result.params``.  To
+look at fit results, use ``result.params``, not ``my_pars``.
+
+This has the effect that ``my_pars`` will still hold the starting parameter
+values, while all of the results from the fit are held in the ``result``
+object returned by :func:`minimize`.
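+
+For example (a minimal sketch, using ``my_pars`` and ``result`` from
+above)::
+
+    print(my_pars['amp'].value)         # still the starting value, 300.0
+    print(result.params['amp'].value)   # the best-fit value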
+
+If you want to do an initial fit and then refine it, for example doing a
+quick pre-fit with one method and then refining that result with a
+different fitting method, you can do something like::
+
+    result1 = minimize(objfunc, my_pars, method='nelder')
+    result1.params['decay'].vary = True
+    result2 = minimize(objfunc, result1.params, method='leastsq')
+
+and have access to all of the starting parameters ``my_pars``, the result of the
+first fit ``result1``, and the result of the final fit ``result2``.
+
+
+
+Discussion
+~~~~~~~~~~~~~~
+
+The main goals for making this change were to:
+
+1. give a better return value to :func:`minimize` and
+   :meth:`Minimizer.minimize` that can hold all of the information
+   about a fit.  By having the return value be an instance of the
+   :class:`MinimizerResult` class, it can hold an arbitrary amount of
+   information that is easily accessed by attribute name, and even
+   be given methods.  Using objects is good!
+
+2. limit or even eliminate the amount of "state information" a
+   :class:`Minimizer` holds.  By state information, we mean how much of
+   the previous fit is remembered after a fit is done.  Keeping (and
+   especially using) such information about a previous fit means that
+   a :class:`Minimizer` might give different results even for the same
+   problem if run a second time.  While it's desirable to be able to
+   adjust a set of :class:`Parameters` and re-run a fit to get an improved
+   result, doing this by changing an internal attribute
+   (:attr:`Minimizer.params`) has the undesirable side-effect of not
+   being able to "go back", and makes it somewhat cumbersome to keep
+   track of changes made while adjusting parameters and re-running fits.
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index df6a441..30d0d6b 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -1,53 +1,53 @@
-"""
-Lmfit provides a high-level interface to non-linear optimization and curve
-fitting problems for Python. Lmfit builds on Levenberg-Marquardt algorithm of
-scipy.optimize.leastsq(), but also supports most of the optimization methods
-from scipy.optimize.  It has a number of useful enhancements, including:
-
-  * Using Parameter objects instead of plain floats as variables.  A Parameter
-    has a value that can be varied in the fit, fixed, have upper and/or lower
-    bounds.  It can even have a value that is constrained by an algebraic
-    expression of other Parameter values.
-
-  * Ease of changing fitting algorithms.  Once a fitting model is set up, one
-    can change the fitting algorithm without changing the objective function.
-
-  * Improved estimation of confidence intervals.  While
-    scipy.optimize.leastsq() will automatically calculate uncertainties and
-    correlations from the covariance matrix, lmfit also has functions to
-    explicitly explore parameter space to determine confidence levels even for
-    the most difficult cases.
-
-  * Improved curve-fitting with the Model class.  This which extends the
-    capabilities of scipy.optimize.curve_fit(), allowing you to turn a function
-    that models for your data into a python class that helps you parametrize
-    and fit data with that model.
-
-  * Many pre-built models for common lineshapes are included and ready to use.
-
-   version: 0.8.0
-   last update: 2014-Sep-21
-   License: MIT
-   Authors:  Matthew Newville, The University of Chicago
-             Till Stensitzki, Freie Universitat Berlin
-             Daniel B. Allen, Johns Hopkins University
-             Antonino Ingargiola, University of California, Los Angeles
-"""
-
-from .minimizer import minimize, Minimizer, MinimizerException
-from .parameter import Parameter, Parameters
-from .confidence import conf_interval, conf_interval2d
-from .printfuncs import (fit_report, ci_report,
-                         report_fit, report_ci, report_errors)
-
-from .model import Model, CompositeModel
-from . import models
-
-from . import uncertainties
-from .uncertainties import ufloat, correlated_values
-
-
-## versioneer code
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
+"""
+Lmfit provides a high-level interface to non-linear optimization and curve
+fitting problems for Python. Lmfit builds on the Levenberg-Marquardt algorithm of
+scipy.optimize.leastsq(), but also supports most of the optimization methods
+from scipy.optimize.  It has a number of useful enhancements, including:
+
+  * Using Parameter objects instead of plain floats as variables.  A Parameter
+    has a value that can be varied in the fit or held fixed, and can have
+    upper and/or lower bounds.  It can even be constrained by an algebraic
+    expression of other Parameter values.
+
+  * Ease of changing fitting algorithms.  Once a fitting model is set up, one
+    can change the fitting algorithm without changing the objective function.
+
+  * Improved estimation of confidence intervals.  While
+    scipy.optimize.leastsq() will automatically calculate uncertainties and
+    correlations from the covariance matrix, lmfit also has functions to
+    explicitly explore parameter space to determine confidence levels even for
+    the most difficult cases.
+
+  * Improved curve-fitting with the Model class.  This extends the
+    capabilities of scipy.optimize.curve_fit(), allowing you to turn a function
+    that models your data into a python class that helps you parametrize
+    and fit data with that model.
+
+  * Many pre-built models for common lineshapes are included and ready to use.
+
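+  A minimal usage sketch (a sketch only; it assumes numpy arrays x and data
+  are already defined, and the parameter names are illustrative):
+
+      import numpy as np
+      from lmfit import minimize, Parameters
+
+      def residual(params, x, data):
+          # single-exponential decay model; return the residual array
+          amp = params['amp'].value
+          decay = params['decay'].value
+          return data - amp * np.exp(-x / decay)
+
+      params = Parameters()
+      params.add('amp', value=10.0, min=0)
+      params.add('decay', value=1.0)
+      result = minimize(residual, params, args=(x, data))
+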
+   version: 0.8.0
+   last update: 2014-Sep-21
+   License: MIT
+   Authors:  Matthew Newville, The University of Chicago
+             Till Stensitzki, Freie Universitat Berlin
+             Daniel B. Allen, Johns Hopkins University
+             Antonino Ingargiola, University of California, Los Angeles
+"""
+
+from .minimizer import minimize, Minimizer, MinimizerException
+from .parameter import Parameter, Parameters
+from .confidence import conf_interval, conf_interval2d
+from .printfuncs import (fit_report, ci_report,
+                         report_fit, report_ci, report_errors)
+
+from .model import Model, CompositeModel
+from . import models
+
+from . import uncertainties
+from .uncertainties import ufloat, correlated_values
+
+
+## versioneer code
+from ._version import get_versions
+__version__ = get_versions()['version']
+del get_versions
diff --git a/lmfit/_differentialevolution.py b/lmfit/_differentialevolution.py
index 48f6336..1e1fb66 100644
--- a/lmfit/_differentialevolution.py
+++ b/lmfit/_differentialevolution.py
@@ -1,750 +1,750 @@
-"""
-differential_evolution: The differential evolution global optimization algorithm
-Added by Andrew Nelson 2014
-"""
-from __future__ import division, print_function, absolute_import
-import numpy as np
-from scipy.optimize import minimize
-from scipy.optimize.optimize import _status_message
-import numbers
-
-__all__ = ['differential_evolution']
-
-_MACHEPS = np.finfo(np.float64).eps
-
-
-#------------------------------------------------------------------------------
-# scipy.optimize does not contain OptimizeResult until 0.14. Include here as a
-# fix for scipy < 0.14.
-
-class OptimizeResult(dict):
-    """ Represents the optimization result.
-    Attributes
-    ----------
-    x : ndarray
-        The solution of the optimization.
-    success : bool
-        Whether or not the optimizer exited successfully.
-    status : int
-        Termination status of the optimizer. Its value depends on the
-        underlying solver. Refer to `message` for details.
-    message : str
-        Description of the cause of the termination.
-    fun, jac, hess, hess_inv : ndarray
-        Values of objective function, Jacobian, Hessian or its inverse (if
-        available). The Hessians may be approximations, see the documentation
-        of the function in question.
-    nfev, njev, nhev : int
-        Number of evaluations of the objective functions and of its
-        Jacobian and Hessian.
-    nit : int
-        Number of iterations performed by the optimizer.
-    maxcv : float
-        The maximum constraint violation.
-    Notes
-    -----
-    There may be additional attributes not listed above depending of the
-    specific solver. Since this class is essentially a subclass of dict
-    with attribute accessors, one can see which attributes are available
-    using the `keys()` method.
-    """
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-    __setattr__ = dict.__setitem__
-    __delattr__ = dict.__delitem__
-
-    def __repr__(self):
-        if self.keys():
-            m = max(map(len, list(self.keys()))) + 1
-            return '\n'.join([k.rjust(m) + ': ' + repr(v)
-                              for k, v in self.items()])
-        else:
-            return self.__class__.__name__ + "()"
-#------------------------------------------------------------------------------
-
-
-def differential_evolution(func, bounds, args=(), strategy='best1bin',
-                           maxiter=None, popsize=15, tol=0.01,
-                           mutation=(0.5, 1), recombination=0.7, seed=None,
-                           callback=None, disp=False, polish=True,
-                           init='latinhypercube'):
-    """Finds the global minimum of a multivariate function.
-    Differential Evolution is stochastic in nature (does not use gradient
-    methods) to find the minimium, and can search large areas of candidate
-    space, but often requires larger numbers of function evaluations than
-    conventional gradient based techniques.
-
-    The algorithm is due to Storn and Price [1]_.
-
-    Parameters
-    ----------
-    func : callable
-        The objective function to be minimized.  Must be in the form
-        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
-        and ``args`` is a  tuple of any additional fixed parameters needed to
-        completely specify the function.
-    bounds : sequence
-        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
-        defining the lower and upper bounds for the optimizing argument of
-        `func`. It is required to have ``len(bounds) == len(x)``.
-        ``len(bounds)`` is used to determine the number of parameters in ``x``.
-    args : tuple, optional
-        Any additional fixed parameters needed to
-        completely specify the objective function.
-    strategy : str, optional
-        The differential evolution strategy to use. Should be one of:
-
-            - 'best1bin'
-            - 'best1exp'
-            - 'rand1exp'
-            - 'randtobest1exp'
-            - 'best2exp'
-            - 'rand2exp'
-            - 'randtobest1bin'
-            - 'best2bin'
-            - 'rand2bin'
-            - 'rand1bin'
-
-        The default is 'best1bin'.
-    maxiter : int, optional
-        The maximum number of times the entire population is evolved.
-        The maximum number of function evaluations is:
-        ``maxiter * popsize * len(x)``
-    popsize : int, optional
-        A multiplier for setting the total population size.  The population has
-        ``popsize * len(x)`` individuals.
-    tol : float, optional
-        When the mean of the population energies, multiplied by tol,
-        divided by the standard deviation of the population energies
-        is greater than 1 the solving process terminates:
-        ``convergence = mean(pop) * tol / stdev(pop) > 1``
-    mutation : float or tuple(float, float), optional
-        The mutation constant.
-        If specified as a float it should be in the range [0, 2].
-        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
-        randomly changes the mutation constant on a generation by generation
-        basis. The mutation constant for that generation is taken from
-        ``U[min, max)``. Dithering can help speed convergence significantly.
-        Increasing the mutation constant increases the search radius, but will
-        slow down convergence.
-    recombination : float, optional
-        The recombination constant, should be in the range [0, 1]. Increasing
-        this value allows a larger number of mutants to progress into the next
-        generation, but at the risk of population stability.
-    seed : int or `np.random.RandomState`, optional
-        If `seed` is not specified the `np.RandomState` singleton is used.
-        If `seed` is an int, a new `np.random.RandomState` instance is used,
-        seeded with seed.
-        If `seed` is already a `np.random.RandomState instance`, then that
-        `np.random.RandomState` instance is used.
-        Specify `seed` for repeatable minimizations.
-    disp : bool, optional
-        Display status messages
-    callback : callable, `callback(xk, convergence=val)`, optional:
-        A function to follow the progress of the minimization. ``xk`` is
-        the current value of ``x0``. ``val`` represents the fractional
-        value of the population convergence.  When ``val`` is greater than one
-        the function halts. If callback returns `True`, then the minimization
-        is halted (any polishing is still carried out).
-    polish : bool, optional
-        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
-        method is used to polish the best population member at the end, which
-        can improve the minimization slightly.
-    init : string, optional
-        Specify how the population initialization is performed. Should be
-        one of:
-
-            - 'latinhypercube'
-            - 'random'
-
-        The default is 'latinhypercube'. Latin Hypercube sampling tries to
-        maximize coverage of the available parameter space. 'random' initializes
-        the population randomly - this has the drawback that clustering can
-        occur, preventing the whole of parameter space being covered.
-
-    Returns
-    -------
-    res : OptimizeResult
-        The optimization result represented as a `OptimizeResult` object.
-        Important attributes are: ``x`` the solution array, ``success`` a
-        Boolean flag indicating if the optimizer exited successfully and
-        ``message`` which describes the cause of the termination. See
-        `OptimizeResult` for a description of other attributes. If `polish`
-        was employed, then OptimizeResult also contains the `jac` attribute.
-
-    Notes
-    -----
-    Differential evolution is a stochastic population based method that is
-    useful for global optimization problems. At each pass through the population
-    the algorithm mutates each candidate solution by mixing with other candidate
-    solutions to create a trial candidate. There are several strategies [2]_ for
-    creating trial candidates, which suit some problems more than others. The
-    'best1bin' strategy is a good starting point for many systems. In this
-    strategy two members of the population are randomly chosen. Their difference
-    is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
-    so far:
-
-    .. math::
-
-        b' = b_0 + mutation * (population[rand0] - population[rand1])
-
-    A trial vector is then constructed. Starting with a randomly chosen 'i'th
-    parameter the trial is sequentially filled (in modulo) with parameters from
-    `b'` or the original candidate. The choice of whether to use `b'` or the
-    original candidate is made with a binomial distribution (the 'bin' in
-    'best1bin') - a random number in [0, 1) is generated.  If this number is
-    less than the `recombination` constant then the parameter is loaded from
-    `b'`, otherwise it is loaded from the original candidate.  The final
-    parameter is always loaded from `b'`.  Once the trial candidate is built
-    its fitness is assessed. If the trial is better than the original candidate
-    then it takes its place. If it is also better than the best overall
-    candidate it also replaces that.
-    To improve your chances of finding a global minimum use higher `popsize`
-    values, with higher `mutation` and (dithering), but lower `recombination`
-    values. This has the effect of widening the search radius, but slowing
-    convergence.
-
-    .. versionadded:: 0.15.0
-
-    Examples
-    --------
-    Let us consider the problem of minimizing the Rosenbrock function. This
-    function is implemented in `rosen` in `scipy.optimize`.
-
-    >>> from scipy.optimize import rosen, differential_evolution
-    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
-    >>> result = differential_evolution(rosen, bounds)
-    >>> result.x, result.fun
-    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
-
-    Next find the minimum of the Ackley function
-    (http://en.wikipedia.org/wiki/Test_functions_for_optimization).
-
-    >>> from scipy.optimize import differential_evolution
-    >>> import numpy as np
-    >>> def ackley(x):
-    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
-    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
-    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
-    >>> bounds = [(-5, 5), (-5, 5)]
-    >>> result = differential_evolution(ackley, bounds)
-    >>> result.x, result.fun
-    (array([ 0.,  0.]), 4.4408920985006262e-16)
-
-    References
-    ----------
-    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
-           Efficient Heuristic for Global Optimization over Continuous Spaces,
-           Journal of Global Optimization, 1997, 11, 341 - 359.
-    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
-    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
-    """
-
-    solver = DifferentialEvolutionSolver(func, bounds, args=args,
-                                         strategy=strategy, maxiter=maxiter,
-                                         popsize=popsize, tol=tol,
-                                         mutation=mutation,
-                                         recombination=recombination,
-                                         seed=seed, polish=polish,
-                                         callback=callback,
-                                         disp=disp,
-                                         init=init)
-    return solver.solve()
-
-
-class DifferentialEvolutionSolver(object):
-
-    """This class implements the differential evolution solver
-
-    Parameters
-    ----------
-    func : callable
-        The objective function to be minimized.  Must be in the form
-        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
-        and ``args`` is a  tuple of any additional fixed parameters needed to
-        completely specify the function.
-    bounds : sequence
-        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
-        defining the lower and upper bounds for the optimizing argument of
-        `func`. It is required to have ``len(bounds) == len(x)``.
-        ``len(bounds)`` is used to determine the number of parameters in ``x``.
-    args : tuple, optional
-        Any additional fixed parameters needed to
-        completely specify the objective function.
-    strategy : str, optional
-        The differential evolution strategy to use. Should be one of:
-
-            - 'best1bin'
-            - 'best1exp'
-            - 'rand1exp'
-            - 'randtobest1exp'
-            - 'best2exp'
-            - 'rand2exp'
-            - 'randtobest1bin'
-            - 'best2bin'
-            - 'rand2bin'
-            - 'rand1bin'
-
-        The default is 'best1bin'
-
-    maxiter : int, optional
-        The maximum number of times the entire population is evolved. The
-        maximum number of function evaluations is:
-        ``maxiter * popsize * len(x)``
-    popsize : int, optional
-        A multiplier for setting the total population size.  The population has
-        ``popsize * len(x)`` individuals.
-    tol : float, optional
-        When the mean of the population energies, multiplied by tol,
-        divided by the standard deviation of the population energies
-        is greater than 1 the solving process terminates:
-        ``convergence = mean(pop) * tol / stdev(pop) > 1``
-    mutation : float or tuple(float, float), optional
-        The mutation constant.
-        If specified as a float it should be in the range [0, 2].
-        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
-        randomly changes the mutation constant on a generation by generation
-        basis. The mutation constant for that generation is taken from
-        U[min, max). Dithering can help speed convergence significantly.
-        Increasing the mutation constant increases the search radius, but will
-        slow down convergence.
-    recombination : float, optional
-        The recombination constant, should be in the range [0, 1]. Increasing
-        this value allows a larger number of mutants to progress into the next
-        generation, but at the risk of population stability.
-    seed : int or `np.random.RandomState`, optional
-        If `seed` is not specified the `np.random.RandomState` singleton is
-        used.
-        If `seed` is an int, a new `np.random.RandomState` instance is used,
-        seeded with `seed`.
-        If `seed` is already a `np.random.RandomState` instance, then that
-        `np.random.RandomState` instance is used.
-        Specify `seed` for repeatable minimizations.
-    disp : bool, optional
-        Display status messages
-    callback : callable, `callback(xk, convergence=val)`, optional
-        A function to follow the progress of the minimization. ``xk`` is
-        the current value of ``x0``. ``val`` represents the fractional
-        value of the population convergence.  When ``val`` is greater than one
-        the function halts. If callback returns `True`, then the minimization
-        is halted (any polishing is still carried out).
-    polish : bool, optional
-        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
-        is used to polish the best population member at the end. This requires
-        a few more function evaluations.
-    maxfun : int, optional
-        Set the maximum number of function evaluations. However, it probably
-        makes more sense to set `maxiter` instead.
-    init : string, optional
-        Specify which type of population initialization is performed. Should be
-        one of:
-
-            - 'latinhypercube'
-            - 'random'
-    """
-
-    # Dispatch of mutation strategy method (binomial or exponential).
-    _binomial = {'best1bin': '_best1',
-                 'randtobest1bin': '_randtobest1',
-                 'best2bin': '_best2',
-                 'rand2bin': '_rand2',
-                 'rand1bin': '_rand1'}
-    _exponential = {'best1exp': '_best1',
-                    'rand1exp': '_rand1',
-                    'randtobest1exp': '_randtobest1',
-                    'best2exp': '_best2',
-                    'rand2exp': '_rand2'}
-
-    def __init__(self, func, bounds, args=(),
-                 strategy='best1bin', maxiter=None, popsize=15,
-                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
-                 maxfun=None, callback=None, disp=False, polish=True,
-                 init='latinhypercube'):
-
-        if strategy in self._binomial:
-            self.mutation_func = getattr(self, self._binomial[strategy])
-        elif strategy in self._exponential:
-            self.mutation_func = getattr(self, self._exponential[strategy])
-        else:
-            raise ValueError("Please select a valid mutation strategy")
-        self.strategy = strategy
-
-        self.callback = callback
-        self.polish = polish
-        self.tol = tol
-
-        #Mutation constant should be in [0, 2). If specified as a sequence
-        #then dithering is performed.
-        self.scale = mutation
-        if (not np.all(np.isfinite(mutation)) or
-                np.any(np.array(mutation) >= 2) or
-                np.any(np.array(mutation) < 0)):
-            raise ValueError('The mutation constant must be a float in '
-                             'U[0, 2), or specified as a tuple(min, max)'
-                             ' where min < max and min, max are in U[0, 2).')
-
-        self.dither = None
-        if hasattr(mutation, '__iter__') and len(mutation) > 1:
-            self.dither = [mutation[0], mutation[1]]
-            self.dither.sort()
-
-        self.cross_over_probability = recombination
-
-        self.func = func
-        self.args = args
-
-        # convert tuple of lower and upper bounds to limits
-        # [(low_0, high_0), ..., (low_n, high_n]
-        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
-        self.limits = np.array(bounds, dtype='float').T
-        if (np.size(self.limits, 0) != 2
-                or not np.all(np.isfinite(self.limits))):
-            raise ValueError('bounds should be a sequence containing '
-                             'real valued (min, max) pairs for each value'
-                             ' in x')
-
-        self.maxiter = maxiter or 1000
-        self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
-                                  np.size(self.limits, 1)))
-
-        # population is scaled to between [0, 1].
-        # We have to scale between parameter <-> population
-        # save these arguments for _scale_parameter and
-        # _unscale_parameter. This is an optimization
-        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
-        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
-
-        parameter_count = np.size(self.limits, 1)
-        self.random_number_generator = _make_random_gen(seed)
-
-        #default initialization is a latin hypercube design, but there
-        #are other population initializations possible.
-        self.population = np.zeros((popsize * parameter_count,
-                                    parameter_count))
-        if init == 'latinhypercube':
-            self.init_population_lhs()
-        elif init == 'random':
-            self.init_population_random()
-        else:
-            raise ValueError("The population initialization method must be one"
-                             "of 'latinhypercube' or 'random'")
-
-        self.population_energies = np.ones(
-            popsize * parameter_count) * np.inf
-
-        self.disp = disp
-
-    def init_population_lhs(self):
-        """
-        Initializes the population with Latin Hypercube Sampling
-        Latin Hypercube Sampling ensures that the sampling of parameter space
-        is maximised.
-        """
-        samples = np.size(self.population, 0)
-        N = np.size(self.population, 1)
-        rng = self.random_number_generator
-
-        # Generate the intervals
-        segsize = 1.0 / samples
-
-        # Fill points uniformly in each interval
-        rdrange = rng.rand(samples, N) * segsize
-        rdrange += np.atleast_2d(np.arange(0., 1., segsize)).T
-
-        # Make the random pairings
-        self.population = np.zeros_like(rdrange)
-
-        for j in range(N):
-            order = rng.permutation(range(samples))
-            self.population[:, j] = rdrange[order, j]
-
-    def init_population_random(self):
-        """
-        Initialises the population at random.  This type of initialization
-        can possess clustering, Latin Hypercube sampling is generally better.
-        """
-        rng = self.random_number_generator
-        self.population = rng.random_sample(self.population.shape)
-
-    @property
-    def x(self):
-        """
-        The best solution from the solver
-
-        Returns
-        -------
-        x - ndarray
-            The best solution from the solver.
-        """
-        return self._scale_parameters(self.population[0])
-
-    def solve(self):
-        """
-        Runs the DifferentialEvolutionSolver.
-
-        Returns
-        -------
-        res : OptimizeResult
-            The optimization result represented as a ``OptimizeResult`` object.
-            Important attributes are: ``x`` the solution array, ``success`` a
-            Boolean flag indicating if the optimizer exited successfully and
-            ``message`` which describes the cause of the termination. See
-            `OptimizeResult` for a description of other attributes. If polish
-            was employed, then OptimizeResult also contains the ``hess_inv`` and
-            ``jac`` attributes.
-        """
-
-        nfev, nit, warning_flag = 0, 0, False
-        status_message = _status_message['success']
-
-        # calculate energies to start with
-        for index, candidate in enumerate(self.population):
-            parameters = self._scale_parameters(candidate)
-            self.population_energies[index] = self.func(parameters,
-                                                        *self.args)
-            nfev += 1
-
-            if nfev > self.maxfun:
-                warning_flag = True
-                status_message = _status_message['maxfev']
-                break
-
-        minval = np.argmin(self.population_energies)
-
-        # put the lowest energy into the best solution position.
-        lowest_energy = self.population_energies[minval]
-        self.population_energies[minval] = self.population_energies[0]
-        self.population_energies[0] = lowest_energy
-
-        self.population[[0, minval], :] = self.population[[minval, 0], :]
-
-        if warning_flag:
-            return OptimizeResult(
-                           x=self.x,
-                           fun=self.population_energies[0],
-                           nfev=nfev,
-                           nit=nit,
-                           message=status_message,
-                           success=(warning_flag != True))
-
-        # do the optimisation.
-        for nit in range(1, self.maxiter + 1):
-            if self.dither is not None:
-                self.scale = self.random_number_generator.rand(
-                ) * (self.dither[1] - self.dither[0]) + self.dither[0]
-            for candidate in range(np.size(self.population, 0)):
-                if nfev > self.maxfun:
-                    warning_flag = True
-                    status_message = _status_message['maxfev']
-                    break
-
-                trial = self._mutate(candidate)
-                self._ensure_constraint(trial)
-                parameters = self._scale_parameters(trial)
-
-                energy = self.func(parameters, *self.args)
-                nfev += 1
-
-                if energy < self.population_energies[candidate]:
-                    self.population[candidate] = trial
-                    self.population_energies[candidate] = energy
-
-                    if energy < self.population_energies[0]:
-                        self.population_energies[0] = energy
-                        self.population[0] = trial
-
-            # stop when the fractional s.d. of the population is less than tol
-            # of the mean energy
-            convergence = (np.std(self.population_energies) /
-                           np.abs(np.mean(self.population_energies) +
-                                  _MACHEPS))
-
-            if self.disp:
-                print("differential_evolution step %d: f(x)= %g"
-                      % (nit,
-                         self.population_energies[0]))
-
-            if (self.callback and
-                    self.callback(self._scale_parameters(self.population[0]),
-                                  convergence=self.tol / convergence) is True):
-
-                warning_flag = True
-                status_message = ('callback function requested stop early '
-                                  'by returning True')
-                break
-
-            if convergence < self.tol or warning_flag:
-                break
-
-        else:
-            status_message = _status_message['maxiter']
-            warning_flag = True
-
-        DE_result = OptimizeResult(
-            x=self.x,
-            fun=self.population_energies[0],
-            nfev=nfev,
-            nit=nit,
-            message=status_message,
-            success=(warning_flag != True))
-
-        if self.polish:
-            result = minimize(self.func,
-                              np.copy(DE_result.x),
-                              method='L-BFGS-B',
-                              bounds=self.limits.T,
-                              args=self.args)
-
-            nfev += result.nfev
-            DE_result.nfev = nfev
-
-            if result.fun < DE_result.fun:
-                DE_result.fun = result.fun
-                DE_result.x = result.x
-                DE_result.jac = result.jac
-                # to keep internal state consistent
-                self.population_energies[0] = result.fun
-                self.population[0] = self._unscale_parameters(result.x)
-
-        return DE_result
-
-    def _scale_parameters(self, trial):
-        """
-        scale from a number between 0 and 1 to parameters
-        """
-        return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
-
-    def _unscale_parameters(self, parameters):
-        """
-        scale from parameters to a number between 0 and 1.
-        """
-        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
-
-    def _ensure_constraint(self, trial):
-        """
-        make sure the parameters lie between the limits
-        """
-        for index, param in enumerate(trial):
-            if param > 1 or param < 0:
-                trial[index] = self.random_number_generator.rand()
-
-    def _mutate(self, candidate):
-        """
-        create a trial vector based on a mutation strategy
-        """
-        trial = np.copy(self.population[candidate])
-        parameter_count = np.size(trial, 0)
-
-        fill_point = self.random_number_generator.randint(0, parameter_count)
-
-        if (self.strategy == 'randtobest1exp'
-                or self.strategy == 'randtobest1bin'):
-            bprime = self.mutation_func(candidate,
-                                        self._select_samples(candidate, 5))
-        else:
-            bprime = self.mutation_func(self._select_samples(candidate, 5))
-
-        if self.strategy in self._binomial:
-            crossovers = self.random_number_generator.rand(parameter_count)
-            crossovers = crossovers < self.cross_over_probability
-            # the last one is always from the bprime vector for binomial
-            # If you fill in modulo with a loop you have to set the last one to
-            # true. If you don't use a loop then you can have any random entry
-            # be True.
-            crossovers[fill_point] = True
-            trial = np.where(crossovers, bprime, trial)
-            return trial
-
-        elif self.strategy in self._exponential:
-            i = 0
-            while (i < parameter_count and
-                   self.random_number_generator.rand() <
-                   self.cross_over_probability):
-
-                trial[fill_point] = bprime[fill_point]
-                fill_point = (fill_point + 1) % parameter_count
-                i += 1
-
-            return trial
-
-    def _best1(self, samples):
-        """
-        best1bin, best1exp
-        """
-        r0, r1 = samples[:2]
-        return (self.population[0] + self.scale *
-                (self.population[r0] - self.population[r1]))
-
-    def _rand1(self, samples):
-        """
-        rand1bin, rand1exp
-        """
-        r0, r1, r2 = samples[:3]
-        return (self.population[r0] + self.scale *
-                (self.population[r1] - self.population[r2]))
-
-    def _randtobest1(self, candidate, samples):
-        """
-        randtobest1bin, randtobest1exp
-        """
-        r0, r1 = samples[:2]
-        bprime = np.copy(self.population[candidate])
-        bprime += self.scale * (self.population[0] - bprime)
-        bprime += self.scale * (self.population[r0] -
-                                self.population[r1])
-        return bprime
-
-    def _best2(self, samples):
-        """
-        best2bin, best2exp
-        """
-        r0, r1, r2, r3 = samples[:4]
-        bprime = (self.population[0] + self.scale *
-                            (self.population[r0] + self.population[r1]
-                           - self.population[r2] - self.population[r3]))
-
-        return bprime
-
-    def _rand2(self, samples):
-        """
-        rand2bin, rand2exp
-        """
-        r0, r1, r2, r3, r4 = samples
-        bprime = (self.population[r0] + self.scale *
-                 (self.population[r1] + self.population[r2] -
-                  self.population[r3] - self.population[r4]))
-
-        return bprime
-
-    def _select_samples(self, candidate, number_samples):
-        """
-        obtain random integers from range(np.size(self.population, 0)),
-        without replacement.  You can't have the original candidate either.
-        """
-        idxs = list(range(np.size(self.population, 0)))
-        idxs.remove(candidate)
-        self.random_number_generator.shuffle(idxs)
-        idxs = idxs[:number_samples]
-        return idxs
-
-
-def _make_random_gen(seed):
-    """Turn seed into a np.random.RandomState instance
-
-    If seed is None, return the RandomState singleton used by np.random.
-    If seed is an int, return a new RandomState instance seeded with seed.
-    If seed is already a RandomState instance, return it.
-    Otherwise raise ValueError.
-    """
-    if seed is None or seed is np.random:
-        return np.random.mtrand._rand
-    if isinstance(seed, (numbers.Integral, np.integer)):
-        return np.random.RandomState(seed)
-    if isinstance(seed, np.random.RandomState):
-        return seed
-    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
-                     ' instance' % seed)
+"""
+differential_evolution: The differential evolution global optimization algorithm
+Added by Andrew Nelson 2014
+"""
+from __future__ import division, print_function, absolute_import
+import numpy as np
+from scipy.optimize import minimize
+from scipy.optimize.optimize import _status_message
+import numbers
+
+__all__ = ['differential_evolution']
+
+_MACHEPS = np.finfo(np.float64).eps
+
+
+#------------------------------------------------------------------------------
+# scipy.optimize does not contain OptimizeResult until 0.14. Include here as a
+# fix for scipy < 0.14.
+
+class OptimizeResult(dict):
+    """ Represents the optimization result.
+    Attributes
+    ----------
+    x : ndarray
+        The solution of the optimization.
+    success : bool
+        Whether or not the optimizer exited successfully.
+    status : int
+        Termination status of the optimizer. Its value depends on the
+        underlying solver. Refer to `message` for details.
+    message : str
+        Description of the cause of the termination.
+    fun, jac, hess, hess_inv : ndarray
+        Values of objective function, Jacobian, Hessian or its inverse (if
+        available). The Hessians may be approximations, see the documentation
+        of the function in question.
+    nfev, njev, nhev : int
+        Number of evaluations of the objective functions and of its
+        Jacobian and Hessian.
+    nit : int
+        Number of iterations performed by the optimizer.
+    maxcv : float
+        The maximum constraint violation.
+    Notes
+    -----
+    There may be additional attributes not listed above depending of the
+    specific solver. Since this class is essentially a subclass of dict
+    with attribute accessors, one can see which attributes are available
+    using the `keys()` method.
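+
+    Examples
+    --------
+    A small sketch of the dict-with-attribute-access behavior:
+
+    >>> res = OptimizeResult(x=1.0, success=True)
+    >>> res.x
+    1.0
+    >>> sorted(res.keys())
+    ['success', 'x']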
+    """
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+    def __repr__(self):
+        if self.keys():
+            m = max(map(len, list(self.keys()))) + 1
+            return '\n'.join([k.rjust(m) + ': ' + repr(v)
+                              for k, v in self.items()])
+        else:
+            return self.__class__.__name__ + "()"
+#------------------------------------------------------------------------------
+
+
+def differential_evolution(func, bounds, args=(), strategy='best1bin',
+                           maxiter=None, popsize=15, tol=0.01,
+                           mutation=(0.5, 1), recombination=0.7, seed=None,
+                           callback=None, disp=False, polish=True,
+                           init='latinhypercube'):
+    """Finds the global minimum of a multivariate function.
+    Differential Evolution is stochastic in nature (does not use gradient
+    methods) to find the minimum, and can search large areas of candidate
+    space, but often requires larger numbers of function evaluations than
+    conventional gradient-based techniques.
+
+    The algorithm is due to Storn and Price [1]_.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.  Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a  tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence
+        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
+        defining the lower and upper bounds for the optimizing argument of
+        `func`. It is required to have ``len(bounds) == len(x)``.
+        ``len(bounds)`` is used to determine the number of parameters in ``x``.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+    maxiter : int, optional
+        The maximum number of times the entire population is evolved.
+        The maximum number of function evaluations is:
+        ``maxiter * popsize * len(x)``
+    popsize : int, optional
+        A multiplier for setting the total population size.  The population has
+        ``popsize * len(x)`` individuals.
+    tol : float, optional
+        When the mean of the population energies, multiplied by tol,
+        divided by the standard deviation of the population energies
+        is greater than 1 the solving process terminates:
+        ``convergence = mean(pop) * tol / stdev(pop) > 1``
+    mutation : float or tuple(float, float), optional
+        The mutation constant.
+        If specified as a float it should be in the range [0, 2].
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        ``U[min, max)``. Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. Increasing
+        this value allows a larger number of mutants to progress into the next
+        generation, but at the risk of population stability.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is
+        used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with `seed`.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Display status messages.
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the current value of ``x0``. ``val`` represents the fractional
+        value of the population convergence.  When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end, which
+        can improve the minimization slightly.
+    init : string, optional
+        Specify how the population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'random'
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space. 'random' initializes
+        the population randomly - this has the drawback that clustering can
+        occur, preventing the whole of parameter space being covered.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as an `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes. If `polish`
+        was employed, then OptimizeResult also contains the `jac` attribute.
+
+    Notes
+    -----
+    Differential evolution is a stochastic population based method that is
+    useful for global optimization problems. At each pass through the population
+    the algorithm mutates each candidate solution by mixing with other candidate
+    solutions to create a trial candidate. There are several strategies [2]_ for
+    creating trial candidates, which suit some problems more than others. The
+    'best1bin' strategy is a good starting point for many systems. In this
+    strategy two members of the population are randomly chosen. Their difference
+    is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
+    so far:
+
+    .. math::
+
+        b' = b_0 + mutation * (population[rand0] - population[rand1])
+
+    A trial vector is then constructed. Starting with a randomly chosen 'i'th
+    parameter the trial is sequentially filled (in modulo) with parameters from
+    `b'` or the original candidate. The choice of whether to use `b'` or the
+    original candidate is made with a binomial distribution (the 'bin' in
+    'best1bin') - a random number in [0, 1) is generated.  If this number is
+    less than the `recombination` constant then the parameter is loaded from
+    `b'`, otherwise it is loaded from the original candidate.  The final
+    parameter is always loaded from `b'`.  Once the trial candidate is built
+    its fitness is assessed. If the trial is better than the original candidate
+    then it takes its place. If it is also better than the best overall
+    candidate it also replaces that.
+
+    To improve your chances of finding a global minimum, use higher `popsize`
+    values with higher `mutation` (and dithering), but lower `recombination`
+    values. This has the effect of widening the search radius but slowing
+    convergence.
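+
+    As an illustrative sketch only (not the library's implementation;
+    ``pop``, ``candidate`` and ``rng`` are assumed names), a 'best1bin'
+    trial can be constructed as::
+
+        import numpy as np
+
+        def best1bin_trial(pop, candidate, mutation, recombination, rng):
+            # pop[0] holds the best member; pick two other members
+            r0, r1 = rng.choice(len(pop), 2, replace=False)
+            bprime = pop[0] + mutation * (pop[r0] - pop[r1])
+            cross = rng.rand(len(candidate)) < recombination
+            cross[rng.randint(len(candidate))] = True  # forced from b'
+            return np.where(cross, bprime, candidate)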
+
+    .. versionadded:: 0.15.0
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function is implemented in `rosen` in `scipy.optimize`.
+
+    >>> from scipy.optimize import rosen, differential_evolution
+    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = differential_evolution(rosen, bounds)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Next find the minimum of the Ackley function
+    (http://en.wikipedia.org/wiki/Test_functions_for_optimization).
+
+    >>> from scipy.optimize import differential_evolution
+    >>> import numpy as np
+    >>> def ackley(x):
+    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
+    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
+    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
+    >>> bounds = [(-5, 5), (-5, 5)]
+    >>> result = differential_evolution(ackley, bounds)
+    >>> result.x, result.fun
+    (array([ 0.,  0.]), 4.4408920985006262e-16)
+
+    References
+    ----------
+    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
+           Efficient Heuristic for Global Optimization over Continuous Spaces,
+           Journal of Global Optimization, 1997, 11, 341 - 359.
+    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
+    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
+    """
+
+    solver = DifferentialEvolutionSolver(func, bounds, args=args,
+                                         strategy=strategy, maxiter=maxiter,
+                                         popsize=popsize, tol=tol,
+                                         mutation=mutation,
+                                         recombination=recombination,
+                                         seed=seed, polish=polish,
+                                         callback=callback,
+                                         disp=disp,
+                                         init=init)
+    return solver.solve()
+
+
+class DifferentialEvolutionSolver(object):
+
+    """This class implements the differential evolution solver
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.  Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence
+        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
+        defining the lower and upper bounds for the optimizing argument of
+        `func`. It is required to have ``len(bounds) == len(x)``.
+        ``len(bounds)`` is used to determine the number of parameters in ``x``.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+
+    maxiter : int, optional
+        The maximum number of times the entire population is evolved. The
+        maximum number of function evaluations is
+        ``(maxiter + 1) * popsize * len(x)``.
+    popsize : int, optional
+        A multiplier for setting the total population size.  The population has
+        ``popsize * len(x)`` individuals.
+    tol : float, optional
+        When the mean of the population energies, multiplied by tol,
+        divided by the standard deviation of the population energies
+        is greater than 1, the solving process terminates:
+        ``convergence = mean(pop) * tol / stdev(pop) > 1``
+    mutation : float or tuple(float, float), optional
+        The mutation constant.
+        If specified as a float it should be in the range [0, 2).
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        U[min, max). Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. Increasing
+        this value allows a larger number of mutants to progress into the next
+        generation, but at the risk of reduced population stability.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is
+        used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with `seed`.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Display status messages.
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the current value of ``x0``. ``val`` represents the fractional
+        value of the population convergence.  When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
+        is used to polish the best population member at the end. This requires
+        a few more function evaluations.
+    maxfun : int, optional
+        Set the maximum number of function evaluations. However, it probably
+        makes more sense to set `maxiter` instead.
+    init : string, optional
+        Specify which type of population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'random'
+    """
+
+    # Dispatch of mutation strategy method (binomial or exponential).
+    _binomial = {'best1bin': '_best1',
+                 'randtobest1bin': '_randtobest1',
+                 'best2bin': '_best2',
+                 'rand2bin': '_rand2',
+                 'rand1bin': '_rand1'}
+    _exponential = {'best1exp': '_best1',
+                    'rand1exp': '_rand1',
+                    'randtobest1exp': '_randtobest1',
+                    'best2exp': '_best2',
+                    'rand2exp': '_rand2'}
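+    # e.g. strategy='best1bin' dispatches to self._best1 with binomial
+    # crossover, while 'rand2exp' dispatches to self._rand2 with
+    # exponential crossover (see _mutate below).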
+
+    def __init__(self, func, bounds, args=(),
+                 strategy='best1bin', maxiter=None, popsize=15,
+                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
+                 maxfun=None, callback=None, disp=False, polish=True,
+                 init='latinhypercube'):
+
+        if strategy in self._binomial:
+            self.mutation_func = getattr(self, self._binomial[strategy])
+        elif strategy in self._exponential:
+            self.mutation_func = getattr(self, self._exponential[strategy])
+        else:
+            raise ValueError("Please select a valid mutation strategy")
+        self.strategy = strategy
+
+        self.callback = callback
+        self.polish = polish
+        self.tol = tol
+
+        # Mutation constant should be in [0, 2). If specified as a sequence
+        # then dithering is performed.
+        self.scale = mutation
+        if (not np.all(np.isfinite(mutation)) or
+                np.any(np.array(mutation) >= 2) or
+                np.any(np.array(mutation) < 0)):
+            raise ValueError('The mutation constant must be a float in '
+                             'the range [0, 2), or specified as a tuple '
+                             '(min, max) where min < max and min, max are '
+                             'in the range [0, 2).')
+
+        self.dither = None
+        if hasattr(mutation, '__iter__') and len(mutation) > 1:
+            self.dither = [mutation[0], mutation[1]]
+            self.dither.sort()
+
+        self.cross_over_probability = recombination
+
+        self.func = func
+        self.args = args
+
+        # convert tuple of lower and upper bounds to limits
+        # [(low_0, high_0), ..., (low_n, high_n)]
+        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
+        self.limits = np.array(bounds, dtype='float').T
+        if (np.size(self.limits, 0) != 2
+                or not np.all(np.isfinite(self.limits))):
+            raise ValueError('bounds should be a sequence containing '
+                             'real valued (min, max) pairs for each value'
+                             ' in x')
+
+        self.maxiter = maxiter or 1000
+        self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
+                                  np.size(self.limits, 1)))
+
+        # The population is scaled to lie in [0, 1], so we have to map
+        # between parameter space and population space.  Save these
+        # arguments for _scale_parameters and _unscale_parameters; this
+        # is an optimization.
+        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
+        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
+
+        parameter_count = np.size(self.limits, 1)
+        self.random_number_generator = _make_random_gen(seed)
+
+        # Default initialization is a Latin hypercube design, but other
+        # population initializations are possible.
+        self.population = np.zeros((popsize * parameter_count,
+                                    parameter_count))
+        if init == 'latinhypercube':
+            self.init_population_lhs()
+        elif init == 'random':
+            self.init_population_random()
+        else:
+            raise ValueError("The population initialization method must be one"
+                             "of 'latinhypercube' or 'random'")
+
+        self.population_energies = np.ones(
+            popsize * parameter_count) * np.inf
+
+        self.disp = disp
+
+    def init_population_lhs(self):
+        """
+        Initializes the population with Latin Hypercube Sampling
+        Latin Hypercube Sampling ensures that the sampling of parameter space
+        is maximised.
+        """
+        samples = np.size(self.population, 0)
+        N = np.size(self.population, 1)
+        rng = self.random_number_generator
+
+        # Generate the intervals
+        segsize = 1.0 / samples
+
+        # Fill points uniformly in each interval
+        rdrange = rng.rand(samples, N) * segsize
+        rdrange += np.atleast_2d(np.arange(0., 1., segsize)).T
+
+        # Make the random pairings
+        self.population = np.zeros_like(rdrange)
+
+        for j in range(N):
+            order = rng.permutation(range(samples))
+            self.population[:, j] = rdrange[order, j]
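+
+    # Illustration (assumed values, not computed here): with 4 samples of
+    # one parameter, each sample falls in a distinct quarter of [0, 1),
+    # e.g. roughly [0.13, 0.61, 0.38, 0.92] after the per-column shuffle.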
+
+    def init_population_random(self):
+        """
+        Initialises the population at random.  This type of initialization
+        can possess clustering; Latin Hypercube sampling is generally better.
+        """
+        rng = self.random_number_generator
+        self.population = rng.random_sample(self.population.shape)
+
+    @property
+    def x(self):
+        """
+        The best solution from the solver
+
+        Returns
+        -------
+        x : ndarray
+            The best solution from the solver.
+        """
+        return self._scale_parameters(self.population[0])
+
+    def solve(self):
+        """
+        Runs the DifferentialEvolutionSolver.
+
+        Returns
+        -------
+        res : OptimizeResult
+            The optimization result represented as an ``OptimizeResult`` object.
+            Important attributes are: ``x`` the solution array, ``success`` a
+            Boolean flag indicating if the optimizer exited successfully and
+            ``message`` which describes the cause of the termination. See
+            `OptimizeResult` for a description of other attributes. If polish
+            was employed, then OptimizeResult also contains the ``hess_inv`` and
+            ``jac`` attributes.
+        """
+
+        nfev, nit, warning_flag = 0, 0, False
+        status_message = _status_message['success']
+
+        # calculate energies to start with
+        for index, candidate in enumerate(self.population):
+            parameters = self._scale_parameters(candidate)
+            self.population_energies[index] = self.func(parameters,
+                                                        *self.args)
+            nfev += 1
+
+            if nfev > self.maxfun:
+                warning_flag = True
+                status_message = _status_message['maxfev']
+                break
+
+        minval = np.argmin(self.population_energies)
+
+        # put the lowest energy into the best solution position.
+        lowest_energy = self.population_energies[minval]
+        self.population_energies[minval] = self.population_energies[0]
+        self.population_energies[0] = lowest_energy
+
+        self.population[[0, minval], :] = self.population[[minval, 0], :]
+
+        if warning_flag:
+            return OptimizeResult(
+                           x=self.x,
+                           fun=self.population_energies[0],
+                           nfev=nfev,
+                           nit=nit,
+                           message=status_message,
+                           success=(not warning_flag))
+
+        # do the optimisation.
+        for nit in range(1, self.maxiter + 1):
+            if self.dither is not None:
+                self.scale = (self.random_number_generator.rand() *
+                              (self.dither[1] - self.dither[0]) +
+                              self.dither[0])
+            for candidate in range(np.size(self.population, 0)):
+                if nfev > self.maxfun:
+                    warning_flag = True
+                    status_message = _status_message['maxfev']
+                    break
+
+                trial = self._mutate(candidate)
+                self._ensure_constraint(trial)
+                parameters = self._scale_parameters(trial)
+
+                energy = self.func(parameters, *self.args)
+                nfev += 1
+
+                if energy < self.population_energies[candidate]:
+                    self.population[candidate] = trial
+                    self.population_energies[candidate] = energy
+
+                    if energy < self.population_energies[0]:
+                        self.population_energies[0] = energy
+                        self.population[0] = trial
+
+            # stop when the fractional s.d. of the population is less than tol
+            # of the mean energy
+            convergence = (np.std(self.population_energies) /
+                           np.abs(np.mean(self.population_energies) +
+                                  _MACHEPS))
+
+            if self.disp:
+                print("differential_evolution step %d: f(x)= %g"
+                      % (nit,
+                         self.population_energies[0]))
+
+            if (self.callback and
+                    self.callback(self._scale_parameters(self.population[0]),
+                                  convergence=self.tol / convergence) is True):
+
+                warning_flag = True
+                status_message = ('callback function requested stop early '
+                                  'by returning True')
+                break
+
+            if convergence < self.tol or warning_flag:
+                break
+
+        else:
+            status_message = _status_message['maxiter']
+            warning_flag = True
+
+        DE_result = OptimizeResult(
+            x=self.x,
+            fun=self.population_energies[0],
+            nfev=nfev,
+            nit=nit,
+            message=status_message,
+            success=(not warning_flag))
+
+        if self.polish:
+            result = minimize(self.func,
+                              np.copy(DE_result.x),
+                              method='L-BFGS-B',
+                              bounds=self.limits.T,
+                              args=self.args)
+
+            nfev += result.nfev
+            DE_result.nfev = nfev
+
+            if result.fun < DE_result.fun:
+                DE_result.fun = result.fun
+                DE_result.x = result.x
+                DE_result.jac = result.jac
+                # to keep internal state consistent
+                self.population_energies[0] = result.fun
+                self.population[0] = self._unscale_parameters(result.x)
+
+        return DE_result
+
+    def _scale_parameters(self, trial):
+        """
+        scale from a number between 0 and 1 to parameters
+        """
+        return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
+
+    def _unscale_parameters(self, parameters):
+        """
+        scale from parameters to a number between 0 and 1.
+        """
+        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
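+
+    # Illustration (assumed bounds, not part of the solver): for a single
+    # parameter bounded by (0, 10), __scale_arg1 == 5 and __scale_arg2 == 10,
+    # so _scale_parameters(0.5) -> 5.0 and _unscale_parameters(5.0) -> 0.5.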
+
+    def _ensure_constraint(self, trial):
+        """
+        make sure the parameters lie between the limits
+        """
+        for index, param in enumerate(trial):
+            if param > 1 or param < 0:
+                trial[index] = self.random_number_generator.rand()
+
+    def _mutate(self, candidate):
+        """
+        create a trial vector based on a mutation strategy
+        """
+        trial = np.copy(self.population[candidate])
+        parameter_count = np.size(trial, 0)
+
+        fill_point = self.random_number_generator.randint(0, parameter_count)
+
+        if (self.strategy == 'randtobest1exp'
+                or self.strategy == 'randtobest1bin'):
+            bprime = self.mutation_func(candidate,
+                                        self._select_samples(candidate, 5))
+        else:
+            bprime = self.mutation_func(self._select_samples(candidate, 5))
+
+        if self.strategy in self._binomial:
+            crossovers = self.random_number_generator.rand(parameter_count)
+            crossovers = crossovers < self.cross_over_probability
+            # the last one is always from the bprime vector for binomial
+            # If you fill in modulo with a loop you have to set the last one to
+            # true. If you don't use a loop then you can have any random entry
+            # be True.
+            crossovers[fill_point] = True
+            trial = np.where(crossovers, bprime, trial)
+            return trial
+
+        elif self.strategy in self._exponential:
+            i = 0
+            while (i < parameter_count and
+                   self.random_number_generator.rand() <
+                   self.cross_over_probability):
+
+                trial[fill_point] = bprime[fill_point]
+                fill_point = (fill_point + 1) % parameter_count
+                i += 1
+
+            return trial
+
+    def _best1(self, samples):
+        """
+        best1bin, best1exp
+        """
+        r0, r1 = samples[:2]
+        return (self.population[0] + self.scale *
+                (self.population[r0] - self.population[r1]))
+
+    def _rand1(self, samples):
+        """
+        rand1bin, rand1exp
+        """
+        r0, r1, r2 = samples[:3]
+        return (self.population[r0] + self.scale *
+                (self.population[r1] - self.population[r2]))
+
+    def _randtobest1(self, candidate, samples):
+        """
+        randtobest1bin, randtobest1exp
+        """
+        r0, r1 = samples[:2]
+        bprime = np.copy(self.population[candidate])
+        bprime += self.scale * (self.population[0] - bprime)
+        bprime += self.scale * (self.population[r0] -
+                                self.population[r1])
+        return bprime
+
+    def _best2(self, samples):
+        """
+        best2bin, best2exp
+        """
+        r0, r1, r2, r3 = samples[:4]
+        bprime = (self.population[0] + self.scale *
+                  (self.population[r0] + self.population[r1] -
+                   self.population[r2] - self.population[r3]))
+
+        return bprime
+
+    def _rand2(self, samples):
+        """
+        rand2bin, rand2exp
+        """
+        r0, r1, r2, r3, r4 = samples
+        bprime = (self.population[r0] + self.scale *
+                  (self.population[r1] + self.population[r2] -
+                   self.population[r3] - self.population[r4]))
+
+        return bprime
+
+    def _select_samples(self, candidate, number_samples):
+        """
+        obtain random integers from range(np.size(self.population, 0)),
+        without replacement.  The original candidate is excluded.
+        """
+        idxs = list(range(np.size(self.population, 0)))
+        idxs.remove(candidate)
+        self.random_number_generator.shuffle(idxs)
+        idxs = idxs[:number_samples]
+        return idxs
+
+
+def _make_random_gen(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None, return the RandomState singleton used by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
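+
+
+# Illustrative usage sketch (not part of the module's call paths): every
+# accepted form of `seed` yields a RandomState, so callers can use
+# rng.rand() / rng.randint() uniformly:
+#     _make_random_gen(None)                      # np.random singleton
+#     _make_random_gen(42)                        # reproducible stream
+#     _make_random_gen(np.random.RandomState(7))  # passed through as-is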
diff --git a/lmfit/_version.py b/lmfit/_version.py
index b0cc39e..fa233a4 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -4,8 +4,8 @@
 # unpacked source archive. Distribution tarballs contain a pre-generated copy
 # of this file.
 
-version_version = '0.9.2'
-version_full = '208871db96689c911e00b9d59dace2de68df3220'
+version_version = '0.9.3'
+version_full = '8a4eb4f5675628bc8ee2d0da70b9e9397f80c280'
 def get_versions(default={}, verbose=False):
     return {'version': version_version, 'full': version_full}
 
diff --git a/lmfit/asteval.py b/lmfit/asteval.py
index 87daadc..e8409aa 100644
--- a/lmfit/asteval.py
+++ b/lmfit/asteval.py
@@ -1,802 +1,804 @@
-"""
-Safe(ish) evaluator of python expressions, using ast module.
-The emphasis here is on mathematical expressions, and so
-numpy functions are imported if available and used.
-
-Symbols are held in the Interpreter symtable -- a simple
-dictionary supporting a simple, flat namespace.
-
-Expressions can be compiled into an ast node and then evaluated
-later, using the current values in the symbol table.
-"""
-
-from __future__ import division, print_function
-from sys import exc_info, stdout, version_info
-import ast
-import math
-
-from .astutils import (FROM_PY, FROM_MATH, FROM_NUMPY, UNSAFE_ATTRS,
-                       LOCALFUNCS, NUMPY_RENAMES, op2func,
-                       ExceptionHolder, ReturnedNone, valid_symbol_name)
-
-HAS_NUMPY = False
-try:
-    import numpy
-    HAS_NUMPY = True
-except ImportError:
-    print("Warning: numpy not available... functionality will be limited.")
-
-
-class Interpreter:
-    """mathematical expression compiler and interpreter.
-
-    This module compiles expressions and statements to AST representation,
-    using python's ast module, and then executes the AST representation
-    using a dictionary of named objects (variables, functions).
-
-    The result is a restricted, simplified version of Python meant for
-    numerical calculations that is somewhat safer than 'eval' because some
-    operations (such as 'import' and 'eval') are simply not allowed.  The
-    resulting language uses a flat namespace that works on Python objects,
-    but does not allow new classes to be defined.
-
-    Many parts of Python syntax are supported, including:
-        for loops, while loops, if-then-elif-else conditionals
-        try-except (including 'finally')
-        function definitions with def
-        advanced slicing:    a[::-1], array[-3:, :, ::2]
-        if-expressions:      out = one_thing if TEST else other
-        list comprehension   out = [sqrt(i) for i in values]
-
-    The following Python syntax elements are not supported:
-        Import, Exec, Lambda, Class, Global, Generators,
-        Yield, Decorators
-
-    In addition, while many builtin functions are supported, several
-    builtin functions are missing ('eval', 'exec', and 'getattr' for
-    example) that can be considered unsafe.
-
-    If numpy is installed, many numpy functions are also imported.
-
-    """
-
-    supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
-                       'binop', 'boolop', 'break', 'call', 'compare',
-                       'continue', 'delete', 'dict', 'ellipsis',
-                       'excepthandler', 'expr', 'extslice', 'for',
-                       'functiondef', 'if', 'ifexp', 'index', 'interrupt',
-                       'list', 'listcomp', 'module', 'name', 'num', 'pass',
-                       'print', 'raise', 'repr', 'return', 'slice', 'str',
-                       'subscript', 'try', 'tuple', 'unaryop', 'while')
-
-    def __init__(self, symtable=None, writer=None, use_numpy=True):
-        self.writer = writer or stdout
-
-        if symtable is None:
-            symtable = {}
-        self.symtable = symtable
-        self._interrupt = None
-        self.error = []
-        self.error_msg = None
-        self.expr = None
-        self.retval = None
-        self.lineno = 0
-        self.use_numpy = HAS_NUMPY and use_numpy
-
-        symtable['print'] = self._printer
-
-        # add python symbols
-        py_symtable = {sym: __builtins__[sym] for sym in FROM_PY
-                              if sym in __builtins__}
-        symtable.update(py_symtable)
-
-        # add local symbols
-        local_symtable = {sym: obj for (sym, obj) in LOCALFUNCS.items()}
-        symtable.update(local_symtable)
-
-        # add math symbols
-        math_symtable = {sym: getattr(math, sym) for sym in FROM_MATH
-                              if hasattr(math, sym)}
-        symtable.update(math_symtable)
-
-        # add numpy symbols
-        if self.use_numpy:
-            numpy_symtable = {sym: getattr(numpy, sym) for sym in FROM_NUMPY
-                              if hasattr(numpy, sym)}
-            symtable.update(numpy_symtable)
-
-            npy_rename_symtable = {name: getattr(numpy, sym) for name, sym
-                                   in NUMPY_RENAMES.items()
-                                   if hasattr(numpy, sym)}
-            symtable.update(npy_rename_symtable)
-
-        self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
-                                   for node in self.supported_nodes))
-
-        # to rationalize try/except try/finally for Python2.6 through Python3.3
-        self.node_handlers['tryexcept'] = self.node_handlers['try']
-        self.node_handlers['tryfinally'] = self.node_handlers['try']
-
-        self.no_deepcopy = [key for key, val in symtable.items()
-                            if (callable(val)
-                                or 'numpy.lib.index_tricks' in repr(val))]
-
-    def user_defined_symbols(self):
-        """
-        Return a set of symbols that have been added to symtable after
-        construction. I.e. the symbols from self.symtable that are not in
-        self.no_deepcopy.
-
-        Returns
-        -------
-        unique_symbols : set
-            symbols in symtable that are not in self.no_deepcopy
-        """
-        sym_in_current = set(self.symtable.keys())
-        sym_from_construction = set(self.no_deepcopy)
-        unique_symbols = sym_in_current.difference(sym_from_construction)
-        return unique_symbols
-
-    def unimplemented(self, node):
-        "unimplemented nodes"
-        self.raise_exception(node, exc=NotImplementedError,
-                             msg="'%s' not supported" %
-                             (node.__class__.__name__))
-
-    def raise_exception(self, node, exc=None, msg='', expr=None,
-                        lineno=None):
-        "add an exception"
-        if self.error is None:
-            self.error = []
-        if expr is None:
-            expr = self.expr
-        if len(self.error) > 0 and not isinstance(node, ast.Module):
-            msg = '%s' % msg
-        err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno)
-        self._interrupt = ast.Break()
-        self.error.append(err)
-        if self.error_msg is None:
-            self.error_msg = "%s in expr='%s'" % (msg, self.expr)
-        elif len(msg) > 0:
-            self.error_msg = "%s\n %s" % (self.error_msg, msg)
-        if exc is None:
-            try:
-                exc = self.error[0].exc
-            except:
-                exc = RuntimeError
-        raise exc(self.error_msg)
-
-
-    # main entry point for Ast node evaluation
-    #  parse:  text of statements -> ast
-    #  run:    ast -> result
-    #  eval:   string statement -> result = run(parse(statement))
-    def parse(self, text):
-        """parse statement/expression to Ast representation"""
-        self.expr = text
-        try:
-            return ast.parse(text)
-        except SyntaxError:
-            self.raise_exception(None, msg='Syntax Error', expr=text)
-        except:
-            self.raise_exception(None, msg='Runtime Error', expr=text)
-
-    def run(self, node, expr=None, lineno=None, with_raise=True):
-        """executes parsed Ast representation for an expression"""
-        # Note: keep the 'node is None' test: internal code here may run
-        #    run(None) and expect a None in return.
-        if len(self.error) > 0:
-            return
-        if node is None:
-            return None
-        if isinstance(node, str):
-            node = self.parse(node)
-        if lineno is not None:
-            self.lineno = lineno
-        if expr is not None:
-            self.expr = expr
-
-        # get handler for this node:
-        #   on_xxx with handle nodes of type 'xxx', etc
-        try:
-            handler = self.node_handlers[node.__class__.__name__.lower()]
-        except KeyError:
-            return self.unimplemented(node)
-
-        # run the handler:  this will likely generate
-        # recursive calls into this run method.
-        try:
-            ret = handler(node)
-            if isinstance(ret, enumerate):
-                ret = list(ret)
-            return ret
-        except:
-            if with_raise:
-                self.raise_exception(node, expr=expr)
-
-    def __call__(self, expr, **kw):
-        return self.eval(expr, **kw)
-
-    def eval(self, expr, lineno=0, show_errors=True):
-        """evaluates a single statement"""
-        self.lineno = lineno
-        self.error = []
-        try:
-            node = self.parse(expr)
-        except:
-            errmsg = exc_info()[1]
-            if len(self.error) > 0:
-                errmsg = "\n".join(self.error[0].get_error())
-            if not show_errors:
-                try:
-                    exc = self.error[0].exc
-                except:
-                    exc = RuntimeError
-                raise exc(errmsg)
-            print(errmsg, file=self.writer)
-            return
-        try:
-            return self.run(node, expr=expr, lineno=lineno)
-        except:
-            errmsg = exc_info()[1]
-            if len(self.error) > 0:
-                errmsg = "\n".join(self.error[0].get_error())
-            if not show_errors:
-                try:
-                    exc = self.error[0].exc
-                except:
-                    exc = RuntimeError
-                raise exc(errmsg)
-            print(errmsg, file=self.writer)
-            return
-
-    def dump(self, node, **kw):
-        "simple ast dumper"
-        return ast.dump(node, **kw)
-
-    # handlers for ast components
-    def on_expr(self, node):
-        "expression"
-        return self.run(node.value)  # ('value',)
-
-    def on_index(self, node):
-        "index"
-        return self.run(node.value)  # ('value',)
-
-    def on_return(self, node):  # ('value',)
-        "return statement: look for None, return special sentinal"
-        self.retval = self.run(node.value)
-        if self.retval is None:
-            self.retval = ReturnedNone
-        return
-
-    def on_repr(self, node):
-        "repr "
-        return repr(self.run(node.value))  # ('value',)
-
-    def on_module(self, node):    # ():('body',)
-        "module def"
-        out = None
-        for tnode in node.body:
-            out = self.run(tnode)
-        return out
-
-    def on_pass(self, node):
-        "pass statement"
-        return None  # ()
-
-    def on_ellipsis(self, node):
-        "ellipses"
-        return Ellipsis
-
-    # for break and continue: set the instance variable _interrupt
-    def on_interrupt(self, node):    # ()
-        "interrupt handler"
-        self._interrupt = node
-        return node
-
-    def on_break(self, node):
-        "break"
-        return self.on_interrupt(node)
-
-    def on_continue(self, node):
-        "continue"
-        return self.on_interrupt(node)
-
-    def on_assert(self, node):    # ('test', 'msg')
-        "assert statement"
-        if not self.run(node.test):
-            self.raise_exception(node, exc=AssertionError, msg=node.msg)
-        return True
-
-    def on_list(self, node):    # ('elt', 'ctx')
-        "list"
-        return [self.run(e) for e in node.elts]
-
-    def on_tuple(self, node):    # ('elts', 'ctx')
-        "tuple"
-        return tuple(self.on_list(node))
-
-    def on_dict(self, node):    # ('keys', 'values')
-        "dictionary"
-        return dict([(self.run(k), self.run(v)) for k, v in
-                     zip(node.keys, node.values)])
-
-    def on_num(self, node):   # ('n',)
-        'return number'
-        return node.n
-
-    def on_str(self, node):   # ('s',)
-        'return string'
-        return node.s
-
-    def on_name(self, node):    # ('id', 'ctx')
-        """ Name node """
-        ctx = node.ctx.__class__
-        if ctx in (ast.Param, ast.Del):
-            return str(node.id)
-        else:
-            if node.id in self.symtable:
-                return self.symtable[node.id]
-            else:
-                msg = "name '%s' is not defined" % node.id
-                self.raise_exception(node, exc=NameError, msg=msg)
-
-    def node_assign(self, node, val):
-        """here we assign a value (not the node.value object) to a node
-        this is used by on_assign, but also by for, list comprehension, etc.
-        """
-        if node.__class__ == ast.Name:
-            if not valid_symbol_name(node.id):
-                errmsg = "invalid symbol name (reserved word?) %s" % node.id
-                self.raise_exception(node, exc=NameError, msg=errmsg)
-            sym = self.symtable[node.id] = val
-            if node.id in self.no_deepcopy:
-                self.no_deepcopy.pop(node.id)
-
-        elif node.__class__ == ast.Attribute:
-            if node.ctx.__class__ == ast.Load:
-                msg = "cannot assign to attribute %s" % node.attr
-                self.raise_exception(node, exc=AttributeError, msg=msg)
-
-            setattr(self.run(node.value), node.attr, val)
-
-        elif node.__class__ == ast.Subscript:
-            sym = self.run(node.value)
-            xslice = self.run(node.slice)
-            if isinstance(node.slice, ast.Index):
-                sym[xslice] = val
-            elif isinstance(node.slice, ast.Slice):
-                sym[slice(xslice.start, xslice.stop)] = val
-            elif isinstance(node.slice, ast.ExtSlice):
-                sym[(xslice)] = val
-        elif node.__class__ in (ast.Tuple, ast.List):
-            if len(val) == len(node.elts):
-                for telem, tval in zip(node.elts, val):
-                    self.node_assign(telem, tval)
-            else:
-                raise ValueError('too many values to unpack')
-
-    def on_attribute(self, node):    # ('value', 'attr', 'ctx')
-        "extract attribute"
-        ctx = node.ctx.__class__
-        if ctx == ast.Store:
-            msg = "attribute for storage: shouldn't be here!"
-            self.raise_exception(node, exc=RuntimeError, msg=msg)
-
-        sym = self.run(node.value)
-        if ctx == ast.Del:
-            return delattr(sym, node.attr)
-
-        # ctx is ast.Load
-        fmt = "cannnot access attribute '%s' for %s"
-        if node.attr not in UNSAFE_ATTRS:
-            fmt = "no attribute '%s' for %s"
-            try:
-                return getattr(sym, node.attr)
-            except AttributeError:
-                pass
-
-        # AttributeError or accessed unsafe attribute
-        obj = self.run(node.value)
-        msg = fmt % (node.attr, obj)
-        self.raise_exception(node, exc=AttributeError, msg=msg)
-
-    def on_assign(self, node):    # ('targets', 'value')
-        "simple assignment"
-        val = self.run(node.value)
-        for tnode in node.targets:
-            self.node_assign(tnode, val)
-        return
-
-    def on_augassign(self, node):    # ('target', 'op', 'value')
-        "augmented assign"
-        return self.on_assign(ast.Assign(targets=[node.target],
-                                         value=ast.BinOp(left=node.target,
-                                                         op=node.op,
-                                                         right=node.value)))
-
-    def on_slice(self, node):    # ():('lower', 'upper', 'step')
-        "simple slice"
-        return slice(self.run(node.lower),
-                     self.run(node.upper),
-                     self.run(node.step))
-
-    def on_extslice(self, node):    # ():('dims',)
-        "extended slice"
-        return tuple([self.run(tnode) for tnode in node.dims])
-
-    def on_subscript(self, node):    # ('value', 'slice', 'ctx')
-        "subscript handling -- one of the tricky parts"
-        val = self.run(node.value)
-        nslice = self.run(node.slice)
-        ctx = node.ctx.__class__
-        if ctx in (ast.Load, ast.Store):
-            if isinstance(node.slice, (ast.Index, ast.Slice, ast.Ellipsis)):
-                return val.__getitem__(nslice)
-            elif isinstance(node.slice, ast.ExtSlice):
-                return val[(nslice)]
-        else:
-            msg = "subscript with unknown context"
-            self.raise_exception(node, msg=msg)
-
-    def on_delete(self, node):    # ('targets',)
-        "delete statement"
-        for tnode in node.targets:
-            if tnode.ctx.__class__ != ast.Del:
-                break
-            children = []
-            while tnode.__class__ == ast.Attribute:
-                children.append(tnode.attr)
-                tnode = tnode.value
-
-            if tnode.__class__ == ast.Name:
-                children.append(tnode.id)
-                children.reverse()
-                self.symtable.pop('.'.join(children))
-            else:
-                msg = "could not delete symbol"
-                self.raise_exception(node, msg=msg)
-
-    def on_unaryop(self, node):    # ('op', 'operand')
-        "unary operator"
-        return op2func(node.op)(self.run(node.operand))
-
-    def on_binop(self, node):    # ('left', 'op', 'right')
-        "binary operator"
-        return op2func(node.op)(self.run(node.left),
-                                self.run(node.right))
-
-    def on_boolop(self, node):    # ('op', 'values')
-        "boolean operator"
-        val = self.run(node.values[0])
-        is_and = ast.And == node.op.__class__
-        if (is_and and val) or (not is_and and not val):
-            for n in node.values:
-                val = op2func(node.op)(val, self.run(n))
-                if (is_and and not val) or (not is_and and val):
-                    break
-        return val
-
-    def on_compare(self, node):    # ('left', 'ops', 'comparators')
-        "comparison operators"
-        lval = self.run(node.left)
-        out = True
-        for op, rnode in zip(node.ops, node.comparators):
-            rval = self.run(rnode)
-            out = op2func(op)(lval, rval)
-            lval = rval
-            if self.use_numpy and isinstance(out, numpy.ndarray) and out.any():
-                break
-            elif not out:
-                break
-        return out
-
-    def on_print(self, node):    # ('dest', 'values', 'nl')
-        """ note: implements Python2 style print statement, not
-        print() function.  May need improvement...."""
-        dest = self.run(node.dest) or self.writer
-        end = ''
-        if node.nl:
-            end = '\n'
-        out = [self.run(tnode) for tnode in node.values]
-        if out and len(self.error) == 0:
-            self._printer(*out, file=dest, end=end)
-
-    def _printer(self, *out, **kws):
-        "generic print function"
-        flush = kws.pop('flush', True)
-        fileh = kws.pop('file', self.writer)
-        sep = kws.pop('sep', ' ')
-        end = kws.pop('end', '\n')
-
-        print(*out, file=fileh, sep=sep, end=end)
-        if flush:
-            fileh.flush()
-
-    def on_if(self, node):    # ('test', 'body', 'orelse')
-        "regular if-then-else statement"
-        block = node.body
-        if not self.run(node.test):
-            block = node.orelse
-        for tnode in block:
-            self.run(tnode)
-
-    def on_ifexp(self, node):    # ('test', 'body', 'orelse')
-        "if expressions"
-        expr = node.orelse
-        if self.run(node.test):
-            expr = node.body
-        return self.run(expr)
-
-    def on_while(self, node):    # ('test', 'body', 'orelse')
-        "while blocks"
-        while self.run(node.test):
-            self._interrupt = None
-            for tnode in node.body:
-                self.run(tnode)
-                if self._interrupt is not None:
-                    break
-            if isinstance(self._interrupt, ast.Break):
-                break
-        else:
-            for tnode in node.orelse:
-                self.run(tnode)
-        self._interrupt = None
-
-    def on_for(self, node):    # ('target', 'iter', 'body', 'orelse')
-        "for blocks"
-        for val in self.run(node.iter):
-            self.node_assign(node.target, val)
-            self._interrupt = None
-            for tnode in node.body:
-                self.run(tnode)
-                if self._interrupt is not None:
-                    break
-            if isinstance(self._interrupt, ast.Break):
-                break
-        else:
-            for tnode in node.orelse:
-                self.run(tnode)
-        self._interrupt = None
-
-    def on_listcomp(self, node):    # ('elt', 'generators')
-        "list comprehension"
-        out = []
-        for tnode in node.generators:
-            if tnode.__class__ == ast.comprehension:
-                for val in self.run(tnode.iter):
-                    self.node_assign(tnode.target, val)
-                    add = True
-                    for cond in tnode.ifs:
-                        add = add and self.run(cond)
-                    if add:
-                        out.append(self.run(node.elt))
-        return out
-
-    def on_excepthandler(self, node):  # ('type', 'name', 'body')
-        "exception handler..."
-        return (self.run(node.type), node.name, node.body)
-
-    def on_try(self, node):    # ('body', 'handlers', 'orelse', 'finalbody')
-        "try/except/else/finally blocks"
-        no_errors = True
-        for tnode in node.body:
-            self.run(tnode, with_raise=False)
-            no_errors = no_errors and len(self.error) == 0
-            if len(self.error) > 0:
-                e_type, e_value, e_tback = self.error[-1].exc_info
-                for hnd in node.handlers:
-                    htype = None
-                    if hnd.type is not None:
-                        htype = __builtins__.get(hnd.type.id, None)
-                    if htype is None or isinstance(e_type(), htype):
-                        self.error = []
-                        if hnd.name is not None:
-                            self.node_assign(hnd.name, e_value)
-                        for tline in hnd.body:
-                            self.run(tline)
-                        break
-        if no_errors and hasattr(node, 'orelse'):
-            for tnode in node.orelse:
-                self.run(tnode)
-
-        if hasattr(node, 'finalbody'):
-            for tnode in node.finalbody:
-                self.run(tnode)
-
-    def on_raise(self, node):    # ('type', 'inst', 'tback')
-        "raise statement: note difference for python 2 and 3"
-        if version_info[0] == 3:
-            excnode = node.exc
-            msgnode = node.cause
-        else:
-            excnode = node.type
-            msgnode = node.inst
-        out = self.run(excnode)
-        msg = ' '.join(out.args)
-        msg2 = self.run(msgnode)
-        if msg2 not in (None, 'None'):
-            msg = "%s: %s" % (msg, msg2)
-        self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
-
-    def on_call(self, node):
-        "function execution"
-        #  ('func', 'args', 'keywords', 'starargs', 'kwargs')
-        func = self.run(node.func)
-        if not hasattr(func, '__call__') and not isinstance(func, type):
-            msg = "'%s' is not callable!!" % (func)
-            self.raise_exception(node, exc=TypeError, msg=msg)
-
-        args = [self.run(targ) for targ in node.args]
-        if node.starargs is not None:
-            args = args + self.run(node.starargs)
-
-        keywords = {}
-        for key in node.keywords:
-            if not isinstance(key, ast.keyword):
-                msg = "keyword error in function call '%s'" % (func)
-                self.raise_exception(node, msg=msg)
-
-            keywords[key.arg] = self.run(key.value)
-        if node.kwargs is not None:
-            keywords.update(self.run(node.kwargs))
-
-        try:
-            return func(*args, **keywords)
-        except:
-            self.raise_exception(node, msg="Error running %s" % (func))
-
-    def on_arg(self, node):    # ('test', 'msg')
-        "arg for function definitions"
-        # print(" ON ARG ! ", node, node.arg)
-        return node.arg
-
-    def on_functiondef(self, node):
-        "define procedures"
-        # ('name', 'args', 'body', 'decorator_list')
-        if node.decorator_list != []:
-            raise Warning("decorated procedures not supported!")
-        kwargs = []
-
-        offset = len(node.args.args) - len(node.args.defaults)
-        for idef, defnode in enumerate(node.args.defaults):
-            defval = self.run(defnode)
-            keyval = self.run(node.args.args[idef+offset])
-            kwargs.append((keyval, defval))
-
-        if version_info[0] == 3:
-            args = [tnode.arg for tnode in node.args.args[:offset]]
-        else:
-            args = [tnode.id for tnode in node.args.args[:offset]]
-
-        doc = None
-        nb0 = node.body[0]
-        if isinstance(nb0, ast.Expr) and isinstance(nb0.value, ast.Str):
-            doc = nb0.value.s
-
-        self.symtable[node.name] = Procedure(node.name, self, doc=doc,
-                                             lineno=self.lineno,
-                                             body=node.body,
-                                             args=args, kwargs=kwargs,
-                                             vararg=node.args.vararg,
-                                             varkws=node.args.kwarg)
-        if node.name in self.no_deepcopy:
-            self.no_deepcopy.pop(node.name)
-
-
-class Procedure(object):
-    """Procedure: user-defined function for asteval
-
-    This stores the parsed ast nodes as from the
-    'functiondef' ast node for later evaluation.
-    """
-    def __init__(self, name, interp, doc=None, lineno=0,
-                 body=None, args=None, kwargs=None,
-                 vararg=None, varkws=None):
-        self.name = name
-        self.__asteval__ = interp
-        self.raise_exc = self.__asteval__.raise_exception
-        self.__doc__ = doc
-        self.body = body
-        self.argnames = args
-        self.kwargs = kwargs
-        self.vararg = vararg
-        self.varkws = varkws
-        self.lineno = lineno
-
-    def __repr__(self):
-        sig = ""
-        if len(self.argnames) > 0:
-            sig = "%s%s" % (sig, ', '.join(self.argnames))
-        if self.vararg is not None:
-            sig = "%s, *%s" % (sig, self.vararg)
-        if len(self.kwargs) > 0:
-            if len(sig) > 0:
-                sig = "%s, " % sig
-            _kw = ["%s=%s" % (k, v) for k, v in self.kwargs]
-            sig = "%s%s" % (sig, ', '.join(_kw))
-
-        if self.varkws is not None:
-            sig = "%s, **%s" % (sig, self.varkws)
-        sig = "<Procedure %s(%s)>" % (self.name, sig)
-        if self.__doc__ is not None:
-            sig = "%s\n  %s" % (sig, self.__doc__)
-        return sig
-
-    def __call__(self, *args, **kwargs):
-        symlocals = {}
-        args = list(args)
-        n_args = len(args)
-        n_names = len(self.argnames)
-        n_kws = len(kwargs)
-
-        # may need to move kwargs to args if names align!
-        if (n_args < n_names) and n_kws > 0:
-            for name in self.argnames[n_args:]:
-                if name in kwargs:
-                    args.append(kwargs.pop(name))
-            n_args = len(args)
-            n_names = len(self.argnames)
-            n_kws = len(kwargs)
-
-        if len(self.argnames) > 0 and kwargs is not None:
-            msg = "multiple values for keyword argument '%s' in Procedure %s"
-            for targ in self.argnames:
-                if targ in kwargs:
-                    self.raise_exc(None, exc=TypeError,
-                                   msg=msg % (targ, self.name),
-                                   lineno=self.lineno)
-
-        if n_args != n_names:
-            msg = None
-            if n_args < n_names:
-                msg = 'not enough arguments for Procedure %s()' % self.name
-                msg = '%s (expected %i, got %i)' % (msg, n_names, n_args)
-                self.raise_exc(None, exc=TypeError, msg=msg)
-
-        for argname in self.argnames:
-            symlocals[argname] = args.pop(0)
-
-        try:
-            if self.vararg is not None:
-                symlocals[self.vararg] = tuple(args)
-
-            for key, val in self.kwargs:
-                if key in kwargs:
-                    val = kwargs.pop(key)
-                symlocals[key] = val
-
-            if self.varkws is not None:
-                symlocals[self.varkws] = kwargs
-
-            elif len(kwargs) > 0:
-                msg = 'extra keyword arguments for Procedure %s (%s)'
-                msg = msg % (self.name, ','.join(list(kwargs.keys())))
-                self.raise_exc(None, msg=msg, exc=TypeError,
-                               lineno=self.lineno)
-
-        except (ValueError, LookupError, TypeError,
-                NameError, AttributeError):
-            msg = 'incorrect arguments for Procedure %s' % self.name
-            self.raise_exc(None, msg=msg, lineno=self.lineno)
-
-        save_symtable = self.__asteval__.symtable.copy()
-        self.__asteval__.symtable.update(symlocals)
-        self.__asteval__.retval = None
-        retval = None
-
-        # evaluate script of function
-        for node in self.body:
-            self.__asteval__.run(node, expr='<>', lineno=self.lineno)
-            if len(self.__asteval__.error) > 0:
-                break
-            if self.__asteval__.retval is not None:
-                retval = self.__asteval__.retval
-                if retval is ReturnedNone:
-                    retval = None
-                break
-
-        self.__asteval__.symtable = save_symtable
-        symlocals = None
-        return retval
+"""
+Safe(ish) evaluator of python expressions, using ast module.
+The emphasis here is on mathematical expressions, and so
+numpy functions are imported if available and used.
+
+Symbols are held in the Interpreter symtable -- a simple
+dictionary supporting a simple, flat namespace.
+
+Expressions can be compiled into an ast node and then evaluated
+later, using the current values in the symbol table.
+"""
+
+from __future__ import division, print_function
+from sys import exc_info, stdout, version_info
+import ast
+import math
+
+from .astutils import (FROM_PY, FROM_MATH, FROM_NUMPY, UNSAFE_ATTRS,
+                       LOCALFUNCS, NUMPY_RENAMES, op2func,
+                       ExceptionHolder, ReturnedNone, valid_symbol_name)
+
+HAS_NUMPY = False
+try:
+    import numpy
+    HAS_NUMPY = True
+except ImportError:
+    print("Warning: numpy not available... functionality will be limited.")
+
+
+class Interpreter:
+    """mathematical expression compiler and interpreter.
+
+    This module compiles expressions and statements to AST representation,
+    using python's ast module, and then executes the AST representation
+    using a dictionary of named objects (variables, functions).
+
+    The result is a restricted, simplified version of Python meant for
+    numerical calculations that is somewhat safer than 'eval' because some
+    operations (such as 'import' and 'eval') are simply not allowed.  The
+    resulting language uses a flat namespace that works on Python objects,
+    but does not allow new classes to be defined.
+
+    Many parts of Python syntax are supported, including:
+        for loops, while loops, if-then-elif-else conditionals
+        try-except (including 'finally')
+        function definitions with def
+        advanced slicing:    a[::-1], array[-3:, :, ::2]
+        if-expressions:      out = one_thing if TEST else other
+        list comprehension   out = [sqrt(i) for i in values]
+
+    The following Python syntax elements are not supported:
+        Import, Exec, Lambda, Class, Global, Generators,
+        Yield, Decorators
+
+    In addition, while many builtin functions are supported, several
+    builtin functions are missing ('eval', 'exec', and 'getattr' for
+    example) that can be considered unsafe.
+
+    If numpy is installed, many numpy functions are also imported.
+
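+    A short, illustrative usage sketch (``sqrt`` comes from the math/numpy
+    symbols loaded into the symtable)::
+
+        aeval = Interpreter()
+        aeval("x = sqrt(3**2 + 4**2)")
+        print(aeval.symtable['x'])    # -> 5.0
+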
+    """
+
+    supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
+                       'binop', 'boolop', 'break', 'call', 'compare',
+                       'continue', 'delete', 'dict', 'ellipsis',
+                       'excepthandler', 'expr', 'extslice', 'for',
+                       'functiondef', 'if', 'ifexp', 'index', 'interrupt',
+                       'list', 'listcomp', 'module', 'name', 'num', 'pass',
+                       'print', 'raise', 'repr', 'return', 'slice', 'str',
+                       'subscript', 'try', 'tuple', 'unaryop', 'while')
+
+    def __init__(self, symtable=None, writer=None, use_numpy=True):
+        self.writer = writer or stdout
+
+        if symtable is None:
+            symtable = {}
+        self.symtable = symtable
+        self._interrupt = None
+        self.error = []
+        self.error_msg = None
+        self.expr = None
+        self.retval = None
+        self.lineno = 0
+        self.use_numpy = HAS_NUMPY and use_numpy
+
+        symtable['print'] = self._printer
+
+        # add python symbols
+        py_symtable = {sym: __builtins__[sym] for sym in FROM_PY
+                              if sym in __builtins__}
+        symtable.update(py_symtable)
+
+        # add local symbols
+        local_symtable = {sym: obj for (sym, obj) in LOCALFUNCS.items()}
+        symtable.update(local_symtable)
+
+        # add math symbols
+        math_symtable = {sym: getattr(math, sym) for sym in FROM_MATH
+                              if hasattr(math, sym)}
+        symtable.update(math_symtable)
+
+        # add numpy symbols
+        if self.use_numpy:
+            numpy_symtable = {sym: getattr(numpy, sym) for sym in FROM_NUMPY
+                              if hasattr(numpy, sym)}
+            symtable.update(numpy_symtable)
+
+            npy_rename_symtable = {name: getattr(numpy, sym) for name, sym
+                                   in NUMPY_RENAMES.items()
+                                   if hasattr(numpy, sym)}
+            symtable.update(npy_rename_symtable)
+
+        self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
+                                   for node in self.supported_nodes))
+
+        # to rationalize try/except try/finally for Python2.6 through Python3.3
+        self.node_handlers['tryexcept'] = self.node_handlers['try']
+        self.node_handlers['tryfinally'] = self.node_handlers['try']
+
+        self.no_deepcopy = [key for key, val in symtable.items()
+                            if (callable(val)
+                                or 'numpy.lib.index_tricks' in repr(val))]
+
+    def user_defined_symbols(self):
+        """
+        Return a set of symbols that have been added to symtable after
+        construction, i.e. the symbols from self.symtable that are not
+        in self.no_deepcopy.
+
+        Returns
+        -------
+        unique_symbols : set
+            symbols in symtable that are not in self.no_deepcopy
+        """
+        sym_in_current = set(self.symtable.keys())
+        sym_from_construction = set(self.no_deepcopy)
+        unique_symbols = sym_in_current.difference(sym_from_construction)
+        return unique_symbols
+
+    def unimplemented(self, node):
+        "unimplemented nodes"
+        self.raise_exception(node, exc=NotImplementedError,
+                             msg="'%s' not supported" %
+                             (node.__class__.__name__))
+
+    def raise_exception(self, node, exc=None, msg='', expr=None,
+                        lineno=None):
+        "add an exception"
+        if self.error is None:
+            self.error = []
+        if expr is None:
+            expr = self.expr
+        if len(self.error) > 0 and not isinstance(node, ast.Module):
+            msg = '%s' % msg
+        err = ExceptionHolder(node, exc=exc, msg=msg, expr=expr, lineno=lineno)
+        self._interrupt = ast.Break()
+        self.error.append(err)
+        if self.error_msg is None:
+            self.error_msg = "%s in expr='%s'" % (msg, self.expr)
+        elif len(msg) > 0:
+            self.error_msg = "%s\n %s" % (self.error_msg, msg)
+        if exc is None:
+            try:
+                exc = self.error[0].exc
+            except:
+                exc = RuntimeError
+        raise exc(self.error_msg)
+
+
+    # main entry point for Ast node evaluation
+    #  parse:  text of statements -> ast
+    #  run:    ast -> result
+    #  eval:   string statement -> result = run(parse(statement))
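+    #
+    # For example (illustrative):
+    #     node = interp.parse('1 + 2')    # -> an ast.Module
+    #     interp.run(node)                # -> 3
+    #     interp.eval('1 + 2')            # -> 3, combining both steps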
+    def parse(self, text):
+        """parse statement/expression to Ast representation"""
+        self.expr = text
+        try:
+            return ast.parse(text)
+        except SyntaxError:
+            self.raise_exception(None, msg='Syntax Error', expr=text)
+        except:
+            self.raise_exception(None, msg='Runtime Error', expr=text)
+
+    def run(self, node, expr=None, lineno=None, with_raise=True):
+        """executes parsed Ast representation for an expression"""
+        # Note: keep the 'node is None' test: internal code here may
+        #    call run(None) and expect None in return.
+        if len(self.error) > 0:
+            return
+        if node is None:
+            return None
+        if isinstance(node, str):
+            node = self.parse(node)
+        if lineno is not None:
+            self.lineno = lineno
+        if expr is not None:
+            self.expr = expr
+
+        # get handler for this node:
+        #   on_xxx will handle nodes of type 'xxx', etc.
+        try:
+            handler = self.node_handlers[node.__class__.__name__.lower()]
+        except KeyError:
+            return self.unimplemented(node)
+
+        # run the handler:  this will likely generate
+        # recursive calls into this run method.
+        try:
+            ret = handler(node)
+            if isinstance(ret, enumerate):
+                ret = list(ret)
+            return ret
+        except:
+            if with_raise:
+                self.raise_exception(node, expr=expr)
+
+    def __call__(self, expr, **kw):
+        return self.eval(expr, **kw)
+
+    def eval(self, expr, lineno=0, show_errors=True):
+        """evaluates a single statement"""
+        self.lineno = lineno
+        self.error = []
+        try:
+            node = self.parse(expr)
+        except:
+            errmsg = exc_info()[1]
+            if len(self.error) > 0:
+                errmsg = "\n".join(self.error[0].get_error())
+            if not show_errors:
+                try:
+                    exc = self.error[0].exc
+                except:
+                    exc = RuntimeError
+                raise exc(errmsg)
+            print(errmsg, file=self.writer)
+            return
+        try:
+            return self.run(node, expr=expr, lineno=lineno)
+        except:
+            errmsg = exc_info()[1]
+            if len(self.error) > 0:
+                errmsg = "\n".join(self.error[0].get_error())
+            if not show_errors:
+                try:
+                    exc = self.error[0].exc
+                except:
+                    exc = RuntimeError
+                raise exc(errmsg)
+            print(errmsg, file=self.writer)
+            return
+
+    def dump(self, node, **kw):
+        "simple ast dumper"
+        return ast.dump(node, **kw)
+
+    # handlers for ast components
+    def on_expr(self, node):
+        "expression"
+        return self.run(node.value)  # ('value',)
+
+    def on_index(self, node):
+        "index"
+        return self.run(node.value)  # ('value',)
+
+    def on_return(self, node):  # ('value',)
+        "return statement: look for None, return special sentinal"
+        self.retval = self.run(node.value)
+        if self.retval is None:
+            self.retval = ReturnedNone
+        return
+
+    def on_repr(self, node):
+        "repr "
+        return repr(self.run(node.value))  # ('value',)
+
+    def on_module(self, node):    # ():('body',)
+        "module def"
+        out = None
+        for tnode in node.body:
+            out = self.run(tnode)
+        return out
+
+    def on_pass(self, node):
+        "pass statement"
+        return None  # ()
+
+    def on_ellipsis(self, node):
+        "ellipses"
+        return Ellipsis
+
+    # for break and continue: set the instance variable _interrupt
+    def on_interrupt(self, node):    # ()
+        "interrupt handler"
+        self._interrupt = node
+        return node
+
+    def on_break(self, node):
+        "break"
+        return self.on_interrupt(node)
+
+    def on_continue(self, node):
+        "continue"
+        return self.on_interrupt(node)
+
+    def on_assert(self, node):    # ('test', 'msg')
+        "assert statement"
+        if not self.run(node.test):
+            self.raise_exception(node, exc=AssertionError, msg=node.msg)
+        return True
+
+    def on_list(self, node):    # ('elt', 'ctx')
+        "list"
+        return [self.run(e) for e in node.elts]
+
+    def on_tuple(self, node):    # ('elts', 'ctx')
+        "tuple"
+        return tuple(self.on_list(node))
+
+    def on_dict(self, node):    # ('keys', 'values')
+        "dictionary"
+        return dict([(self.run(k), self.run(v)) for k, v in
+                     zip(node.keys, node.values)])
+
+    def on_num(self, node):   # ('n',)
+        'return number'
+        return node.n
+
+    def on_str(self, node):   # ('s',)
+        'return string'
+        return node.s
+
+    def on_name(self, node):    # ('id', 'ctx')
+        """ Name node """
+        ctx = node.ctx.__class__
+        if ctx in (ast.Param, ast.Del):
+            return str(node.id)
+        else:
+            if node.id in self.symtable:
+                return self.symtable[node.id]
+            else:
+                msg = "name '%s' is not defined" % node.id
+                self.raise_exception(node, exc=NameError, msg=msg)
+
+    def node_assign(self, node, val):
+        """here we assign a value (not the node.value object) to a node
+        this is used by on_assign, but also by for, list comprehension, etc.
+        """
+        if node.__class__ == ast.Name:
+            if not valid_symbol_name(node.id):
+                errmsg = "invalid symbol name (reserved word?) %s" % node.id
+                self.raise_exception(node, exc=NameError, msg=errmsg)
+            sym = self.symtable[node.id] = val
+            if node.id in self.no_deepcopy:
+                self.no_deepcopy.remove(node.id)
+
+        elif node.__class__ == ast.Attribute:
+            if node.ctx.__class__ == ast.Load:
+                msg = "cannot assign to attribute %s" % node.attr
+                self.raise_exception(node, exc=AttributeError, msg=msg)
+
+            setattr(self.run(node.value), node.attr, val)
+
+        elif node.__class__ == ast.Subscript:
+            sym = self.run(node.value)
+            xslice = self.run(node.slice)
+            if isinstance(node.slice, ast.Index):
+                sym[xslice] = val
+            elif isinstance(node.slice, ast.Slice):
+                sym[slice(xslice.start, xslice.stop, xslice.step)] = val
+            elif isinstance(node.slice, ast.ExtSlice):
+                sym[xslice] = val
+        elif node.__class__ in (ast.Tuple, ast.List):
+            if len(val) == len(node.elts):
+                for telem, tval in zip(node.elts, val):
+                    self.node_assign(telem, tval)
+            else:
+                raise ValueError('too many values to unpack')
+
+    def on_attribute(self, node):    # ('value', 'attr', 'ctx')
+        "extract attribute"
+        ctx = node.ctx.__class__
+        if ctx == ast.Store:
+            msg = "attribute for storage: shouldn't be here!"
+            self.raise_exception(node, exc=RuntimeError, msg=msg)
+
+        sym = self.run(node.value)
+        if ctx == ast.Del:
+            return delattr(sym, node.attr)
+
+        # ctx is ast.Load
+        fmt = "cannnot access attribute '%s' for %s"
+        if node.attr not in UNSAFE_ATTRS:
+            fmt = "no attribute '%s' for %s"
+            try:
+                return getattr(sym, node.attr)
+            except AttributeError:
+                pass
+
+        # AttributeError or accessed unsafe attribute
+        obj = self.run(node.value)
+        msg = fmt % (node.attr, obj)
+        self.raise_exception(node, exc=AttributeError, msg=msg)
+
+    def on_assign(self, node):    # ('targets', 'value')
+        "simple assignment"
+        val = self.run(node.value)
+        for tnode in node.targets:
+            self.node_assign(tnode, val)
+        return
+
+    def on_augassign(self, node):    # ('target', 'op', 'value')
+        "augmented assign"
+        return self.on_assign(ast.Assign(targets=[node.target],
+                                         value=ast.BinOp(left=node.target,
+                                                         op=node.op,
+                                                         right=node.value)))
+
+    def on_slice(self, node):    # ():('lower', 'upper', 'step')
+        "simple slice"
+        return slice(self.run(node.lower),
+                     self.run(node.upper),
+                     self.run(node.step))
+
+    def on_extslice(self, node):    # ():('dims',)
+        "extended slice"
+        return tuple([self.run(tnode) for tnode in node.dims])
+
+    def on_subscript(self, node):    # ('value', 'slice', 'ctx')
+        "subscript handling -- one of the tricky parts"
+        val = self.run(node.value)
+        nslice = self.run(node.slice)
+        ctx = node.ctx.__class__
+        if ctx in (ast.Load, ast.Store):
+            if isinstance(node.slice, (ast.Index, ast.Slice, ast.Ellipsis)):
+                return val.__getitem__(nslice)
+            elif isinstance(node.slice, ast.ExtSlice):
+                return val[nslice]
+        else:
+            msg = "subscript with unknown context"
+            self.raise_exception(node, msg=msg)
+
+    def on_delete(self, node):    # ('targets',)
+        "delete statement"
+        for tnode in node.targets:
+            if tnode.ctx.__class__ != ast.Del:
+                break
+            children = []
+            while tnode.__class__ == ast.Attribute:
+                children.append(tnode.attr)
+                tnode = tnode.value
+
+            if tnode.__class__ == ast.Name:
+                children.append(tnode.id)
+                children.reverse()
+                self.symtable.pop('.'.join(children))
+            else:
+                msg = "could not delete symbol"
+                self.raise_exception(node, msg=msg)
+
+    def on_unaryop(self, node):    # ('op', 'operand')
+        "unary operator"
+        return op2func(node.op)(self.run(node.operand))
+
+    def on_binop(self, node):    # ('left', 'op', 'right')
+        "binary operator"
+        return op2func(node.op)(self.run(node.left),
+                                self.run(node.right))
+
+    def on_boolop(self, node):    # ('op', 'values')
+        "boolean operator"
+        val = self.run(node.values[0])
+        is_and = ast.And == node.op.__class__
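+        # short-circuit: only evaluate the remaining values while the
+        # result is still undecided (and-True / or-False so far)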
+        if (is_and and val) or (not is_and and not val):
+            for n in node.values:
+                val = op2func(node.op)(val, self.run(n))
+                if (is_and and not val) or (not is_and and val):
+                    break
+        return val
+
+    def on_compare(self, node):    # ('left', 'ops', 'comparators')
+        "comparison operators"
+        lval = self.run(node.left)
+        out = True
+        for op, rnode in zip(node.ops, node.comparators):
+            rval = self.run(rnode)
+            out = op2func(op)(lval, rval)
+            lval = rval
+            if self.use_numpy and isinstance(out, numpy.ndarray) and out.any():
+                break
+            elif not out:
+                break
+        return out
+
+    def on_print(self, node):    # ('dest', 'values', 'nl')
+        """ note: implements Python2 style print statement, not
+        print() function.  May need improvement...."""
+        dest = self.run(node.dest) or self.writer
+        end = ''
+        if node.nl:
+            end = '\n'
+        out = [self.run(tnode) for tnode in node.values]
+        if out and len(self.error) == 0:
+            self._printer(*out, file=dest, end=end)
+
+    def _printer(self, *out, **kws):
+        "generic print function"
+        flush = kws.pop('flush', True)
+        fileh = kws.pop('file', self.writer)
+        sep = kws.pop('sep', ' ')
+        end = kws.pop('end', '\n')
+
+        print(*out, file=fileh, sep=sep, end=end)
+        if flush:
+            fileh.flush()
+
+    def on_if(self, node):    # ('test', 'body', 'orelse')
+        "regular if-then-else statement"
+        block = node.body
+        if not self.run(node.test):
+            block = node.orelse
+        for tnode in block:
+            self.run(tnode)
+
+    def on_ifexp(self, node):    # ('test', 'body', 'orelse')
+        "if expressions"
+        expr = node.orelse
+        if self.run(node.test):
+            expr = node.body
+        return self.run(expr)
+
+    def on_while(self, node):    # ('test', 'body', 'orelse')
+        "while blocks"
+        while self.run(node.test):
+            self._interrupt = None
+            for tnode in node.body:
+                self.run(tnode)
+                if self._interrupt is not None:
+                    break
+            if isinstance(self._interrupt, ast.Break):
+                break
+        else:
+            for tnode in node.orelse:
+                self.run(tnode)
+        self._interrupt = None
+
+    def on_for(self, node):    # ('target', 'iter', 'body', 'orelse')
+        "for blocks"
+        for val in self.run(node.iter):
+            self.node_assign(node.target, val)
+            self._interrupt = None
+            for tnode in node.body:
+                self.run(tnode)
+                if self._interrupt is not None:
+                    break
+            if isinstance(self._interrupt, ast.Break):
+                break
+        else:
+            for tnode in node.orelse:
+                self.run(tnode)
+        self._interrupt = None
+
+    def on_listcomp(self, node):    # ('elt', 'generators')
+        "list comprehension"
+        out = []
+        for tnode in node.generators:
+            if tnode.__class__ == ast.comprehension:
+                for val in self.run(tnode.iter):
+                    self.node_assign(tnode.target, val)
+                    add = True
+                    for cond in tnode.ifs:
+                        add = add and self.run(cond)
+                    if add:
+                        out.append(self.run(node.elt))
+        return out
+
+    def on_excepthandler(self, node):  # ('type', 'name', 'body')
+        "exception handler..."
+        return (self.run(node.type), node.name, node.body)
+
+    def on_try(self, node):    # ('body', 'handlers', 'orelse', 'finalbody')
+        "try/except/else/finally blocks"
+        no_errors = True
+        for tnode in node.body:
+            self.run(tnode, with_raise=False)
+            no_errors = no_errors and len(self.error) == 0
+            if len(self.error) > 0:
+                e_type, e_value, e_tback = self.error[-1].exc_info
+                for hnd in node.handlers:
+                    htype = None
+                    if hnd.type is not None:
+                        htype = __builtins__.get(hnd.type.id, None)
+                    if htype is None or isinstance(e_type(), htype):
+                        self.error = []
+                        if hnd.name is not None:
+                            self.node_assign(hnd.name, e_value)
+                        for tline in hnd.body:
+                            self.run(tline)
+                        break
+        if no_errors and hasattr(node, 'orelse'):
+            for tnode in node.orelse:
+                self.run(tnode)
+
+        if hasattr(node, 'finalbody'):
+            for tnode in node.finalbody:
+                self.run(tnode)
+
+    def on_raise(self, node):    # ('type', 'inst', 'tback')
+        "raise statement: note difference for python 2 and 3"
+        if version_info[0] == 3:
+            excnode = node.exc
+            msgnode = node.cause
+        else:
+            excnode = node.type
+            msgnode = node.inst
+        out = self.run(excnode)
+        msg = ' '.join(out.args)
+        msg2 = self.run(msgnode)
+        if msg2 not in (None, 'None'):
+            msg = "%s: %s" % (msg, msg2)
+        self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
+
+    def on_call(self, node):
+        "function execution"
+        #  ('func', 'args', 'keywords'. Py<3.5 has 'starargs' and 'kwargs' too)
+        func = self.run(node.func)
+        if not hasattr(func, '__call__') and not isinstance(func, type):
+            msg = "'%s' is not callable!!" % (func)
+            self.raise_exception(node, exc=TypeError, msg=msg)
+
+        args = [self.run(targ) for targ in node.args]
+        starargs = getattr(node, 'starargs', None)
+        if starargs is not None:
+            args = args + self.run(starargs)
+
+        keywords = {}
+        for key in node.keywords:
+            if not isinstance(key, ast.keyword):
+                msg = "keyword error in function call '%s'" % (func)
+                self.raise_exception(node, msg=msg)
+            keywords[key.arg] = self.run(key.value)
+
+        kwargs = getattr(node, 'kwargs', None)
+        if kwargs is not None:
+            keywords.update(self.run(kwargs))
+
+        try:
+            return func(*args, **keywords)
+        except:
+            self.raise_exception(node, msg="Error running %s" % (func))
+
+    def on_arg(self, node):    # ('arg', 'annotation')
+        "arg for function definitions"
+        return node.arg
+
+    def on_functiondef(self, node):
+        "define procedures"
+        # ('name', 'args', 'body', 'decorator_list')
+        if node.decorator_list != []:
+            raise Warning("decorated procedures not supported!")
+        kwargs = []
+
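+        # defaults pair one-to-one with the trailing positional args:
+        # node.args.args[offset:] <-> node.args.defaults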
+        offset = len(node.args.args) - len(node.args.defaults)
+        for idef, defnode in enumerate(node.args.defaults):
+            defval = self.run(defnode)
+            keyval = self.run(node.args.args[idef+offset])
+            kwargs.append((keyval, defval))
+
+        if version_info[0] == 3:
+            args = [tnode.arg for tnode in node.args.args[:offset]]
+        else:
+            args = [tnode.id for tnode in node.args.args[:offset]]
+
+        doc = None
+        nb0 = node.body[0]
+        if isinstance(nb0, ast.Expr) and isinstance(nb0.value, ast.Str):
+            doc = nb0.value.s
+
+        self.symtable[node.name] = Procedure(node.name, self, doc=doc,
+                                             lineno=self.lineno,
+                                             body=node.body,
+                                             args=args, kwargs=kwargs,
+                                             vararg=node.args.vararg,
+                                             varkws=node.args.kwarg)
+        if node.name in self.no_deepcopy:
+            self.no_deepcopy.remove(node.name)
+
+
+class Procedure(object):
+    """Procedure: user-defined function for asteval
+
+    This stores the parsed ast nodes as from the
+    'functiondef' ast node for later evaluation.
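+
+    For example (illustrative), evaluating::
+
+        def double(x):
+            return 2*x
+
+    stores a Procedure named 'double' in the interpreter's symtable,
+    which later expressions can call like a normal function.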
+    """
+    def __init__(self, name, interp, doc=None, lineno=0,
+                 body=None, args=None, kwargs=None,
+                 vararg=None, varkws=None):
+        self.name = name
+        self.__asteval__ = interp
+        self.raise_exc = self.__asteval__.raise_exception
+        self.__doc__ = doc
+        self.body = body
+        self.argnames = args
+        self.kwargs = kwargs
+        self.vararg = vararg
+        self.varkws = varkws
+        self.lineno = lineno
+
+    def __repr__(self):
+        sig = ""
+        if len(self.argnames) > 0:
+            sig = "%s%s" % (sig, ', '.join(self.argnames))
+        if self.vararg is not None:
+            sig = "%s, *%s" % (sig, self.vararg)
+        if len(self.kwargs) > 0:
+            if len(sig) > 0:
+                sig = "%s, " % sig
+            _kw = ["%s=%s" % (k, v) for k, v in self.kwargs]
+            sig = "%s%s" % (sig, ', '.join(_kw))
+
+        if self.varkws is not None:
+            sig = "%s, **%s" % (sig, self.varkws)
+        sig = "<Procedure %s(%s)>" % (self.name, sig)
+        if self.__doc__ is not None:
+            sig = "%s\n  %s" % (sig, self.__doc__)
+        return sig
+
+    def __call__(self, *args, **kwargs):
+        symlocals = {}
+        args = list(args)
+        n_args = len(args)
+        n_names = len(self.argnames)
+        n_kws = len(kwargs)
+
+        # may need to move kwargs to args if names align!
+        if (n_args < n_names) and n_kws > 0:
+            for name in self.argnames[n_args:]:
+                if name in kwargs:
+                    args.append(kwargs.pop(name))
+            n_args = len(args)
+            n_names = len(self.argnames)
+            n_kws = len(kwargs)
+
+        if len(self.argnames) > 0 and len(kwargs) > 0:
+            msg = "multiple values for keyword argument '%s' in Procedure %s"
+            for targ in self.argnames:
+                if targ in kwargs:
+                    self.raise_exc(None, exc=TypeError,
+                                   msg=msg % (targ, self.name),
+                                   lineno=self.lineno)
+
+        if n_args != n_names:
+            if n_args < n_names:
+                msg = 'not enough arguments for Procedure %s()' % self.name
+                msg = '%s (expected %i, got %i)' % (msg, n_names, n_args)
+                self.raise_exc(None, exc=TypeError, msg=msg)
+
+        for argname in self.argnames:
+            symlocals[argname] = args.pop(0)
+
+        try:
+            if self.vararg is not None:
+                symlocals[self.vararg] = tuple(args)
+
+            for key, val in self.kwargs:
+                if key in kwargs:
+                    val = kwargs.pop(key)
+                symlocals[key] = val
+
+            if self.varkws is not None:
+                symlocals[self.varkws] = kwargs
+
+            elif len(kwargs) > 0:
+                msg = 'extra keyword arguments for Procedure %s (%s)'
+                msg = msg % (self.name, ','.join(list(kwargs.keys())))
+                self.raise_exc(None, msg=msg, exc=TypeError,
+                               lineno=self.lineno)
+
+        except (ValueError, LookupError, TypeError,
+                NameError, AttributeError):
+            msg = 'incorrect arguments for Procedure %s' % self.name
+            self.raise_exc(None, msg=msg, lineno=self.lineno)
+
+        save_symtable = self.__asteval__.symtable.copy()
+        self.__asteval__.symtable.update(symlocals)
+        self.__asteval__.retval = None
+        retval = None
+
+        # evaluate script of function
+        for node in self.body:
+            self.__asteval__.run(node, expr='<>', lineno=self.lineno)
+            if len(self.__asteval__.error) > 0:
+                break
+            if self.__asteval__.retval is not None:
+                retval = self.__asteval__.retval
+                if retval is ReturnedNone:
+                    retval = None
+                break
+
+        self.__asteval__.symtable = save_symtable
+        symlocals = None
+        return retval
diff --git a/lmfit/astutils.py b/lmfit/astutils.py
index 0a8c25a..9df7146 100644
--- a/lmfit/astutils.py
+++ b/lmfit/astutils.py
@@ -1,258 +1,263 @@
-"""
-utility functions for asteval
-
-   Matthew Newville <newville at cars.uchicago.edu>,
-   The University of Chicago
-"""
-from __future__ import division, print_function
-import re
-import ast
-from sys import exc_info
-
-RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue',
-                  'def', 'del', 'elif', 'else', 'except', 'exec',
-                  'finally', 'for', 'from', 'global', 'if', 'import',
-                  'in', 'is', 'lambda', 'not', 'or', 'pass', 'print',
-                  'raise', 'return', 'try', 'while', 'with', 'True',
-                  'False', 'None', 'eval', 'execfile', '__import__',
-                  '__package__')
-
-NAME_MATCH = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$").match
-
-UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__', '__code__',
-                '__closure__', '__func__', '__self__', '__module__',
-                '__dict__', '__class__', '__call__', '__get__',
-                '__getattribute__', '__subclasshook__', '__new__',
-                '__init__', 'func_globals', 'func_code', 'func_closure',
-                'im_class', 'im_func', 'im_self', 'gi_code', 'gi_frame',
-                '__asteval__')
-
-# inherit these from python's __builtins__
-FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
-           'BaseException', 'BufferError', 'BytesWarning',
-           'DeprecationWarning', 'EOFError', 'EnvironmentError',
-           'Exception', 'False', 'FloatingPointError', 'GeneratorExit',
-           'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
-           'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
-           'MemoryError', 'NameError', 'None',
-           'NotImplementedError', 'OSError', 'OverflowError',
-           'ReferenceError', 'RuntimeError', 'RuntimeWarning',
-           'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
-           'SystemExit', 'True', 'TypeError', 'UnboundLocalError',
-           'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
-           'UnicodeTranslateError', 'UnicodeWarning', 'ValueError',
-           'Warning', 'ZeroDivisionError', 'abs', 'all', 'any', 'bin',
-           'bool', 'bytearray', 'bytes', 'chr', 'complex', 'dict', 'dir',
-           'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset',
-           'hash', 'hex', 'id', 'int', 'isinstance', 'len', 'list', 'map',
-           'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr',
-           'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum',
-           'tuple', 'type', 'zip')
-
-# inherit these from python's math
-FROM_MATH = ('acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
-             'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'exp',
-             'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
-             'hypot', 'isinf', 'isnan', 'ldexp', 'log', 'log10', 'log1p',
-             'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan',
-             'tanh', 'trunc')
-
-FROM_NUMPY = ('Inf', 'NAN', 'abs', 'add', 'alen', 'all', 'amax', 'amin',
-              'angle', 'any', 'append', 'arange', 'arccos', 'arccosh',
-              'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
-              'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array',
-              'array2string', 'asanyarray', 'asarray', 'asarray_chkfinite',
-              'ascontiguousarray', 'asfarray', 'asfortranarray',
-              'asmatrix', 'asscalar', 'atleast_1d', 'atleast_2d',
-              'atleast_3d', 'average', 'bartlett', 'base_repr',
-              'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor',
-              'blackman', 'bool', 'broadcast', 'broadcast_arrays', 'byte',
-              'c_', 'cdouble', 'ceil', 'cfloat', 'chararray', 'choose',
-              'clip', 'clongdouble', 'clongfloat', 'column_stack',
-              'common_type', 'complex', 'complex128', 'complex64',
-              'complex_', 'complexfloating', 'compress', 'concatenate',
-              'conjugate', 'convolve', 'copy', 'copysign', 'corrcoef',
-              'correlate', 'cos', 'cosh', 'cov', 'cross', 'csingle',
-              'cumprod', 'cumsum', 'datetime_data', 'deg2rad', 'degrees',
-              'delete', 'diag', 'diag_indices', 'diag_indices_from',
-              'diagflat', 'diagonal', 'diff', 'digitize', 'divide', 'dot',
-              'double', 'dsplit', 'dstack', 'dtype', 'e', 'ediff1d',
-              'empty', 'empty_like', 'equal', 'exp', 'exp2', 'expand_dims',
-              'expm1', 'extract', 'eye', 'fabs', 'fill_diagonal', 'finfo',
-              'fix', 'flatiter', 'flatnonzero', 'fliplr', 'flipud',
-              'float', 'float32', 'float64', 'float_', 'floating', 'floor',
-              'floor_divide', 'fmax', 'fmin', 'fmod', 'format_parser',
-              'frexp', 'frombuffer', 'fromfile', 'fromfunction',
-              'fromiter', 'frompyfunc', 'fromregex', 'fromstring', 'fv',
-              'genfromtxt', 'getbufsize', 'geterr', 'gradient', 'greater',
-              'greater_equal', 'hamming', 'hanning', 'histogram',
-              'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot',
-              'i0', 'identity', 'iinfo', 'imag', 'in1d', 'index_exp',
-              'indices', 'inexact', 'inf', 'info', 'infty', 'inner',
-              'insert', 'int', 'int0', 'int16', 'int32', 'int64', 'int8',
-              'int_', 'int_asbuffer', 'intc', 'integer', 'interp',
-              'intersect1d', 'intp', 'invert', 'ipmt', 'irr', 'iscomplex',
-              'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan',
-              'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar',
-              'issctype', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp',
-              'left_shift', 'less', 'less_equal', 'linspace',
-              'little_endian', 'load', 'loads', 'loadtxt', 'log', 'log10',
-              'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
-              'logical_not', 'logical_or', 'logical_xor', 'logspace',
-              'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong',
-              'mafromtxt', 'mask_indices', 'mat', 'matrix', 'max',
-              'maximum', 'maximum_sctype', 'may_share_memory', 'mean',
-              'median', 'memmap', 'meshgrid', 'mgrid', 'min', 'minimum',
-              'mintypecode', 'mirr', 'mod', 'modf', 'msort', 'multiply',
-              'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax',
-              'nanmin', 'nansum', 'ndarray', 'ndenumerate', 'ndfromtxt',
-              'ndim', 'ndindex', 'negative', 'newaxis', 'nextafter',
-              'nonzero', 'not_equal', 'nper', 'npv', 'number',
-              'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer',
-              'packbits', 'percentile', 'pi', 'piecewise', 'place', 'pmt',
-              'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit',
-              'polyint', 'polymul', 'polysub', 'polyval', 'power', 'ppmt',
-              'prod', 'product', 'ptp', 'put', 'putmask', 'pv', 'r_',
-              'rad2deg', 'radians', 'rank', 'rate', 'ravel', 'real',
-              'real_if_close', 'reciprocal', 'record', 'remainder',
-              'repeat', 'reshape', 'resize', 'restoredot', 'right_shift',
-              'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round',
-              'round_', 'row_stack', 's_', 'sctype2char', 'searchsorted',
-              'select', 'setbufsize', 'setdiff1d', 'seterr', 'setxor1d',
-              'shape', 'short', 'sign', 'signbit', 'signedinteger', 'sin',
-              'sinc', 'single', 'singlecomplex', 'sinh', 'size',
-              'sometrue', 'sort', 'sort_complex', 'spacing', 'split',
-              'sqrt', 'square', 'squeeze', 'std', 'str', 'str_',
-              'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh',
-              'tensordot', 'tile', 'trace', 'transpose', 'trapz', 'tri',
-              'tril', 'tril_indices', 'tril_indices_from', 'trim_zeros',
-              'triu', 'triu_indices', 'triu_indices_from', 'true_divide',
-              'trunc', 'ubyte', 'uint', 'uint0', 'uint16', 'uint32',
-              'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong', 'union1d',
-              'unique', 'unravel_index', 'unsignedinteger', 'unwrap',
-              'ushort', 'vander', 'var', 'vdot', 'vectorize', 'vsplit',
-              'vstack', 'where', 'who', 'zeros', 'zeros_like')
-
-NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos',
-                 'atan': 'arctan', 'atan2': 'arctan2', 'atanh':
-                 'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'}
-
-def _open(filename, mode='r', buffering=0):
-    """read only version of open()"""
-    umode = 'r'
-    if mode == 'rb':
-        umode = 'rb'
-    return open(filename, umode, buffering)
-
-LOCALFUNCS = {'open': _open}
-
-OPERATORS = {ast.Is: lambda a, b: a is b,
-             ast.IsNot: lambda a, b: a is not b,
-             ast.In: lambda a, b: a in b,
-             ast.NotIn: lambda a, b: a not in b,
-             ast.Add: lambda a, b: a + b,
-             ast.BitAnd: lambda a, b: a & b,
-             ast.BitOr: lambda a, b: a | b,
-             ast.BitXor: lambda a, b: a ^ b,
-             ast.Div: lambda a, b: a / b,
-             ast.FloorDiv: lambda a, b: a // b,
-             ast.LShift: lambda a, b: a << b,
-             ast.RShift: lambda a, b: a >> b,
-             ast.Mult: lambda a, b: a * b,
-             ast.Pow: lambda a, b: a ** b,
-             ast.Sub: lambda a, b: a - b,
-             ast.Mod: lambda a, b: a % b,
-             ast.And: lambda a, b: a and b,
-             ast.Or: lambda a, b: a or b,
-             ast.Eq: lambda a, b: a == b,
-             ast.Gt: lambda a, b: a > b,
-             ast.GtE: lambda a, b: a >= b,
-             ast.Lt: lambda a, b: a < b,
-             ast.LtE: lambda a, b: a <= b,
-             ast.NotEq: lambda a, b: a != b,
-             ast.Invert: lambda a: ~a,
-             ast.Not: lambda a: not a,
-             ast.UAdd: lambda a: +a,
-             ast.USub: lambda a: -a}
-
-
-def valid_symbol_name(name):
-    """determines whether the input symbol name is a valid name
-
-    This checks for reserved words, and that the name matches the
-    regular expression ``[a-zA-Z_][a-zA-Z0-9_]``
-    """
-    if name in RESERVED_WORDS:
-        return False
-    return NAME_MATCH(name) is not None
-
-
-def op2func(op):
-    "return function for operator nodes"
-    return OPERATORS[op.__class__]
-
-
-class Empty:
-    """empty class"""
-    def __init__(self):
-        pass
-
-    def __nonzero__(self):
-        return False
-
-ReturnedNone = Empty()
-
-
-class ExceptionHolder(object):
-    "basic exception handler"
-    def __init__(self, node, exc=None, msg='', expr=None, lineno=None):
-        self.node = node
-        self.expr = expr
-        self.msg = msg
-        self.exc = exc
-        self.lineno = lineno
-        self.exc_info = exc_info()
-        if self.exc is None and self.exc_info[0] is not None:
-            self.exc = self.exc_info[0]
-        if self.msg is '' and self.exc_info[1] is not None:
-            self.msg = self.exc_info[1]
-
-    def get_error(self):
-        "retrieve error data"
-        col_offset = -1
-        if self.node is not None:
-            try:
-                col_offset = self.node.col_offset
-            except AttributeError:
-                pass
-        try:
-            exc_name = self.exc.__name__
-        except AttributeError:
-            exc_name = str(self.exc)
-        if exc_name in (None, 'None'):
-            exc_name = 'UnknownError'
-
-        out = ["   %s" % self.expr]
-        if col_offset > 0:
-            out.append("    %s^^^" % ((col_offset)*' '))
-        out.append(str(self.msg))
-        return (exc_name, '\n'.join(out))
-
-
-class NameFinder(ast.NodeVisitor):
-    """find all symbol names used by a parsed node"""
-    def __init__(self):
-        self.names = []
-        ast.NodeVisitor.__init__(self)
-
-    def generic_visit(self, node):
-        if node.__class__.__name__ == 'Name':
-            if node.ctx.__class__ == ast.Load and node.id not in self.names:
-                self.names.append(node.id)
-        ast.NodeVisitor.generic_visit(self, node)
-
-def get_ast_names(astnode):
-    "returns symbol Names from an AST node"
-    finder = NameFinder()
-    finder.generic_visit(astnode)
-    return finder.names
+"""
+utility functions for asteval
+
+   Matthew Newville <newville at cars.uchicago.edu>,
+   The University of Chicago
+"""
+from __future__ import division, print_function
+import re
+import ast
+from sys import exc_info
+
+RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue',
+                  'def', 'del', 'elif', 'else', 'except', 'exec',
+                  'finally', 'for', 'from', 'global', 'if', 'import',
+                  'in', 'is', 'lambda', 'not', 'or', 'pass', 'print',
+                  'raise', 'return', 'try', 'while', 'with', 'True',
+                  'False', 'None', 'eval', 'execfile', '__import__',
+                  '__package__')
+
+NAME_MATCH = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$").match
+
+UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__', '__code__',
+                '__closure__', '__func__', '__self__', '__module__',
+                '__dict__', '__class__', '__call__', '__get__',
+                '__getattribute__', '__subclasshook__', '__new__',
+                '__init__', 'func_globals', 'func_code', 'func_closure',
+                'im_class', 'im_func', 'im_self', 'gi_code', 'gi_frame',
+                '__asteval__')
+
+# inherit these from python's __builtins__
+FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
+           'BaseException', 'BufferError', 'BytesWarning',
+           'DeprecationWarning', 'EOFError', 'EnvironmentError',
+           'Exception', 'False', 'FloatingPointError', 'GeneratorExit',
+           'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
+           'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
+           'MemoryError', 'NameError', 'None',
+           'NotImplementedError', 'OSError', 'OverflowError',
+           'ReferenceError', 'RuntimeError', 'RuntimeWarning',
+           'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
+           'SystemExit', 'True', 'TypeError', 'UnboundLocalError',
+           'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
+           'UnicodeTranslateError', 'UnicodeWarning', 'ValueError',
+           'Warning', 'ZeroDivisionError', 'abs', 'all', 'any', 'bin',
+           'bool', 'bytearray', 'bytes', 'chr', 'complex', 'dict', 'dir',
+           'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset',
+           'hash', 'hex', 'id', 'int', 'isinstance', 'len', 'list', 'map',
+           'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr',
+           'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum',
+           'tuple', 'type', 'zip')
+
+# inherit these from python's math
+FROM_MATH = ('acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
+             'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'exp',
+             'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
+             'hypot', 'isinf', 'isnan', 'ldexp', 'log', 'log10', 'log1p',
+             'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan',
+             'tanh', 'trunc')
+
+FROM_NUMPY = ('Inf', 'NAN', 'abs', 'add', 'alen', 'all', 'amax', 'amin',
+              'angle', 'any', 'append', 'arange', 'arccos', 'arccosh',
+              'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
+              'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array',
+              'array2string', 'asanyarray', 'asarray', 'asarray_chkfinite',
+              'ascontiguousarray', 'asfarray', 'asfortranarray',
+              'asmatrix', 'asscalar', 'atleast_1d', 'atleast_2d',
+              'atleast_3d', 'average', 'bartlett', 'base_repr',
+              'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor',
+              'blackman', 'bool', 'broadcast', 'broadcast_arrays', 'byte',
+              'c_', 'cdouble', 'ceil', 'cfloat', 'chararray', 'choose',
+              'clip', 'clongdouble', 'clongfloat', 'column_stack',
+              'common_type', 'complex', 'complex128', 'complex64',
+              'complex_', 'complexfloating', 'compress', 'concatenate',
+              'conjugate', 'convolve', 'copy', 'copysign', 'corrcoef',
+              'correlate', 'cos', 'cosh', 'cov', 'cross', 'csingle',
+              'cumprod', 'cumsum', 'datetime_data', 'deg2rad', 'degrees',
+              'delete', 'diag', 'diag_indices', 'diag_indices_from',
+              'diagflat', 'diagonal', 'diff', 'digitize', 'divide', 'dot',
+              'double', 'dsplit', 'dstack', 'dtype', 'e', 'ediff1d',
+              'empty', 'empty_like', 'equal', 'exp', 'exp2', 'expand_dims',
+              'expm1', 'extract', 'eye', 'fabs', 'fill_diagonal', 'finfo',
+              'fix', 'flatiter', 'flatnonzero', 'fliplr', 'flipud',
+              'float', 'float32', 'float64', 'float_', 'floating', 'floor',
+              'floor_divide', 'fmax', 'fmin', 'fmod', 'format_parser',
+              'frexp', 'frombuffer', 'fromfile', 'fromfunction',
+              'fromiter', 'frompyfunc', 'fromregex', 'fromstring', 'fv',
+              'genfromtxt', 'getbufsize', 'geterr', 'gradient', 'greater',
+              'greater_equal', 'hamming', 'hanning', 'histogram',
+              'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot',
+              'i0', 'identity', 'iinfo', 'imag', 'in1d', 'index_exp',
+              'indices', 'inexact', 'inf', 'info', 'infty', 'inner',
+              'insert', 'int', 'int0', 'int16', 'int32', 'int64', 'int8',
+              'int_', 'int_asbuffer', 'intc', 'integer', 'interp',
+              'intersect1d', 'intp', 'invert', 'ipmt', 'irr', 'iscomplex',
+              'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan',
+              'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar',
+              'issctype', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp',
+              'left_shift', 'less', 'less_equal', 'linspace',
+              'little_endian', 'load', 'loads', 'loadtxt', 'log', 'log10',
+              'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
+              'logical_not', 'logical_or', 'logical_xor', 'logspace',
+              'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong',
+              'mafromtxt', 'mask_indices', 'mat', 'matrix', 'max',
+              'maximum', 'maximum_sctype', 'may_share_memory', 'mean',
+              'median', 'memmap', 'meshgrid', 'mgrid', 'min', 'minimum',
+              'mintypecode', 'mirr', 'mod', 'modf', 'msort', 'multiply',
+              'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax',
+              'nanmin', 'nansum', 'ndarray', 'ndenumerate', 'ndfromtxt',
+              'ndim', 'ndindex', 'negative', 'newaxis', 'nextafter',
+              'nonzero', 'not_equal', 'nper', 'npv', 'number',
+              'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer',
+              'packbits', 'percentile', 'pi', 'piecewise', 'place', 'pmt',
+              'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit',
+              'polyint', 'polymul', 'polysub', 'polyval', 'power', 'ppmt',
+              'prod', 'product', 'ptp', 'put', 'putmask', 'pv', 'r_',
+              'rad2deg', 'radians', 'rank', 'rate', 'ravel', 'real',
+              'real_if_close', 'reciprocal', 'record', 'remainder',
+              'repeat', 'reshape', 'resize', 'restoredot', 'right_shift',
+              'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round',
+              'round_', 'row_stack', 's_', 'sctype2char', 'searchsorted',
+              'select', 'setbufsize', 'setdiff1d', 'seterr', 'setxor1d',
+              'shape', 'short', 'sign', 'signbit', 'signedinteger', 'sin',
+              'sinc', 'single', 'singlecomplex', 'sinh', 'size',
+              'sometrue', 'sort', 'sort_complex', 'spacing', 'split',
+              'sqrt', 'square', 'squeeze', 'std', 'str', 'str_',
+              'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh',
+              'tensordot', 'tile', 'trace', 'transpose', 'trapz', 'tri',
+              'tril', 'tril_indices', 'tril_indices_from', 'trim_zeros',
+              'triu', 'triu_indices', 'triu_indices_from', 'true_divide',
+              'trunc', 'ubyte', 'uint', 'uint0', 'uint16', 'uint32',
+              'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong', 'union1d',
+              'unique', 'unravel_index', 'unsignedinteger', 'unwrap',
+              'ushort', 'vander', 'var', 'vdot', 'vectorize', 'vsplit',
+              'vstack', 'where', 'who', 'zeros', 'zeros_like')
+
+NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos',
+                 'atan': 'arctan', 'atan2': 'arctan2', 'atanh':
+                 'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'}
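+# e.g. 'ln(x)' in an asteval expression resolves to numpy.log(x)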
+
+def _open(filename, mode='r', buffering=0):
+    """read only version of open()"""
+    umode = 'r'
+    if mode == 'rb':
+        umode = 'rb'
+    return open(filename, umode, buffering)
+
+LOCALFUNCS = {'open': _open}
+
+OPERATORS = {ast.Is: lambda a, b: a is b,
+             ast.IsNot: lambda a, b: a is not b,
+             ast.In: lambda a, b: a in b,
+             ast.NotIn: lambda a, b: a not in b,
+             ast.Add: lambda a, b: a + b,
+             ast.BitAnd: lambda a, b: a & b,
+             ast.BitOr: lambda a, b: a | b,
+             ast.BitXor: lambda a, b: a ^ b,
+             ast.Div: lambda a, b: a / b,
+             ast.FloorDiv: lambda a, b: a // b,
+             ast.LShift: lambda a, b: a << b,
+             ast.RShift: lambda a, b: a >> b,
+             ast.Mult: lambda a, b: a * b,
+             ast.Pow: lambda a, b: a ** b,
+             ast.Sub: lambda a, b: a - b,
+             ast.Mod: lambda a, b: a % b,
+             ast.And: lambda a, b: a and b,
+             ast.Or: lambda a, b: a or b,
+             ast.Eq: lambda a, b: a == b,
+             ast.Gt: lambda a, b: a > b,
+             ast.GtE: lambda a, b: a >= b,
+             ast.Lt: lambda a, b: a < b,
+             ast.LtE: lambda a, b: a <= b,
+             ast.NotEq: lambda a, b: a != b,
+             ast.Invert: lambda a: ~a,
+             ast.Not: lambda a: not a,
+             ast.UAdd: lambda a: +a,
+             ast.USub: lambda a: -a}
+
+
+def valid_symbol_name(name):
+    """determines whether the input symbol name is a valid name
+
+    This checks for reserved words, and that the name matches the
+    regular expression ``[a-zA-Z_][a-zA-Z0-9_]*``
+    """
+    if name in RESERVED_WORDS:
+        return False
+    return NAME_MATCH(name) is not None
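+# Examples (illustrative):
+#   valid_symbol_name('x1')     -> True
+#   valid_symbol_name('lambda') -> False  (reserved word)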
+
+
+def op2func(op):
+    "return function for operator nodes"
+    return OPERATORS[op.__class__]
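+# e.g. (illustrative): op2func(ast.Add()) returns the 'a + b' lambda above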
+
+
+class Empty:
+    """empty class"""
+    def __init__(self):
+        pass
+
+    def __nonzero__(self):
+        return False
+
+ReturnedNone = Empty()
+
+
+class ExceptionHolder(object):
+    "basic exception handler"
+    def __init__(self, node, exc=None, msg='', expr=None, lineno=None):
+        self.node = node
+        self.expr = expr
+        self.msg = msg
+        self.exc = exc
+        self.lineno = lineno
+        self.exc_info = exc_info()
+        if self.exc is None and self.exc_info[0] is not None:
+            self.exc = self.exc_info[0]
+        if self.msg == '' and self.exc_info[1] is not None:
+            self.msg = self.exc_info[1]
+
+    def get_error(self):
+        "retrieve error data"
+        col_offset = -1
+        if self.node is not None:
+            try:
+                col_offset = self.node.col_offset
+            except AttributeError:
+                pass
+        try:
+            exc_name = self.exc.__name__
+        except AttributeError:
+            exc_name = str(self.exc)
+        if exc_name in (None, 'None'):
+            exc_name = 'UnknownError'
+
+        out = ["   %s" % self.expr]
+        if col_offset > 0:
+            out.append("    %s^^^" % ((col_offset)*' '))
+        out.append(str(self.msg))
+        return (exc_name, '\n'.join(out))
+
+
+class NameFinder(ast.NodeVisitor):
+    """find all symbol names used by a parsed node"""
+    def __init__(self):
+        self.names = []
+        ast.NodeVisitor.__init__(self)
+
+    def generic_visit(self, node):
+        if node.__class__.__name__ == 'Name':
+            if node.ctx.__class__ == ast.Load and node.id not in self.names:
+                self.names.append(node.id)
+        ast.NodeVisitor.generic_visit(self, node)
+
+def get_ast_names(astnode):
+    "returns symbol Names from an AST node"
+    finder = NameFinder()
+    finder.generic_visit(astnode)
+    return finder.names
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index d8a2934..b1bce11 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -1,419 +1,416 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-Contains functions to calculate confidence intervals.
-"""
-from __future__ import print_function
-from warnings import warn
-import numpy as np
-from scipy.stats import f
-from scipy.optimize import brentq
-from .minimizer import MinimizerException
-
-try:
-    from collections import OrderedDict
-except ImportError:
-    from ordereddict import OrderedDict
-
-CONF_ERR_GEN    = 'Cannot determine Confidence Intervals'
-CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
-CONF_ERR_NVARS  = '%s with < 2 variables' % CONF_ERR_GEN
-
-def f_compare(ndata, nparas, new_chi, best_chi, nfix=1.):
-    """
-    Returns the probability for two given parameter sets.
-    nfix is the number of fixed parameters.
-    """
-    nparas = nparas + nfix
-    nfree = ndata - nparas
-    nfix = 1.0*nfix
-    dchi = new_chi / best_chi - 1.0
-    return f.cdf(dchi * nfree / nfix, nfix, nfree)
-
-
-def copy_vals(params):
-    """Saves the values and stderrs of params in temporay dict"""
-    tmp_params = {}
-    for para_key in params:
-        tmp_params[para_key] = (params[para_key].value,
-                                params[para_key].stderr)
-    return tmp_params
-
-
-def restore_vals(tmp_params, params):
-    """Restores values and stderrs of params in temporay dict"""
-    for para_key in params:
-        params[para_key].value, params[para_key].stderr = tmp_params[para_key]
-
-
-def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
-                  trace=False, maxiter=200, verbose=False, prob_func=None):
-    r"""Calculates the confidence interval for parameters
-    from the given a MinimizerResult, output from minimize.
-
-    The parameter for which the ci is calculated will be varied, while
-    the remaining parameters are re-optimized for minimizing chi-square.
-    The resulting chi-square is used  to calculate the probability with
-    a given statistic e.g. F-statistic. This function uses a 1d-rootfinder
-    from scipy to find the values resulting in the searched confidence
-    region.
-
-    Parameters
-    ----------
-    minimizer : Minimizer
-        The minimizer to use, holding objective function.
-    result : MinimizerResult
-        The result of running minimize().
-    p_names : list, optional
-        Names of the parameters for which the ci is calculated. If None,
-        the ci is calculated for every parameter.
-    sigmas : list, optional
-        The probabilities (1-alpha) to find. Default is 1-, 2- and 3-sigma.
-    trace : bool, optional
-        Defaults to False; if True, each result of a probability
-        calculation is saved along with the parameter. This can be
-        used to plot so-called "profile traces".
-
-    Returns
-    -------
-    output : dict
-        A dict, which contains a list of (sigma, vals)-tuples for each name.
-    trace_dict : dict
-        Only if trace is set to True. A dict whose keys are the fixed
-        parameter names. The values are again a dict with the names as
-        keys, but with an additional key 'prob'. Each contains an array
-        of the corresponding values.
-
-    See also
-    --------
-    conf_interval2d
-
-    Other Parameters
-    ----------------
-    maxiter : int
-        Maximum number of iterations used to find an upper limit.
-    prob_func : ``None`` or callable
-        Function to calculate the probability from the optimized chi-square.
-        Default (``None``) uses built-in f_compare (F test).
-    verbose : bool
-        print extra debugging information. Default is ``False``.
-
-
-    Examples
-    --------
-
-    >>> from lmfit.printfuncs import *
-    >>> mini = minimize(some_func, params)
-    >>> mini.leastsq()
-    True
-    >>> report_errors(params)
-    ... #report
-    >>> ci = conf_interval(mini)
-    >>> report_ci(ci)
-    ... #report
-
-    Now with quantiles for the sigmas and using the trace.
-
-    >>> ci, trace = conf_interval(mini, sigmas=(0.25, 0.5, 0.75, 0.999), trace=True)
-    >>> fixed = trace['para1']['para1']
-    >>> free = trace['para1']['not_para1']
-    >>> prob = trace['para1']['prob']
-
-    This makes it possible to plot the dependence between free and fixed.
-    """
-    ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas,
-                            trace, verbose, maxiter)
-    output = ci.calc_all_ci()
-    if trace:
-        return output, ci.trace_dict
-    return output
-
-
-def map_trace_to_names(trace, params):
-    "maps trace to param names"
-    out = {}
-    allnames = list(params.keys()) + ['prob']
-    for name in trace.keys():
-        tmp_dict = {}
-        tmp = np.array(trace[name])
-        for para_name, values in zip(allnames, tmp.T):
-            tmp_dict[para_name] = values
-        out[name] = tmp_dict
-    return out
-
-
-class ConfidenceInterval(object):
-    """
-    Class used to calculate the confidence interval.
-    """
-    def __init__(self, minimizer, result, p_names=None, prob_func=None,
-                 sigmas=(0.674, 0.95, 0.997), trace=False, verbose=False,
-                 maxiter=50):
-        """
-
-        """
-        self.verbose = verbose
-        self.minimizer = minimizer
-        self.result = result
-        self.params = result.params
-        self.org = copy_vals(self.params)
-        self.best_chi = result.chisqr
-
-        if p_names is None:
-            p_names = [i for i in self.params if self.params[i].vary]
-
-        self.p_names = p_names
-        self.fit_params = [self.params[p] for p in self.p_names]
-
-        # check that there are at least 2 true variables!
-        # check that all stderrs are sensible (including not None or NaN)
-        nvars = 0
-        for par in self.fit_params:
-            if par.vary:
-                nvars += 1
-            try:
-                if not (par.vary and par.stderr > 0.0):
-                    raise MinimizerException(CONF_ERR_STDERR)
-            except TypeError:
-                raise MinimizerException(CONF_ERR_STDERR)
-        if nvars < 2:
-            raise MinimizerException(CONF_ERR_NVARS)
-
-        if prob_func is None or not hasattr(prob_func, '__call__'):
-            self.prob_func = f_compare
-        if trace:
-            self.trace_dict = dict([(i, []) for i in self.p_names])
-
-        self.trace = trace
-        self.maxiter = maxiter
-        self.min_rel_change = 1e-5
-
-        self.sigmas = list(sigmas)
-        self.sigmas.sort()
-
-    def calc_all_ci(self):
-        """
-        Calculates all cis.
-        """
-        out = OrderedDict()
-
-        for p in self.p_names:
-            out[p] = (self.calc_ci(p, -1)[::-1] +
-                      [(0., self.params[p].value)] +
-                      self.calc_ci(p, 1))
-        if self.trace:
-            self.trace_dict = map_trace_to_names(self.trace_dict,
-                                                 self.params)
-
-        return out
-
-    def calc_ci(self, para, direction):
-        """
-        Calculate the ci for a single parameter for a single direction.
-        Direction is either positive or negative 1.
-        """
-
-        if isinstance(para, str):
-            para = self.params[para]
-
-        #function used to calculate the pro
-        calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
-        if self.trace:
-            x = [i.value for i in self.params.values()]
-            self.trace_dict[para.name].append(x + [0])
-
-        para.vary = False
-        limit, max_prob = self.find_limit(para, direction)
-        start_val = para.value.copy()
-        a_limit = start_val.copy()
-        ret = []
-        orig_warn_settings = np.geterr()
-        np.seterr(all='ignore')
-        for prob in self.sigmas:
-            if prob > max_prob:
-                ret.append((prob, direction*np.inf))
-                continue
-
-            try:
-                val = brentq(calc_prob, a_limit,
-                             limit, rtol=.5e-4, args=prob)
-
-            except ValueError:
-                self.reset_vals()
-                try:
-                    val = brentq(calc_prob, start_val,
-                                 limit, rtol=.5e-4, args=prob)
-                except ValueError:
-                    val = np.nan
-
-            a_limit = val
-            ret.append((prob, val))
-
-        para.vary = True
-        self.reset_vals()
-        np.seterr(**orig_warn_settings)
-        return ret
-
-    def reset_vals(self):
-        restore_vals(self.org, self.params)
-
-    def find_limit(self, para, direction):
-        """
-        For given para, search a value so that prob(val) > sigmas.
-        """
-        if self.verbose:
-            print('Calculating CI for ' + para.name)
-        self.reset_vals()
-
-        #starting steps:
-        if para.stderr > 0 and para.stderr < abs(para.value):
-            step = para.stderr
-        else:
-            step = max(abs(para.value) * 0.2, 0.001)
-        para.vary = False
-        start_val = para.value
-
-        old_prob = 0
-        limit = start_val
-        i = 0
-
-        while old_prob < max(self.sigmas):
-            i = i + 1
-            limit += step * direction
-
-            new_prob = self.calc_prob(para, limit)
-            rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1.e-12)
-            old_prob = new_prob
-
-            # Check convergence.
-            if i > self.maxiter:
-                errmsg = "Warning, maxiter={0} reached".format(self.maxiter)
-                errmsg += "and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob)
-                warn(errmsg)
-                break
-
-            if rel_change < self.min_rel_change:
-                errmsg = "Warning, rel_change={0} < 0.01 ".format(rel_change)
-                errmsg += " at iteration {3} and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob, i)
-                warn(errmsg)
-                break
-
-        self.reset_vals()
-
-        return limit, new_prob
-
-    def calc_prob(self, para, val, offset=0., restore=False):
-        """Returns the probability for given Value."""
-        if restore:
-            restore_vals(self.org, self.params)
-        para.value = val
-        save_para = self.params[para.name]
-        self.params[para.name] = para
-        self.minimizer.prepare_fit(self.params)
-        out = self.minimizer.leastsq()
-        prob = self.prob_func(out.ndata, out.ndata - out.nfree,
-                              out.chisqr, self.best_chi)
-
-        if self.trace:
-            x = [i.value for i in out.params.values()]
-            self.trace_dict[para.name].append(x + [prob])
-        self.params[para.name] = save_para
-        return prob - offset
-
-def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
-                    limits=None, prob_func=None):
-    r"""Calculates confidence regions for two fixed parameters.
-
-    The method is explained in *conf_interval*: here we are fixing
-    two parameters.
-
-    Parameters
-    ----------
-    minimizer : Minimizer
-        The minimizer to use, holding objective function.
-    result : MinimizerResult
-        The result of running minimize().
-    x_name : string
-        The name of the parameter which will be the x direction.
-    y_name : string
-        The name of the parameter which will be the y direction.
-    nx, ny : ints, optional
-        Number of points.
-    limits : tuple: optional
-        Should have the form ((x_upper, x_lower),(y_upper, y_lower)). If not
-        given, the default is 5 std-errs in each direction.
-
-    Returns
-    -------
-    x : (nx)-array
-        x-coordinates
-    y : (ny)-array
-        y-coordinates
-    grid : (nx,ny)-array
-        grid contains the calculated probabilities.
-
-    Examples
-    --------
-
-    >>> mini = Minimizer(some_func, params)
-    >>> result = mini.leastsq()
-    >>> x, y, gr = conf_interval2d(mini, result, 'para1','para2')
-    >>> plt.contour(x,y,gr)
-
-    Other Parameters
-    ----------------
-    prob_func : ``None`` or callable
-        Function to calculate the probability from the optimized chi-square.
-        Default (``None``) uses built-in f_compare (F test).
-    """
-    # used to detect that .leastsq() has run!
-    params = result.params
-
-    best_chi = result.chisqr
-    org = copy_vals(result.params)
-
-    if prob_func is None or not hasattr(prob_func, '__call__'):
-        prob_func = f_compare
-
-    x = params[x_name]
-    y = params[y_name]
-
-    if limits is None:
-        (x_upper, x_lower) = (x.value + 5 * x.stderr, x.value - 5
-                                                      * x.stderr)
-        (y_upper, y_lower) = (y.value + 5 * y.stderr, y.value - 5
-                                                      * y.stderr)
-    elif len(limits) == 2:
-        (x_upper, x_lower) = limits[0]
-        (y_upper, y_lower) = limits[1]
-
-    x_points = np.linspace(x_lower, x_upper, nx)
-    y_points = np.linspace(y_lower, y_upper, ny)
-    grid = np.dstack(np.meshgrid(x_points, y_points))
-
-    x.vary = False
-    y.vary = False
-
-    def calc_prob(vals, restore=False):
-        if restore:
-            restore_vals(org, result.params)
-        x.value = vals[0]
-        y.value = vals[1]
-        save_x = result.params[x.name]
-        save_y = result.params[y.name]
-        result.params[x.name] = x
-        result.params[y.name] = y
-        minimizer.prepare_fit(params=result.params)
-        out = minimizer.leastsq()
-        prob = prob_func(out.ndata, out.ndata - out.nfree, out.chisqr,
-                         best_chi, nfix=2.)
-        result.params[x.name] = save_x
-        result.params[y.name] = save_y
-        return prob
-
-    out = x_points, y_points, np.apply_along_axis(calc_prob, -1, grid)
-
-    x.vary, y.vary = True, True
-    restore_vals(org, result.params)
-    result.chisqr = best_chi
-    return out
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Contains functions to calculate confidence intervals.
+"""
+from __future__ import print_function
+from warnings import warn
+import numpy as np
+from scipy.stats import f
+from scipy.optimize import brentq
+from .minimizer import MinimizerException
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+CONF_ERR_GEN    = 'Cannot determine Confidence Intervals'
+CONF_ERR_STDERR = '%s without sensible uncertainty estimates' % CONF_ERR_GEN
+CONF_ERR_NVARS  = '%s with < 2 variables' % CONF_ERR_GEN
+
+def f_compare(ndata, nparas, new_chi, best_chi, nfix=1.):
+    """
+    Returns the probability for two given parameter sets.
+    nfix is the number of fixed parameters.
+    """
+    nparas = nparas + nfix
+    nfree = ndata - nparas
+    nfix = 1.0*nfix
+    dchi = new_chi / best_chi - 1.0
+    return f.cdf(dchi * nfree / nfix, nfix, nfree)
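+
+# Illustrative sketch (not part of the upstream module): with 100 data
+# points, 3 free parameters in the constrained fit, and a 5% increase in
+# chi-square after fixing one parameter, the F-test reports roughly a 97%
+# confidence level:
+#
+#     >>> round(f_compare(100, 3, 1.05, 1.0), 2)
+#     0.97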
+
+
+def copy_vals(params):
+    """Saves the values and stderrs of params in temporay dict"""
+    tmp_params = {}
+    for para_key in params:
+        tmp_params[para_key] = (params[para_key].value,
+                                params[para_key].stderr)
+    return tmp_params
+
+
+def restore_vals(tmp_params, params):
+    """Restores values and stderrs of params in temporay dict"""
+    for para_key in params:
+        params[para_key].value, params[para_key].stderr = tmp_params[para_key]
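+
+# Minimal round-trip sketch (hypothetical Parameters instance `params` with
+# a parameter named 'amp'): snapshot the values, perturb them during a
+# scan, then restore them.
+#
+#     >>> saved = copy_vals(params)
+#     >>> params['amp'].value = 42.0      # temporarily modified ...
+#     >>> restore_vals(saved, params)     # ... and back to the snapshot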
+
+
+def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
+                  trace=False, maxiter=200, verbose=False, prob_func=None):
+    """Calculates the confidence interval for parameters
+    from the given a MinimizerResult, output from minimize.
+
+    The parameter for which the ci is calculated will be varied, while
+    the remaining parameters are re-optimized for minimizing chi-square.
+    The resulting chi-square is used  to calculate the probability with
+    a given statistic e.g. F-statistic. This function uses a 1d-rootfinder
+    from scipy to find the values resulting in the searched confidence
+    region.
+
+    Parameters
+    ----------
+    minimizer : Minimizer
+        The minimizer to use, holding objective function.
+    result : MinimizerResult
+        The result of running minimize().
+    p_names : list, optional
+        Names of the parameters for which the ci is calculated. If None,
+        the ci is calculated for every parameter.
+    sigmas : list, optional
+        The probabilities (1-alpha) to find. Default is 1-, 2- and 3-sigma.
+    trace : bool, optional
+        Defaults to False. If True, each result of a probability
+        calculation is saved along with the parameter. This can be used
+        to plot so-called "profile traces".
+    maxiter : int
+        Maximum number of iterations to find an upper limit. Default is 200.
+    prob_func : ``None`` or callable
+        Function to calculate the probability from the optimized chi-square.
+        Default (``None``) uses built-in f_compare (F test).
+    verbose : bool
+        Print extra debugging information. Default is ``False``.
+
+
+    Returns
+    -------
+    output : dict
+        A dict containing a list of (sigma, value)-tuples for each parameter name.
+    trace_dict : dict
+        Only returned if trace is True. A dict whose keys are the fixed
+        parameter names; each value is again a dict with parameter names
+        as keys, plus an additional key 'prob'. Each entry contains an
+        array of the corresponding values.
+
+    See also
+    --------
+    conf_interval2d
+
+    Examples
+    --------
+
+    >>> from lmfit.printfuncs import *
+    >>> mini = minimize(some_func, params)
+    >>> mini.leastsq()
+    True
+    >>> report_errors(params)
+    ... #report
+    >>> ci = conf_interval(mini)
+    >>> report_ci(ci)
+    ... #report
+
+    Now with quantiles for the sigmas and using the trace.
+
+    >>> ci, trace = conf_interval(mini, sigmas=(0.25, 0.5, 0.75, 0.999), trace=True)
+    >>> fixed = trace['para1']['para1']
+    >>> free = trace['para1']['not_para1']
+    >>> prob = trace['para1']['prob']
+
+    This makes it possible to plot the dependence between the free and fixed parameters.
+
+    """
+    ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas,
+                            trace, verbose, maxiter)
+    output = ci.calc_all_ci()
+    if trace:
+        return output, ci.trace_dict
+    return output
+
+
+def map_trace_to_names(trace, params):
+    "maps trace to param names"
+    out = {}
+    allnames = list(params.keys()) + ['prob']
+    for name in trace.keys():
+        tmp_dict = {}
+        tmp = np.array(trace[name])
+        for para_name, values in zip(allnames, tmp.T):
+            tmp_dict[para_name] = values
+        out[name] = tmp_dict
+    return out
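+
+# Shape sketch (hypothetical two-parameter fit): each raw trace row holds
+# every parameter value followed by the probability; the mapping turns the
+# rows into per-name column arrays.
+#
+#     raw:    {'p1': [[1.0, 2.0, 0.0], [1.1, 2.2, 0.68]]}
+#     mapped: {'p1': {'p1': array([1.0, 1.1]),
+#                     'p2': array([2.0, 2.2]),
+#                     'prob': array([0.0, 0.68])}}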
+
+
+class ConfidenceInterval(object):
+    """
+    Class used to calculate the confidence interval.
+    """
+    def __init__(self, minimizer, result, p_names=None, prob_func=None,
+                 sigmas=(0.674, 0.95, 0.997), trace=False, verbose=False,
+                 maxiter=50):
+        """
+
+        """
+        self.verbose = verbose
+        self.minimizer = minimizer
+        self.result = result
+        self.params = result.params
+        self.org = copy_vals(self.params)
+        self.best_chi = result.chisqr
+
+        if p_names is None:
+            p_names = [i for i in self.params if self.params[i].vary]
+
+        self.p_names = p_names
+        self.fit_params = [self.params[p] for p in self.p_names]
+
+        # check that there are at least 2 true variables!
+        # check that all stderrs are sensible (including not None or NaN)
+        nvars = 0
+        for par in self.fit_params:
+            if par.vary:
+                nvars += 1
+            try:
+                if not (par.vary and par.stderr > 0.0):
+                    raise MinimizerException(CONF_ERR_STDERR)
+            except TypeError:
+                raise MinimizerException(CONF_ERR_STDERR)
+        if nvars < 2:
+            raise MinimizerException(CONF_ERR_NVARS)
+
+        if prob_func is None or not hasattr(prob_func, '__call__'):
+            self.prob_func = f_compare
+        if trace:
+            self.trace_dict = dict([(i, []) for i in self.p_names])
+
+        self.trace = trace
+        self.maxiter = maxiter
+        self.min_rel_change = 1e-5
+
+        self.sigmas = list(sigmas)
+        self.sigmas.sort()
+
+    def calc_all_ci(self):
+        """
+        Calculates the confidence intervals for all parameters in p_names.
+        """
+        out = OrderedDict()
+
+        for p in self.p_names:
+            out[p] = (self.calc_ci(p, -1)[::-1] +
+                      [(0., self.params[p].value)] +
+                      self.calc_ci(p, 1))
+        if self.trace:
+            self.trace_dict = map_trace_to_names(self.trace_dict,
+                                                 self.params)
+
+        return out
+
+    def calc_ci(self, para, direction):
+        """
+        Calculate the ci for a single parameter in a single direction.
+        Direction is either +1 or -1.
+        """
+
+        if isinstance(para, str):
+            para = self.params[para]
+
+        # function used to calculate the probability (offset by the target)
+        calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
+        if self.trace:
+            x = [i.value for i in self.params.values()]
+            self.trace_dict[para.name].append(x + [0])
+
+        para.vary = False
+        limit, max_prob = self.find_limit(para, direction)
+        start_val = a_limit = float(para.value)
+        ret = []
+        orig_warn_settings = np.geterr()
+        np.seterr(all='ignore')
+        for prob in self.sigmas:
+            if prob > max_prob:
+                ret.append((prob, direction*np.inf))
+                continue
+
+            try:
+                val = brentq(calc_prob, a_limit,
+                             limit, rtol=.5e-4, args=prob)
+
+            except ValueError:
+                self.reset_vals()
+                try:
+                    val = brentq(calc_prob, start_val,
+                                 limit, rtol=.5e-4, args=prob)
+                except ValueError:
+                    val = np.nan
+
+            a_limit = val
+            ret.append((prob, val))
+
+        para.vary = True
+        self.reset_vals()
+        np.seterr(**orig_warn_settings)
+        return ret
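+
+    # Standalone sketch of the root search used above (not lmfit-specific):
+    # brentq finds where a monotone function crosses a target level, just as
+    # calc_prob(para, val, offset=prob) is driven to zero here.
+    #
+    #     >>> from scipy.optimize import brentq
+    #     >>> from numpy import exp
+    #     >>> brentq(lambda v: (1 - exp(-v)) - 0.95, 0.0, 10.0)  # ~2.996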
+
+    def reset_vals(self):
+        restore_vals(self.org, self.params)
+
+    def find_limit(self, para, direction):
+        """
+        For the given parameter, search for a value such that prob(val) > max(sigmas).
+        """
+        if self.verbose:
+            print('Calculating CI for ' + para.name)
+        self.reset_vals()
+
+        # starting step size:
+        if para.stderr > 0 and para.stderr < abs(para.value):
+            step = para.stderr
+        else:
+            step = max(abs(para.value) * 0.2, 0.001)
+        para.vary = False
+        start_val = para.value
+
+        old_prob = 0
+        limit = start_val
+        i = 0
+
+        while old_prob < max(self.sigmas):
+            i = i + 1
+            limit += step * direction
+
+            new_prob = self.calc_prob(para, limit)
+            rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1.e-12)
+            old_prob = new_prob
+
+            # Check convergence.
+            if i > self.maxiter:
+                errmsg = "Warning, maxiter={0} reached".format(self.maxiter)
+                errmsg += "and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob)
+                warn(errmsg)
+                break
+
+            if rel_change < self.min_rel_change:
+                errmsg = "Warning, rel_change={0} < 0.01 ".format(rel_change)
+                errmsg += " at iteration {3} and prob({0}={1}) = {2} < max(sigmas).".format(para.name, limit, new_prob, i)
+                warn(errmsg)
+                break
+
+        self.reset_vals()
+
+        return limit, new_prob
+
+    def calc_prob(self, para, val, offset=0., restore=False):
+        """Returns the probability for given Value."""
+        if restore:
+            restore_vals(self.org, self.params)
+        para.value = val
+        save_para = self.params[para.name]
+        self.params[para.name] = para
+        self.minimizer.prepare_fit(self.params)
+        out = self.minimizer.leastsq()
+        prob = self.prob_func(out.ndata, out.ndata - out.nfree,
+                              out.chisqr, self.best_chi)
+
+        if self.trace:
+            x = [i.value for i in out.params.values()]
+            self.trace_dict[para.name].append(x + [prob])
+        self.params[para.name] = save_para
+        return prob - offset
+
+def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
+                    limits=None, prob_func=None):
+    r"""Calculates confidence regions for two fixed parameters.
+
+    The method is explained in *conf_interval*: here we are fixing
+    two parameters.
+
+    Parameters
+    ----------
+    minimizer : Minimizer
+        The minimizer to use, holding objective function.
+    result : MinimizerResult
+        The result of running minimize().
+    x_name : string
+        The name of the parameter which will be the x direction.
+    y_name : string
+        The name of the parameter which will be the y direction.
+    nx, ny : ints, optional
+        Number of points in the x and y directions.
+    limits : tuple, optional
+        Should have the form ((x_upper, x_lower), (y_upper, y_lower)). If not
+        given, the default is 5 stderrs in each direction.
+
+    Returns
+    -------
+    x : (nx)-array
+        x-coordinates
+    y : (ny)-array
+        y-coordinates
+    grid : (nx,ny)-array
+        grid contains the calculated probabilities.
+
+    Examples
+    --------
+
+    >>> mini = Minimizer(some_func, params)
+    >>> result = mini.leastsq()
+    >>> x, y, gr = conf_interval2d(mini, result, 'para1','para2')
+    >>> plt.contour(x,y,gr)
+
+    Other Parameters
+    ----------------
+    prob_func : ``None`` or callable
+        Function to calculate the probability from the optimized chi-square.
+        Default (``None``) uses built-in f_compare (F test).
+    """
+    # used to detect that .leastsq() has run!
+    params = result.params
+
+    best_chi = result.chisqr
+    org = copy_vals(result.params)
+
+    if prob_func is None or not hasattr(prob_func, '__call__'):
+        prob_func = f_compare
+
+    x = params[x_name]
+    y = params[y_name]
+
+    if limits is None:
+        (x_upper, x_lower) = (x.value + 5 * x.stderr,
+                              x.value - 5 * x.stderr)
+        (y_upper, y_lower) = (y.value + 5 * y.stderr,
+                              y.value - 5 * y.stderr)
+    elif len(limits) == 2:
+        (x_upper, x_lower) = limits[0]
+        (y_upper, y_lower) = limits[1]
+
+    x_points = np.linspace(x_lower, x_upper, nx)
+    y_points = np.linspace(y_lower, y_upper, ny)
+    grid = np.dstack(np.meshgrid(x_points, y_points))
+
+    x.vary = False
+    y.vary = False
+
+    def calc_prob(vals, restore=False):
+        if restore:
+            restore_vals(org, result.params)
+        x.value = vals[0]
+        y.value = vals[1]
+        save_x = result.params[x.name]
+        save_y = result.params[y.name]
+        result.params[x.name] = x
+        result.params[y.name] = y
+        minimizer.prepare_fit(params=result.params)
+        out = minimizer.leastsq()
+        prob = prob_func(out.ndata, out.ndata - out.nfree, out.chisqr,
+                         best_chi, nfix=2.)
+        result.params[x.name] = save_x
+        result.params[y.name] = save_y
+        return prob
+
+    out = x_points, y_points, np.apply_along_axis(calc_prob, -1, grid)
+
+    x.vary, y.vary = True, True
+    restore_vals(org, result.params)
+    result.chisqr = best_chi
+    return out
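+
+# Plotting sketch (assumes matplotlib is installed; `mini` and `result` are
+# as in the docstring example): contour levels at 0.674, 0.95 and 0.997
+# trace out the usual 1-, 2- and 3-sigma confidence regions.
+#
+#     >>> import matplotlib.pyplot as plt
+#     >>> x, y, grid = conf_interval2d(mini, result, 'para1', 'para2', 30, 30)
+#     >>> plt.contourf(x, y, grid)
+#     >>> plt.contour(x, y, grid, levels=[0.674, 0.95, 0.997], colors='k')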
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index a71bb3f..6c55279 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -1,286 +1,286 @@
-#!/usr/bin/env python
-"""
-basic model line shapes and distribution functions
-"""
-from __future__ import division
-from numpy import (pi, log, exp, sqrt, arctan, cos, where)
-from numpy.testing import assert_allclose
-
-from scipy.special import gamma as gamfcn
-from scipy.special import gammaln, erf, erfc, wofz
-
-log2 = log(2)
-s2pi = sqrt(2*pi)
-spi  = sqrt(pi)
-s2   = sqrt(2.0)
-
-functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'moffat', 'pearson7',
-             'breit_wigner', 'damped_oscillator', 'logistic', 'lognormal',
-             'students_t', 'expgaussian', 'donaich', 'skewed_gaussian',
-             'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz',
-             'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear',
-             'parabolic')
-
-def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
-    """1 dimensional gaussian:
-    gaussian(x, amplitude, center, sigma)
-    """
-    return (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 /(2*sigma**2))
-
-def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
-    """1 dimensional lorentzian
-    lorentzian(x, amplitude, center, sigma)
-    """
-    return (amplitude/(1 + ((1.0*x-center)/sigma)**2) ) / (pi*sigma)
-
-def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
-    """1 dimensional voigt function.
-    see http://en.wikipedia.org/wiki/Voigt_profile
-    """
-    if gamma is None:
-        gamma = sigma
-    z = (x-center + 1j*gamma)/ (sigma*s2)
-    return amplitude*wofz(z).real / (sigma*s2pi)
-
-def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
-    """1 dimensional pseudo-voigt:
-    pvoigt(x, amplitude, center, sigma, fraction)
-       = amplitude*(1-fraction)*gaussion(x, center, sigma_g) +
-         amplitude*fraction*lorentzian(x, center, sigma)
-
-    where sigma_g (the sigma for the Gaussian component) is
-
-        sigma_g = sigma / sqrt(2*log(2)) ~= sigma / 1.17741
-
-    so that the Gaussian and Lorentzian components have the
-    same FWHM of 2*sigma.
-    """
-    sigma_g = sigma / sqrt(2*log2)
-    return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +
-               fraction*lorentzian(x, amplitude, center, sigma))
-
-def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
-    """ 1 dimensional moffat function:
-
-    moffat(amplitude, center, sigma, beta) = amplitude / (((x - center)/sigma)**2 + 1)**beta
-    """
-    return amplitude / (((x - center)/sigma)**2 + 1)**beta
-
-def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
-    """pearson7 lineshape, using the wikipedia definition:
-
-    pearson7(x, center, sigma, expon) =
-      amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
-
-    where arg = (x-center)/sigma
-    and beta() is the beta function.
-    """
-    arg = (x-center)/sigma
-    scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
-    return  scale*(1+arg**2)**(-expon)/sigma
-
-def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
-    """Breit-Wigner-Fano lineshape:
-       = amplitude*(q*sigma/2 + x - center)**2 / ( (sigma/2)**2 + (x - center)**2 )
-    """
-    gam = sigma/2.0
-    return  amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
-
-def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
-    """amplitude for a damped harmonic oscillator
-    amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
-    """
-    center = max(1.e-9, abs(center))
-    return (amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
-
-def logistic(x, amplitude=1., center=0., sigma=1.):
-    """Logistic lineshape (yet another sigmoidal curve)
-        = amplitude*(1.  - 1. / (1 + exp((x-center)/sigma)))
-    """
-    return amplitude*(1. - 1./(1. + exp((x-center)/sigma)))
-
-def lognormal(x, amplitude=1.0, center=0., sigma=1):
-    """log-normal function
-    lognormal(x, center, sigma)
-        = (amplitude/x) * exp(-(ln(x) - center)/ (2* sigma**2))
-    """
-    x[where(x<=1.e-19)] = 1.e-19
-    return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
-
-def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
-    """Student's t distribution:
-        gamma((sigma+1)/2)   (1 + (x-center)**2/sigma)^(-(sigma+1)/2)
-     =  -------------------------
-        sqrt(sigma*pi)gamma(sigma/2)
-
-    """
-    s1  = (sigma+1)/2.0
-    denom = (sqrt(sigma*pi)*gamfcn(sigma/2))
-    return amplitude*(1 + (x-center)**2/sigma)**(-s1) * gamfcn(s1) / denom
-
-
-def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
-    """exponentially modified Gaussian
-
-    = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
-                erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
-
-    http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
-    """
-    gss = gamma*sigma*sigma
-    arg1 = gamma*(center +gss/2.0 - x)
-    arg2 = (center + gss - x)/(s2*sigma)
-    return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
-
-def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
-    """Doniach Sunjic asymmetric lineshape, used for photo-emission
-
-    = amplitude* cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
-                      (sigma**2 + (x-center)**2)**[(1-gamma)/2]
-
-    see http://www.casaxps.com/help_manual/line_shapes.htm
-    """
-    arg = (x-center)/sigma
-    gm1 = (1.0 - gamma)
-    scale = amplitude/(sigma**gm1)
-    return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
-
-def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
-    """Gaussian, skewed with error function, equal to
-
-     gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
-
-    with beta = gamma/(sigma*sqrt(2))
-
-    with  gamma < 0:  tail to low value of centroid
-          gamma > 0:  tail to high value of centroid
-
-    see http://en.wikipedia.org/wiki/Skew_normal_distribution
-    """
-    asym = 1 + erf(gamma*(x-center)/(s2*sigma))
-    return asym * gaussian(x, amplitude, center, sigma)
-
-def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
-    """Skewed Voigt lineshape, skewed with error function
-    useful for ad-hoc Compton scatter profile
-
-    with beta = skew/(sigma*sqrt(2))
-    = voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center)))
-
-    skew < 0:  tail to low value of centroid
-    skew > 0:  tail to high value of centroid
-
-    see http://en.wikipedia.org/wiki/Skew_normal_distribution
-    """
-    beta = skew/(s2*sigma)
-    asym = 1 + erf(beta*(x-center))
-    return asym * voigt(x, amplitude, center, sigma, gamma=gamma)
-
-def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
-    """step function:
-    starts at 0.0, ends at amplitude, with half-max at center, and
-    rising with form:
-      'linear' (default) = amplitude * min(1, max(0, arg))
-      'atan', 'arctan'   = amplitude * (0.5 + atan(arg)/pi)
-      'erf'              = amplitude * (1 + erf(arg))/2.0
-      'logistic'         = amplitude * [1 - 1/(1 + exp(arg))]
-
-    where arg = (x - center)/sigma
-    """
-    if abs(sigma) <  1.e-13:
-        sigma = 1.e-13
-
-    out = (x - center)/sigma
-    if form == 'erf':
-        out = 0.5*(1 + erf(out))
-    elif form.startswith('logi'):
-        out = (1. - 1./(1. + exp(out)))
-    elif form in ('atan', 'arctan'):
-        out = 0.5 + arctan(out)/pi
-    else:
-        out[where(out < 0)] = 0.0
-        out[where(out > 1)] = 1.0
-    return amplitude*out
-
-def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
-              center2=1.0, sigma2=1.0, form='linear'):
-    """rectangle function: step up, step down  (see step function)
-    starts at 0.0, rises to amplitude (at center1 with width sigma1)
-    then drops to 0.0 (at center2 with width sigma2) with form:
-      'linear' (default) = ramp_up + ramp_down
-      'atan', 'arctan'   = amplitude*(atan(arg1) + atan(arg2))/pi
-      'erf'              = amplitude*(erf(arg1) + erf(arg2))/2.
-      'logisitic'        = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]
-
-    where arg1 =  (x - center1)/sigma1
-    and   arg2 = -(x - center2)/sigma2
-    """
-    if abs(sigma1) <  1.e-13:
-        sigma1 = 1.e-13
-    if abs(sigma2) <  1.e-13:
-        sigma2 = 1.e-13
-
-    arg1 = (x - center1)/sigma1
-    arg2 = (center2 - x)/sigma2
-    if form == 'erf':
-        out = 0.5*(erf(arg1) + erf(arg2))
-    elif form.startswith('logi'):
-        out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
-    elif form in ('atan', 'arctan'):
-        out = (arctan(arg1) + arctan(arg2))/pi
-    else:
-        arg1[where(arg1 <  0)]  = 0.0
-        arg1[where(arg1 >  1)]  = 1.0
-        arg2[where(arg2 >  0)]  = 0.0
-        arg2[where(arg2 < -1)] = -1.0
-        out = arg1 + arg2
-    return amplitude*out
-
-def _erf(x):
-    """error function.  = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])"""
-    return erf(x)
-
-def _erfc(x):
-    """complented error function.  = 1 - erf(x)"""
-    return erfc(x)
-
-def _wofz(x):
-    """fadeeva function for complex argument. = exp(-x**2)*erfc(-i*x)"""
-    return wofz(x)
-
-def _gamma(x):
-    """gamma function"""
-    return gamfcn(x)
-
-def _gammaln(x):
-    """log of absolute value of gamma function"""
-    return gammaln(x)
-
-
-def exponential(x, amplitude=1, decay=1):
-    "x -> amplitude * exp(-x/decay)"
-    return amplitude * exp(-x/decay)
-
-
-def powerlaw(x, amplitude=1, exponent=1.0):
-    "x -> amplitude * x**exponent"
-    return amplitude * x**exponent
-
-
-def linear(x, slope, intercept):
-    "x -> slope * x + intercept"
-    return slope * x + intercept
-
-
-def parabolic(x, a, b, c):
-    "x -> a * x**2 + b * x + c"
-    return a * x**2 + b * x + c
-
-
-def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
-                         err_msg='', verbose=True):
-    """returns whether all parameter values in actual are close to
-    those in desired"""
-    for param_name, value in desired.items():
-        assert_allclose(actual[param_name], value, rtol,
-                        atol, err_msg, verbose)
+#!/usr/bin/env python
+"""
+basic model line shapes and distribution functions
+"""
+from __future__ import division
+from numpy import (pi, log, exp, sqrt, arctan, cos, where)
+from numpy.testing import assert_allclose
+
+from scipy.special import gamma as gamfcn
+from scipy.special import gammaln, erf, erfc, wofz
+
+log2 = log(2)
+s2pi = sqrt(2*pi)
+spi  = sqrt(pi)
+s2   = sqrt(2.0)
+
+functions = ('gaussian', 'lorentzian', 'voigt', 'pvoigt', 'moffat', 'pearson7',
+             'breit_wigner', 'damped_oscillator', 'logistic', 'lognormal',
+             'students_t', 'expgaussian', 'donaich', 'skewed_gaussian',
+             'skewed_voigt', 'step', 'rectangle', 'erf', 'erfc', 'wofz',
+             'gamma', 'gammaln', 'exponential', 'powerlaw', 'linear',
+             'parabolic')
+
+def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """1 dimensional gaussian:
+    gaussian(x, amplitude, center, sigma)
+    """
+    return (amplitude/(s2pi*sigma)) * exp(-(1.0*x-center)**2 /(2*sigma**2))
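+
+# Numeric sanity sketch (illustrative, not part of the module): `amplitude`
+# is the integrated area, the peak height is amplitude/(sigma*sqrt(2*pi)),
+# and the FWHM is 2*sqrt(2*log(2))*sigma ~= 2.3548*sigma.
+#
+#     >>> import numpy as np
+#     >>> xs = np.linspace(-10, 10, 2001)
+#     >>> np.trapz(gaussian(xs, amplitude=2.0, sigma=1.0), xs)  # ~2.0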
+
+def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """1 dimensional lorentzian
+    lorentzian(x, amplitude, center, sigma)
+    """
+    return (amplitude/(1 + ((1.0*x-center)/sigma)**2) ) / (pi*sigma)
+
+def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
+    """1 dimensional voigt function.
+    see http://en.wikipedia.org/wiki/Voigt_profile
+    """
+    if gamma is None:
+        gamma = sigma
+    z = (x-center + 1j*gamma)/ (sigma*s2)
+    return amplitude*wofz(z).real / (sigma*s2pi)
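+
+# Limit-check sketch (illustrative): with gamma=0 the Lorentzian width
+# vanishes and voigt() reduces to gaussian(), because Re[wofz(z)] equals
+# exp(-z**2) for real z.
+#
+#     >>> import numpy as np
+#     >>> xs = np.linspace(-5, 5, 101)
+#     >>> np.allclose(voigt(xs, 2.0, 0.0, 1.0, gamma=0.0),
+#     ...             gaussian(xs, 2.0, 0.0, 1.0))
+#     True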
+
+def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
+    """1 dimensional pseudo-voigt:
+    pvoigt(x, amplitude, center, sigma, fraction)
+       = amplitude*(1-fraction)*gaussian(x, center, sigma_g) +
+         amplitude*fraction*lorentzian(x, center, sigma)
+
+    where sigma_g (the sigma for the Gaussian component) is
+
+        sigma_g = sigma / sqrt(2*log(2)) ~= sigma / 1.17741
+
+    so that the Gaussian and Lorentzian components have the
+    same FWHM of 2*sigma.
+    """
+    sigma_g = sigma / sqrt(2*log2)
+    return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +
+               fraction*lorentzian(x, amplitude, center, sigma))
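+
+# Width sketch (illustrative): because sigma_g = sigma/sqrt(2*log(2)), the
+# Gaussian and Lorentzian components share FWHM = 2*sigma, so the mixture
+# keeps that width for any `fraction`:
+#
+#     >>> import numpy as np
+#     >>> xs = np.linspace(-8, 8, 160001)
+#     >>> y = pvoigt(xs, 1.0, 0.0, 1.0, fraction=0.3)
+#     >>> above = xs[y >= y.max()/2]
+#     >>> above[-1] - above[0]    # ~2.0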
+
+def moffat(x, amplitude=1, center=0., sigma=1, beta=1.):
+    """ 1 dimensional moffat function:
+
+    moffat(amplitude, center, sigma, beta) = amplitude / (((x - center)/sigma)**2 + 1)**beta
+    """
+    return amplitude / (((x - center)/sigma)**2 + 1)**beta
+
+def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
+    """pearson7 lineshape, using the wikipedia definition:
+
+    pearson7(x, center, sigma, expon) =
+      amplitude*(1+arg**2)**(-expon)/(sigma*beta(expon-0.5, 0.5))
+
+    where arg = (x-center)/sigma
+    and beta() is the beta function.
+    """
+    arg = (x-center)/sigma
+    scale = amplitude * gamfcn(expon)/(gamfcn(0.5)*gamfcn(expon-0.5))
+    return  scale*(1+arg**2)**(-expon)/sigma
+
+def breit_wigner(x, amplitude=1.0, center=0.0, sigma=1.0, q=1.0):
+    """Breit-Wigner-Fano lineshape:
+       = amplitude*(q*sigma/2 + x - center)**2 / ( (sigma/2)**2 + (x - center)**2 )
+    """
+    gam = sigma/2.0
+    return  amplitude*(q*gam + x - center)**2 / (gam*gam + (x-center)**2)
+
+def damped_oscillator(x, amplitude=1.0, center=1., sigma=0.1):
+    """amplitude for a damped harmonic oscillator
+    amplitude/sqrt((1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2)
+    """
+    center = max(1.e-9, abs(center))
+    return (amplitude/sqrt( (1.0 - (x/center)**2)**2 + (2*sigma*x/center)**2))
+
+def logistic(x, amplitude=1., center=0., sigma=1.):
+    """Logistic lineshape (yet another sigmoidal curve)
+        = amplitude*(1.  - 1. / (1 + exp((x-center)/sigma)))
+    """
+    return amplitude*(1. - 1./(1. + exp((x-center)/sigma)))
+
+def lognormal(x, amplitude=1.0, center=0., sigma=1):
+    """log-normal function
+    lognormal(x, center, sigma)
+        = (amplitude/x) * exp(-(ln(x) - center)/ (2* sigma**2))
+    """
+    x[where(x<=1.e-19)] = 1.e-19
+    return (amplitude/(x*sigma*s2pi)) * exp(-(log(x)-center)**2/ (2* sigma**2))
+
+def students_t(x, amplitude=1.0, center=0.0, sigma=1.0):
+    """Student's t distribution:
+     =  amplitude * gamma((sigma+1)/2) * (1 + (x-center)**2/sigma)**(-(sigma+1)/2)
+        / (sqrt(sigma*pi) * gamma(sigma/2))
+
+    """
+    s1  = (sigma+1)/2.0
+    denom = (sqrt(sigma*pi)*gamfcn(sigma/2))
+    return amplitude*(1 + (x-center)**2/sigma)**(-s1) * gamfcn(s1) / denom
+
+
+def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
+    """exponentially modified Gaussian
+
+    = (gamma/2) exp[center*gamma + (gamma*sigma)**2/2 - gamma*x] *
+                erfc[(center + gamma*sigma**2 - x)/(sqrt(2)*sigma)]
+
+    http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
+    """
+    gss = gamma*sigma*sigma
+    arg1 = gamma*(center +gss/2.0 - x)
+    arg2 = (center + gss - x)/(s2*sigma)
+    return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
+
+def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
+    """Doniach Sunjic asymmetric lineshape, used for photo-emission
+
+    = amplitude* cos(pi*gamma/2 + (1-gamma) arctan((x-center)/sigma) /
+                      (sigma**2 + (x-center)**2)**[(1-gamma)/2]
+
+    see http://www.casaxps.com/help_manual/line_shapes.htm
+    """
+    arg = (x-center)/sigma
+    gm1 = (1.0 - gamma)
+    scale = amplitude/(sigma**gm1)
+    return scale*cos(pi*gamma/2 + gm1*arctan(arg))/(1 + arg**2)**(gm1/2)
+
+def skewed_gaussian(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=0.0):
+    """Gaussian, skewed with error function, equal to
+
+     gaussian(x, center, sigma)*(1+erf(beta*(x-center)))
+
+    with beta = gamma/(sigma*sqrt(2))
+
+    with  gamma < 0:  tail to low value of centroid
+          gamma > 0:  tail to high value of centroid
+
+    see http://en.wikipedia.org/wiki/Skew_normal_distribution
+    """
+    asym = 1 + erf(gamma*(x-center)/(s2*sigma))
+    return asym * gaussian(x, amplitude, center, sigma)
+
+def skewed_voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None, skew=0.0):
+    """Skewed Voigt lineshape, skewed with error function
+    useful for ad-hoc Compton scatter profile
+
+    with beta = skew/(sigma*sqrt(2))
+    = voigt(x, center, sigma, gamma)*(1+erf(beta*(x-center)))
+
+    skew < 0:  tail to low value of centroid
+    skew > 0:  tail to high value of centroid
+
+    see http://en.wikipedia.org/wiki/Skew_normal_distribution
+    """
+    beta = skew/(s2*sigma)
+    asym = 1 + erf(beta*(x-center))
+    return asym * voigt(x, amplitude, center, sigma, gamma=gamma)
+
+def step(x, amplitude=1.0, center=0.0, sigma=1.0, form='linear'):
+    """step function:
+    starts at 0.0, ends at amplitude, with half-max at center, and
+    rising with form:
+      'linear' (default) = amplitude * min(1, max(0, arg))
+      'atan', 'arctan'   = amplitude * (0.5 + atan(arg)/pi)
+      'erf'              = amplitude * (1 + erf(arg))/2.0
+      'logistic'         = amplitude * [1 - 1/(1 + exp(arg))]
+
+    where arg = (x - center)/sigma
+    """
+    if abs(sigma) <  1.e-13:
+        sigma = 1.e-13
+
+    out = (x - center)/sigma
+    if form == 'erf':
+        out = 0.5*(1 + erf(out))
+    elif form.startswith('logi'):
+        out = (1. - 1./(1. + exp(out)))
+    elif form in ('atan', 'arctan'):
+        out = 0.5 + arctan(out)/pi
+    else:
+        out[where(out < 0)] = 0.0
+        out[where(out > 1)] = 1.0
+    return amplitude*out
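+
+# Behavior sketch (illustrative): every form rises monotonically from ~0
+# toward `amplitude`; the smooth forms pass amplitude/2 at `center`, while
+# 'linear' ramps from center to center + sigma.
+#
+#     >>> import numpy as np
+#     >>> xs = np.linspace(-5.0, 5.0, 201)
+#     >>> for form in ('linear', 'arctan', 'erf', 'logistic'):
+#     ...     y = step(xs, amplitude=2.0, sigma=1.0, form=form)
+#     ...     assert np.all(np.diff(y) >= 0) and y.max() <= 2.0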
+
+def rectangle(x, amplitude=1.0, center1=0.0, sigma1=1.0,
+              center2=1.0, sigma2=1.0, form='linear'):
+    """rectangle function: step up, step down  (see step function)
+    starts at 0.0, rises to amplitude (at center1 with width sigma1)
+    then drops to 0.0 (at center2 with width sigma2) with form:
+      'linear' (default) = ramp_up + ramp_down
+      'atan', 'arctan'   = amplitude*(atan(arg1) + atan(arg2))/pi
+      'erf'              = amplitude*(erf(arg1) + erf(arg2))/2.
+      'logistic'         = amplitude*[1 - 1/(1 + exp(arg1)) - 1/(1+exp(arg2))]
+
+    where arg1 =  (x - center1)/sigma1
+    and   arg2 = -(x - center2)/sigma2
+    """
+    if abs(sigma1) <  1.e-13:
+        sigma1 = 1.e-13
+    if abs(sigma2) <  1.e-13:
+        sigma2 = 1.e-13
+
+    arg1 = (x - center1)/sigma1
+    arg2 = (center2 - x)/sigma2
+    if form == 'erf':
+        out = 0.5*(erf(arg1) + erf(arg2))
+    elif form.startswith('logi'):
+        out = (1. - 1./(1. + exp(arg1)) - 1./(1. + exp(arg2)))
+    elif form in ('atan', 'arctan'):
+        out = (arctan(arg1) + arctan(arg2))/pi
+    else:
+        arg1[where(arg1 <  0)]  = 0.0
+        arg1[where(arg1 >  1)]  = 1.0
+        arg2[where(arg2 >  0)]  = 0.0
+        arg2[where(arg2 < -1)] = -1.0
+        out = arg1 + arg2
+    return amplitude*out
+
+def _erf(x):
+    """error function.  = 2/sqrt(pi)*integral(exp(-t**2), t=[0, z])"""
+    return erf(x)
+
+def _erfc(x):
+    """complented error function.  = 1 - erf(x)"""
+    return erfc(x)
+
+def _wofz(x):
+    """fadeeva function for complex argument. = exp(-x**2)*erfc(-i*x)"""
+    return wofz(x)
+
+def _gamma(x):
+    """gamma function"""
+    return gamfcn(x)
+
+def _gammaln(x):
+    """log of absolute value of gamma function"""
+    return gammaln(x)
+
+
+def exponential(x, amplitude=1, decay=1):
+    "x -> amplitude * exp(-x/decay)"
+    return amplitude * exp(-x/decay)
+
+
+def powerlaw(x, amplitude=1, exponent=1.0):
+    "x -> amplitude * x**exponent"
+    return amplitude * x**exponent
+
+
+def linear(x, slope, intercept):
+    "x -> slope * x + intercept"
+    return slope * x + intercept
+
+
+def parabolic(x, a, b, c):
+    "x -> a * x**2 + b * x + c"
+    return a * x**2 + b * x + c
+
+
+def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
+                         err_msg='', verbose=True):
+    """returns whether all parameter values in actual are close to
+    those in desired"""
+    for param_name, value in desired.items():
+        assert_allclose(actual[param_name], value, rtol,
+                        atol, err_msg, verbose)
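+
+# Usage sketch (hypothetical values): rtol and atol both default to 1e-3,
+# so this passes, while a 1% discrepancy would raise an AssertionError.
+#
+#     >>> assert_results_close({'amp': 10.001, 'cen': 0.4996},
+#     ...                      {'amp': 10.0, 'cen': 0.5})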
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 7fca545..c67a396 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -1,774 +1,1282 @@
-"""
-Simple minimizer is a wrapper around scipy.leastsq, allowing a
-user to build a fitting model as a function of general purpose
-Fit Parameters that can be fixed or floated, bounded, and written
-as a simple expression of other Fit Parameters.
-
-The user sets up a model in terms of instance of Parameters, writes a
-function-to-be-minimized (residual function) in terms of these Parameters.
-
-   Copyright (c) 2011 Matthew Newville, The University of Chicago
-   <newville at cars.uchicago.edu>
-"""
-
-from copy import deepcopy
-import numpy as np
-from numpy import (dot, eye, ndarray, ones_like,
-                   sqrt, take, transpose, triu, deprecate)
-from numpy.dual import inv
-from numpy.linalg import LinAlgError
-
-from scipy.optimize import leastsq as scipy_leastsq
-from scipy.optimize import fmin as scipy_fmin
-from scipy.optimize.lbfgsb import fmin_l_bfgs_b as scipy_lbfgsb
-
-# differential_evolution is only present in scipy >= 0.15
-try:
-    from scipy.optimize import differential_evolution as scipy_diffev
-except ImportError:
-    from ._differentialevolution import differential_evolution as scipy_diffev
-
-# check for scipy.optimize.minimize
-HAS_SCALAR_MIN = False
-try:
-    from scipy.optimize import minimize as scipy_minimize
-    HAS_SCALAR_MIN = True
-except ImportError:
-    pass
-
-from .asteval import Interpreter
-from .parameter import Parameter, Parameters
-
-# use locally modified version of uncertainties package
-from . import uncertainties
-
-
-def asteval_with_uncertainties(*vals, **kwargs):
-    """
-    given values for variables, calculate object value.
-    This is used by the uncertainties package to calculate
-    the uncertainty in an object even with a complicated
-    expression.
-    """
-    _obj = kwargs.get('_obj', None)
-    _pars = kwargs.get('_pars', None)
-    _names = kwargs.get('_names', None)
-    _asteval = _pars._asteval
-    if (_obj is None or  _pars is None or _names is None or
-        _asteval is None or _obj._expr_ast is None):
-        return 0
-    for val, name in zip(vals, _names):
-        _asteval.symtable[name] = val
-    return _asteval.eval(_obj._expr_ast)
-
-wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
-
-def eval_stderr(obj, uvars, _names, _pars):
-    """evaluate uncertainty and set .stderr for a parameter `obj`
-    given the uncertain values `uvars` (a list of uncertainties.ufloats),
-    a list of parameter names that matches uvars, and a dict of param
-    objects, keyed by name.
-
-    This uses the uncertainties package wrapped function to evaluate the
-    uncertainty for an arbitrary expression (in obj._expr_ast) of parameters.
-    """
-    if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None:
-        return
-    uval = wrap_ueval(*uvars, _obj=obj, _names=_names, _pars=_pars)
-    try:
-        obj.stderr = uval.std_dev()
-    except:
-        obj.stderr = 0
-
-
-class MinimizerException(Exception):
-    """General Purpose Exception"""
-    def __init__(self, msg):
-        Exception.__init__(self)
-        self.msg = msg
-
-    def __str__(self):
-        return "\n%s" % (self.msg)
-
-
-def _differential_evolution(func, x0, **kwds):
-    """
-    A wrapper for differential_evolution that can be used with scipy.minimize
-    """
-    kwargs = dict(args=(), strategy='best1bin', maxiter=None, popsize=15,
-                  tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
-                  callback=None, disp=False, polish=True,
-                  init='latinhypercube')
-
-    for k, v in kwds.items():
-        if k in kwargs:
-            kwargs[k] = v
-
-    return scipy_diffev(func, kwds['bounds'], **kwargs)
-
-SCALAR_METHODS = {'nelder': 'Nelder-Mead',
-                  'powell': 'Powell',
-                  'cg': 'CG',
-                  'bfgs': 'BFGS',
-                  'newton': 'Newton-CG',
-                  'lbfgsb': 'L-BFGS-B',
-                  'l-bfgsb':'L-BFGS-B',
-                  'tnc': 'TNC',
-                  'cobyla': 'COBYLA',
-                  'slsqp': 'SLSQP',
-                  'dogleg': 'dogleg',
-                  'trust-ncg': 'trust-ncg',
-                  'differential_evolution': 'differential_evolution'}
-
-
-class MinimizerResult(object):
-    """ The result of a minimization.
-
-    Attributes
-    ----------
-    params : Parameters
-        The best-fit parameters
-    success : bool
-        Whether the minimization was successful
-    status : int
-        Termination status of the optimizer. Its value depends on the
-        underlying solver. Refer to `message` for details.
-
-    Notes
-    -----
-    Additional attributes not listed above may be present, depending on the
-    specific solver. Since this class is essentially a subclass of dict
-    with attribute accessors, one can see which attributes are available
-    using the `keys()` method.
-    """
-    def __init__(self, **kws):
-        for key, val in kws.items():
-            setattr(self, key, val)
-
-class Minimizer(object):
-    """A general minimizer for curve fitting"""
-    err_nonparam = ("params must be a minimizer.Parameters() instance or list "
-                    "of Parameters()")
-    err_maxfev = ("Too many function calls (max set to %i)!  Use:"
-                  " minimize(func, params, ..., maxfev=NNN)"
-                  "or set leastsq_kws['maxfev']  to increase this maximum.")
-
-    def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
-                 iter_cb=None, scale_covar=True, **kws):
-        """
-        Initialization of the Minimizer class
-
-        Parameters
-        ----------
-        userfcn : callable
-            objective function that returns the residual (difference between
-            model and data) to be minimized in a least squares sense.  The
-            function must have the signature:
-            `userfcn(params, *fcn_args, **fcn_kws)`
-        params : lmfit.parameter.Parameters object.
-            contains the Parameters for the model.
-        fcn_args : tuple, optional
-            positional arguments to pass to userfcn.
-        fcn_kws : dict, optional
-            keyword arguments to pass to userfcn.
-        iter_cb : callable, optional
-            Function to be called at each fit iteration. This function should
-            have the signature:
-            `iter_cb(params, iter, resid, *fcn_args, **fcn_kws)`,
-            where `params` will have the current parameter values, `iter`
-            the iteration, `resid` the current residual array, and `*fcn_args`
-            and `**fcn_kws` as passed to the objective function.
-        scale_covar : bool, optional
-            Whether to automatically scale the covariance matrix (leastsq
-            only).
-        kws : dict, optional
-            Options to pass to the minimizer being used.
-
-        Notes
-        -----
-        The objective function should return the value to be minimized. For the
-        Levenberg-Marquardt algorithm from leastsq(), this returned value must
-        be an array, with a length greater than or equal to the number of
-        fitting variables in the model. For the other methods, the return value
-        can either be a scalar or an array. If an array is returned, the sum of
-        squares of the array will be sent to the underlying fitting method,
-        effectively doing a least-squares optimization of the return values.
-
-        A common use for the fcn_args and fcn_kws would be to pass in other
-        data needed to calculate the residual, including such things as the
-        data array, dependent variable, uncertainties in the data, and other
-        data structures for the model calculation.
-        """
-        self.userfcn = userfcn
-        self.userargs = fcn_args
-        if self.userargs is None:
-            self.userargs = []
-
-        self.userkws = fcn_kws
-        if self.userkws is None:
-            self.userkws = {}
-        self.kws = kws
-        self.iter_cb = iter_cb
-        self.scale_covar = scale_covar
-        self.nfev = 0
-        self.nfree = 0
-        self.ndata = 0
-        self.ier = 0
-        self._abort = False
-        self.success = True
-        self.errorbars = False
-        self.message = None
-        self.lmdif_message = None
-        self.chisqr = None
-        self.redchi = None
-        self.covar = None
-        self.residual = None
-
-        self.params = params
-        self.jacfcn = None
-
-    @property
-    def values(self):
-        """
-        Returns
-        -------
-        param_values : dict
-            Parameter values in a simple dictionary.
-        """
-
-        return dict([(name, p.value) for name, p in self.result.params.items()])
-
-    def __residual(self, fvars):
-        """
-        Residual function used for least-squares fit.
-        With the new, candidate values of fvars (the fitting variables), this
-        evaluates all parameters, including setting bounds and evaluating
-        constraints, and then passes those to the user-supplied function to
-        calculate the residual.
-        """
-        # set parameter values
-        if self._abort:
-            return None
-        params = self.result.params
-        for name, val in zip(self.result.var_names, fvars):
-            params[name].value = params[name].from_internal(val)
-        self.result.nfev = self.result.nfev + 1
-
-        params.update_constraints()
-        out = self.userfcn(params, *self.userargs, **self.userkws)
-        if callable(self.iter_cb):
-            abort = self.iter_cb(params, self.result.nfev, out,
-                                 *self.userargs, **self.userkws)
-            self._abort = self._abort or abort
-        if not self._abort:
-            return np.asarray(out).ravel()
-
-    def __jacobian(self, fvars):
-        """
-        analytical jacobian to be used with the Levenberg-Marquardt
-
-        modified 02-01-2012 by Glenn Jones, Aberystwyth University
-        modified 06-29-2015 M Newville to apply gradient scaling
-               for bounded variables (thanks to JJ Helmus, N Mayorov)
-        """
-        pars  = self.result.params
-        grad_scale = ones_like(fvars)
-        for ivar, name in enumerate(self.result.var_names):
-            val = fvars[ivar]
-            pars[name].value = pars[name].from_internal(val)
-            grad_scale[ivar] = pars[name].scale_gradient(val)
-
-        self.result.nfev = self.result.nfev + 1
-        pars.update_constraints()
-        # compute the jacobian for "internal" unbounded variables,
-        # the rescale for bounded "external" variables.
-        jac = self.jacfcn(pars, *self.userargs, **self.userkws)
-        if self.col_deriv:
-            jac = (jac.transpose()*grad_scale).transpose()
-        else:
-            jac = jac*grad_scale
-        return jac
-
-    def penalty(self, fvars):
-        """
-        Penalty function for scalar minimizers:
-
-        Parameters
-        ----------
-        fvars : array of values for the variable parameters
-
-        Returns
-        -------
-        r - float
-            The user evaluated user-supplied objective function. If the
-            objective function is an array, return the array sum-of-squares
-        """
-        r = self.__residual(fvars)
-        if isinstance(r, ndarray):
-            r = (r*r).sum()
-        return r
-
-    def prepare_fit(self, params=None):
-        """
-        Prepares parameters for fitting,
-        return array of initial values
-        """
-        # determine which parameters are actually variables
-        # and which are defined expressions.
-        result = self.result = MinimizerResult()
-        if params is not None:
-            self.params = params
-        if isinstance(self.params, Parameters):
-            result.params = deepcopy(self.params)
-        elif isinstance(self.params, (list, tuple)):
-            result.params = Parameters()
-            for par in self.params:
-                if not isinstance(par, Parameter):
-                    raise MinimizerException(self.err_nonparam)
-                else:
-                    result.params[par.name] = par
-        elif self.params is None:
-            raise MinimizerException(self.err_nonparam)
-
-        # determine which parameters are actually variables
-        # and which are defined expressions.
-
-        result.var_names = [] # note that this *does* belong to self...
-        result.init_vals = []
-        result.params.update_constraints()
-        result.nfev = 0
-        result.errorbars = False
-        result.aborted = False
-        for name, par in self.result.params.items():
-            par.stderr = None
-            par.correl = None
-            if par.expr is not None:
-                par.vary = False
-            if par.vary:
-                result.var_names.append(name)
-                result.init_vals.append(par.setup_bounds())
-
-            par.init_value = par.value
-            if par.name is None:
-                par.name = name
-        result.nvarys = len(result.var_names)
-        return result
-
-    def unprepare_fit(self):
-        """
-        Unprepares the fit, so that subsequent fits will be
-        forced to run prepare_fit.
-
-        removes ast compilations of constraint expressions
-        """
-        pass
-
-    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
-                       'and method=\'L-BFGS-B\' instead')
-    def lbfgsb(self, **kws):
-        """
-        Use l-bfgs-b minimization
-
-        Parameters
-        ----------
-        kws : dict
-            Minimizer options to pass to the
-            scipy.optimize.lbfgsb.fmin_l_bfgs_b function.
-
-        """
-        raise NotImplementedError("use scalar_minimize(method='L-BFGS-B')")
-
-
-    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
-                       'and method=\'Nelder-Mead\' instead')
-    def fmin(self, **kws):
-        """
-        Use Nelder-Mead (simplex) minimization
-
-        Parameters
-        ----------
-        kws : dict
-            Minimizer options to pass to the scipy.optimize.fmin minimizer.
-        """
-        raise NotImplementedError("use scalar_minimize(method='Nelder-Mead')")
-
-    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
-        """
-        Use one of the scalar minimization methods from
-        scipy.optimize.minimize.
-
-        Parameters
-        ----------
-        method : str, optional
-            Name of the fitting method to use.
-            One of:
-                'Nelder-Mead' (default)
-                'L-BFGS-B'
-                'Powell'
-                'CG'
-                'Newton-CG'
-                'COBYLA'
-                'TNC'
-                'trust-ncg'
-                'dogleg'
-                'SLSQP'
-                'differential_evolution'
-
-        params : Parameters, optional
-           Parameters to use as starting points.
-        kws : dict, optional
-            Minimizer options to pass to scipy.optimize.minimize.
-
-        If the objective function returns a numpy array instead
-        of the expected scalar, the sum of squares of the array
-        will be used.
-
-        Note that bounds and constraints can be set on Parameters for any
-        of these methods, so bounds are not passed separately even to the
-        methods designed to accept them. However, if you use the
-        differential_evolution method you must specify finite
-        (min, max) bounds for each varying Parameter.
-
-        Returns
-        -------
-        result : MinimizerResult
-            Object containing the optimized parameters and fit statistics.
-
-        """
-        if not HAS_SCALAR_MIN:
-            raise NotImplementedError('scipy.optimize.minimize is required')
-
-        result = self.prepare_fit(params=params)
-        vars   = result.init_vals
-        params = result.params
-
-        fmin_kws = dict(method=method,
-                        options={'maxiter': 1000 * (len(vars) + 1)})
-        fmin_kws.update(self.kws)
-        fmin_kws.update(kws)
-
-        # hess supported only in some methods
-        if 'hess' in fmin_kws and method not in ('Newton-CG',
-                                                 'dogleg', 'trust-ncg'):
-            fmin_kws.pop('hess')
-
-        # jac supported only in some methods (and Dfun could be used...)
-        if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
-            self.jacfcn = fmin_kws.pop('Dfun')
-            fmin_kws['jac'] = self.__jacobian
-
-        if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
-                                                'dogleg', 'trust-ncg'):
-            self.jacfcn = None
-            fmin_kws.pop('jac')
-
-        if method == 'differential_evolution':
-            fmin_kws['method'] = _differential_evolution
-            bounds = [(par.min, par.max) for par in params.values()]
-            if not np.all(np.isfinite(bounds)):
-                raise ValueError('With differential evolution finite bounds '
-                                 'are required for each parameter')
-            bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
-            fmin_kws['bounds'] = bounds
-
-            # scipy >= 0.14 accepts a callable for `method`, so this could
-            # be routed through scipy_minimize; once scipy 0.14 is the
-            # minimum supported version, this call and the else can be removed.
-            ret = _differential_evolution(self.penalty, vars, **fmin_kws)
-        else:
-            ret = scipy_minimize(self.penalty, vars, **fmin_kws)
-
-        result.aborted = self._abort
-        self._abort = False
-
-        for attr in dir(ret):
-            if not attr.startswith('_'):
-                setattr(result, attr, getattr(ret, attr))
-
-        result.chisqr = result.residual = self.__residual(ret.x)
-        result.nvarys = len(vars)
-        result.ndata = 1
-        result.nfree = 1
-        if isinstance(result.residual, ndarray):
-            result.chisqr = (result.chisqr**2).sum()
-            result.ndata = len(result.residual)
-            result.nfree = result.ndata - result.nvarys
-        result.redchi = result.chisqr / result.nfree
-        _log_likelihood = result.ndata * np.log(result.redchi)
-        result.aic = _log_likelihood + 2 * result.nvarys
-        result.bic = _log_likelihood + np.log(result.ndata) * result.nvarys
-
-        return result
-
-    def leastsq(self, params=None, **kws):
-        """
-        Use Levenberg-Marquardt minimization to perform a fit.
-        This assumes that Parameters have been stored, and a function to
-        minimize has been properly set up.
-
-        This wraps scipy.optimize.leastsq.
-
-        When possible, this calculates the estimated uncertainties and
-        variable correlations from the covariance matrix.
-
-        Writes outputs to many internal attributes.
-
-        Parameters
-        ----------
-        params : Parameters, optional
-           Parameters to use as starting points.
-        kws : dict, optional
-            Minimizer options to pass to scipy.optimize.leastsq.
-
-        Returns
-        -------
-        result : MinimizerResult
-            Object containing the optimized parameters and fit statistics.
-        """
-        result = self.prepare_fit(params=params)
-        vars   = result.init_vals
-        nvars = len(vars)
-        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
-                     gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None)
-
-        lskws.update(self.kws)
-        lskws.update(kws)
-
-        self.col_deriv = False
-        if lskws['Dfun'] is not None:
-            self.jacfcn = lskws['Dfun']
-            self.col_deriv = lskws['col_deriv']
-            lskws['Dfun'] = self.__jacobian
-
-        # suppress runtime warnings during fit and error analysis
-        orig_warn_settings = np.geterr()
-        np.seterr(all='ignore')
-
-        lsout = scipy_leastsq(self.__residual, vars, **lskws)
-        _best, _cov, infodict, errmsg, ier = lsout
-        result.aborted = self._abort
-        self._abort = False
-
-        result.residual = resid = infodict['fvec']
-        result.ier = ier
-        result.lmdif_message = errmsg
-        result.message = 'Fit succeeded.'
-        result.success = ier in [1, 2, 3, 4]
-        if result.aborted:
-            result.message = 'Fit aborted by user callback.'
-            result.success = False
-        elif ier == 0:
-            result.message = 'Invalid Input Parameters.'
-        elif ier == 5:
-            result.message = self.err_maxfev % lskws['maxfev']
-        else:
-            result.message = 'Tolerance seems to be too small.'
-
-        result.ndata = len(resid)
-
-        result.chisqr = (resid**2).sum()
-        result.nfree = (result.ndata - nvars)
-        result.redchi = result.chisqr / result.nfree
-        _log_likelihood = result.ndata * np.log(result.redchi)
-        result.aic = _log_likelihood + 2 * nvars
-        result.bic = _log_likelihood + np.log(result.ndata) * nvars
-
-        params = result.params
-
-        # need to map _best values to params, then calculate the
-        # grad for the variable parameters
-        grad = ones_like(_best)
-        vbest = ones_like(_best)
-
-        # ensure that _best, vbest, and grad are not
-        # broken 1-element ndarrays.
-        if len(np.shape(_best)) == 0:
-            _best = np.array([_best])
-        if len(np.shape(vbest)) == 0:
-            vbest = np.array([vbest])
-        if len(np.shape(grad)) == 0:
-            grad = np.array([grad])
-
-        for ivar, name in enumerate(result.var_names):
-            grad[ivar] = params[name].scale_gradient(_best[ivar])
-            vbest[ivar] = params[name].value
-
-        # modified from JJ Helmus' leastsqbound.py
-        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
-                                     take(grad, infodict['ipvt'] - 1))
-        rvec = dot(triu(transpose(infodict['fjac'])[:nvars, :]),
-                   take(eye(nvars), infodict['ipvt'] - 1, 0))
-        try:
-            result.covar = inv(dot(transpose(rvec), rvec))
-        except (LinAlgError, ValueError):
-            result.covar = None
-
-        has_expr = False
-        for par in params.values():
-            par.stderr, par.correl = 0, None
-            has_expr = has_expr or par.expr is not None
-
-        # self.errorbars = error bars were successfully estimated
-        result.errorbars = (result.covar is not None)
-        if result.aborted:
-            result.errorbars = False
-        if result.errorbars:
-            if self.scale_covar:
-                result.covar *= result.redchi
-            for ivar, name in enumerate(result.var_names):
-                par = params[name]
-                par.stderr = sqrt(result.covar[ivar, ivar])
-                par.correl = {}
-                try:
-                    result.errorbars = result.errorbars and (par.stderr > 0.0)
-                    for jvar, varn2 in enumerate(result.var_names):
-                        if jvar != ivar:
-                            par.correl[varn2] = (result.covar[ivar, jvar] /
-                                 (par.stderr * sqrt(result.covar[jvar, jvar])))
-                except Exception:
-                    result.errorbars = False
-
-            uvars = None
-            if has_expr:
-                # uncertainties on constrained parameters:
-                #   get values with uncertainties (including correlations),
-                #   temporarily set Parameter values to these,
-                #   re-evaluate constrained parameters to extract stderr
-                #   and then set Parameters back to best-fit value
-                try:
-                    uvars = uncertainties.correlated_values(vbest, result.covar)
-                except (LinAlgError, ValueError):
-                    uvars = None
-                if uvars is not None:
-                    for par in params.values():
-                        eval_stderr(par, uvars, result.var_names, params)
-                    # restore nominal values
-                    for v, nam in zip(uvars, result.var_names):
-                        params[nam].value = v.nominal_value
-
-        if not result.errorbars:
-            result.message = '%s. Could not estimate error-bars' % result.message
-
-        np.seterr(**orig_warn_settings)
-        return result
-
-    def minimize(self, method='leastsq', params=None, **kws):
-        """
-        Perform the minimization.
-
-        Parameters
-        ----------
-        method : str, optional
-            Name of the fitting method to use.
-            One of:
-            'leastsq'                -    Levenberg-Marquardt (default)
-            'nelder'                 -    Nelder-Mead
-            'lbfgsb'                 -    L-BFGS-B
-            'powell'                 -    Powell
-            'cg'                     -    Conjugate-Gradient
-            'newton'                 -    Newton-CG
-            'cobyla'                 -    COBYLA
-            'tnc'                    -    Truncated Newton (TNC)
-            'trust-ncg'              -    Trust-region Newton-CG
-            'dogleg'                 -    Dogleg
-            'slsqp'                  -    Sequential Least Squares Programming
-            'differential_evolution' -    differential evolution
-
-        params : Parameters, optional
-            parameters to use as starting values
-
-        Returns
-        -------
-        result : MinimizerResult
-
-            MinimizerResult object contains updated params, fit statistics, etc.
-
-        """
-
-        function = self.leastsq
-        kwargs = {'params': params}
-        kwargs.update(kws)
-
-        user_method = method.lower()
-        if user_method.startswith('least'):
-            function = self.leastsq
-        elif HAS_SCALAR_MIN:
-            function = self.scalar_minimize
-            for key, val in SCALAR_METHODS.items():
-                if (key.lower().startswith(user_method) or
-                    val.lower().startswith(user_method)):
-                    kwargs['method'] = val
-        elif (user_method.startswith('nelder') or
-              user_method.startswith('fmin')):
-            function = self.fmin
-        elif user_method.startswith('lbfgsb'):
-            function = self.lbfgsb
-        return function(**kwargs)
-
-def minimize(fcn, params, method='leastsq', args=None, kws=None,
-             scale_covar=True, iter_cb=None, **fit_kws):
-    """
-    A general-purpose curve-fitting function.
-
-    The minimize function takes an objective function to be minimized, a
-    dictionary (lmfit.parameter.Parameters) containing the model parameters,
-    and several optional arguments.
-
-    Parameters
-    ----------
-    fcn : callable
-        objective function that returns the residual (difference between
-        model and data) to be minimized in a least squares sense.  The
-        function must have the signature:
-        `fcn(params, *args, **kws)`
-    params : lmfit.parameter.Parameters object.
-        contains the Parameters for the model.
-    method : str, optional
-        Name of the fitting method to use.
-        One of:
-            'leastsq'                -    Levenberg-Marquardt (default)
-            'nelder'                 -    Nelder-Mead
-            'lbfgsb'                 -    L-BFGS-B
-            'powell'                 -    Powell
-            'cg'                     -    Conjugate-Gradient
-            'newton'                 -    Newton-CG
-            'cobyla'                 -    COBYLA
-            'tnc'                    -    Truncated Newton (TNC)
-            'trust-ncg'              -    Trust-region Newton-CG
-            'dogleg'                 -    Dogleg
-            'slsqp'                  -    Sequential Least Squares Programming
-            'differential_evolution' -    differential evolution
-
-    args : tuple, optional
-        Positional arguments to pass to fcn.
-    kws : dict, optional
-        keyword arguments to pass to fcn.
-    iter_cb : callable, optional
-        Function to be called at each fit iteration. This function should
-        have the signature `iter_cb(params, iter, resid, *args, **kws)`,
-        where `params` will have the current parameter values, `iter`
-        the iteration, `resid` the current residual array, and `*args`
-        and `**kws` as passed to the objective function.
-    scale_covar : bool, optional
-        Whether to automatically scale the covariance matrix (leastsq
-        only).
-    fit_kws : dict, optional
-        Options to pass to the minimizer being used.
-
-    Notes
-    -----
-    The objective function should return the value to be minimized. For the
-    Levenberg-Marquardt algorithm from leastsq(), this returned value must
-    be an array, with a length greater than or equal to the number of
-    fitting variables in the model. For the other methods, the return value
-    can either be a scalar or an array. If an array is returned, the sum of
-    squares of the array will be sent to the underlying fitting method,
-    effectively doing a least-squares optimization of the return values.
-
-    A common use for `args` and `kws` would be to pass in other
-    data needed to calculate the residual, including such things as the
-    data array, dependent variable, uncertainties in the data, and other
-    data structures for the model calculation.
-    """
-    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
-                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
-    return fitter.minimize(method=method)
+"""
+Simple minimizer: a wrapper around scipy.optimize.leastsq, allowing a
+user to build a fitting model as a function of general-purpose
+Fit Parameters that can be fixed or floated, bounded, and written
+as simple expressions of other Fit Parameters.
+
+The user sets up a model in terms of an instance of Parameters and writes
+a function-to-be-minimized (residual function) in terms of these Parameters.
+
+   Copyright (c) 2011 Matthew Newville, The University of Chicago
+   <newville at cars.uchicago.edu>
+"""
+
+from copy import deepcopy
+import numpy as np
+from numpy import (dot, eye, ndarray, ones_like,
+                   sqrt, take, transpose, triu, deprecate)
+from numpy.dual import inv
+from numpy.linalg import LinAlgError
+import multiprocessing
+import numbers
+
+from scipy.optimize import leastsq as scipy_leastsq
+
+# differential_evolution is only present in scipy >= 0.15
+try:
+    from scipy.optimize import differential_evolution as scipy_diffev
+except ImportError:
+    from ._differentialevolution import differential_evolution as scipy_diffev
+
+# check for EMCEE
+HAS_EMCEE = False
+try:
+    import emcee
+    HAS_EMCEE = True
+except ImportError:
+    pass
+
+# check for pandas
+HAS_PANDAS = False
+try:
+    import pandas as pd
+    HAS_PANDAS = True
+except ImportError:
+    pass
+
+# check for scipy.optimize.minimize
+HAS_SCALAR_MIN = False
+try:
+    from scipy.optimize import minimize as scipy_minimize
+    HAS_SCALAR_MIN = True
+except ImportError:
+    pass
+
+from .parameter import Parameter, Parameters
+
+# use locally modified version of uncertainties package
+from . import uncertainties
+
+
+def asteval_with_uncertainties(*vals, **kwargs):
+    """
+    Given values for the variables, calculate the value of the object.
+    This is used by the uncertainties package to calculate the
+    uncertainty of an object even when it is a complicated
+    expression of other variables.
+    """
+    _obj = kwargs.get('_obj', None)
+    _pars = kwargs.get('_pars', None)
+    _names = kwargs.get('_names', None)
+    _asteval = _pars._asteval if _pars is not None else None
+    if (_obj is None or _pars is None or _names is None or
+        _asteval is None or _obj._expr_ast is None):
+        return 0
+    for val, name in zip(vals, _names):
+        _asteval.symtable[name] = val
+    return _asteval.eval(_obj._expr_ast)
+
+wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
+
+
+def eval_stderr(obj, uvars, _names, _pars):
+    """evaluate uncertainty and set .stderr for a parameter `obj`
+    given the uncertain values `uvars` (a list of uncertainties.ufloats),
+    a list of parameter names that matches uvars, and a dict of param
+    objects, keyed by name.
+
+    This uses the wrapped function from the uncertainties package to evaluate
+    the uncertainty of an arbitrary expression (in obj._expr_ast) of parameters.
+    """
+    if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None:
+        return
+    uval = wrap_ueval(*uvars, _obj=obj, _names=_names, _pars=_pars)
+    try:
+        obj.stderr = uval.std_dev()
+    except Exception:
+        obj.stderr = 0
+
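+# The idea behind eval_stderr, restated with the public API of the
+# uncertainties package (an illustrative sketch, not upstream code; note
+# that the bundled copy exposes std_dev as a method, while recent
+# standalone releases expose it as a property):
+#
+#     import uncertainties
+#     a = uncertainties.ufloat(2.0, 0.1)  # nominal value 2.0, stderr 0.1
+#     b = uncertainties.ufloat(3.0, 0.2)
+#     c = a * b                           # stands in for a constrained parameter
+#     print(c.nominal_value, c.std_dev)   # uncertainty propagated through c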
+
+class MinimizerException(Exception):
+    """General Purpose Exception"""
+    def __init__(self, msg):
+        Exception.__init__(self)
+        self.msg = msg
+
+    def __str__(self):
+        return "\n%s" % self.msg
+
+
+def _differential_evolution(func, x0, **kwds):
+    """
+    A wrapper for differential_evolution that can be used with scipy.minimize
+    """
+    kwargs = dict(args=(), strategy='best1bin', maxiter=None, popsize=15,
+                  tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
+                  callback=None, disp=False, polish=True,
+                  init='latinhypercube')
+
+    for k, v in kwds.items():
+        if k in kwargs:
+            kwargs[k] = v
+
+    return scipy_diffev(func, kwds['bounds'], **kwargs)
+
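+# The wrapper above gives differential_evolution the same calling shape as
+# the scipy.optimize.minimize-style methods: x0 is accepted only for
+# signature compatibility (DE generates its own initial population) and
+# unrecognised keywords such as `method` or `options` are silently dropped.
+# A hedged, standalone illustration using scipy's own function:
+#
+#     import numpy as np
+#     from scipy.optimize import differential_evolution
+#     out = differential_evolution(lambda x: np.sum(x**2),
+#                                  bounds=[(-5, 5), (-5, 5)], seed=1)
+#     print(out.x, out.fun)   # near [0, 0] and 0.0
+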
+SCALAR_METHODS = {'nelder': 'Nelder-Mead',
+                  'powell': 'Powell',
+                  'cg': 'CG',
+                  'bfgs': 'BFGS',
+                  'newton': 'Newton-CG',
+                  'lbfgsb': 'L-BFGS-B',
+                  'l-bfgsb': 'L-BFGS-B',
+                  'tnc': 'TNC',
+                  'cobyla': 'COBYLA',
+                  'slsqp': 'SLSQP',
+                  'dogleg': 'dogleg',
+                  'trust-ncg': 'trust-ncg',
+                  'differential_evolution': 'differential_evolution'}
+
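+# How Minimizer.minimize (below) resolves a user-supplied method name
+# against this table: a prefix match on either the short key or the
+# canonical scipy name.  A standalone restatement, for illustration only:
+#
+#     def resolve_method(user_method):
+#         user_method = user_method.lower()
+#         for key, val in SCALAR_METHODS.items():
+#             if (key.lower().startswith(user_method) or
+#                     val.lower().startswith(user_method)):
+#                 return val
+#
+#     resolve_method('nelder')   # -> 'Nelder-Mead'
+#     resolve_method('L-BFGS')   # -> 'L-BFGS-B'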
+
+class MinimizerResult(object):
+    """ The result of a minimization.
+
+    Attributes
+    ----------
+    params : Parameters
+        The best-fit parameters
+    success : bool
+        Whether the minimization was successful
+    status : int
+        Termination status of the optimizer. Its value depends on the
+        underlying solver. Refer to `message` for details.
+
+    Notes
+    -----
+    Additional attributes not listed above may be present, depending on the
+    specific solver. The class simply stores the keyword arguments it is
+    given as attributes, so inspect the instance (for example with
+    `vars(result)`) to see which attributes are available.
+    """
+    def __init__(self, **kws):
+        for key, val in kws.items():
+            setattr(self, key, val)
+
+    @property
+    def flatchain(self):
+        """
+        A flatchain view of the sampling chain from the `emcee` method.
+        """
+        if hasattr(self, 'chain'):
+            if HAS_PANDAS:
+                return pd.DataFrame(self.chain.reshape((-1, self.nvarys)),
+                                    columns=self.var_names)
+            else:
+                raise NotImplementedError('Please install Pandas to see the '
+                                          'flattened chain')
+        else:
+            return None
+
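+# Typical use of flatchain after an emcee run (a hedged sketch; assumes
+# pandas is installed and `mini` is a Minimizer whose varying parameters
+# include a hypothetical 'amp'):
+#
+#     res = mini.emcee(nwalkers=100, steps=1000, burn=300, thin=20)
+#     res.flatchain['amp'].describe()   # pandas summary statistics
+#     res.flatchain.quantile(0.5)       # per-parameter medians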
+
+class Minimizer(object):
+    """A general minimizer for curve fitting"""
+    err_nonparam = ("params must be a minimizer.Parameters() instance or a "
+                    "list of Parameter objects")
+    err_maxfev = ("Too many function calls (max set to %i)!  Use:"
+                  " minimize(func, params, ..., maxfev=NNN) or set"
+                  " leastsq_kws['maxfev'] to increase this maximum.")
+
+    def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
+                 iter_cb=None, scale_covar=True, **kws):
+        """
+        Initialization of the Minimizer class.
+
+        Parameters
+        ----------
+        userfcn : callable
+            objective function that returns the residual (difference between
+            model and data) to be minimized in a least squares sense.  The
+            function must have the signature:
+            `userfcn(params, *fcn_args, **fcn_kws)`
+        params : lmfit.parameter.Parameters object.
+            contains the Parameters for the model.
+        fcn_args : tuple, optional
+            positional arguments to pass to userfcn.
+        fcn_kws : dict, optional
+            keyword arguments to pass to userfcn.
+        iter_cb : callable, optional
+            Function to be called at each fit iteration. This function should
+            have the signature:
+            `iter_cb(params, iter, resid, *fcn_args, **fcn_kws)`,
+            where `params` will have the current parameter values, `iter`
+            the iteration, `resid` the current residual array, and `*fcn_args`
+            and `**fcn_kws` as passed to the objective function.
+        scale_covar : bool, optional
+            Whether to automatically scale the covariance matrix (leastsq
+            only).
+        kws : dict, optional
+            Options to pass to the minimizer being used.
+
+        Notes
+        -----
+        The objective function should return the value to be minimized. For the
+        Levenberg-Marquardt algorithm from leastsq(), this returned value must
+        be an array, with a length greater than or equal to the number of
+        fitting variables in the model. For the other methods, the return value
+        can either be a scalar or an array. If an array is returned, the sum of
+        squares of the array will be sent to the underlying fitting method,
+        effectively doing a least-squares optimization of the return values.
+
+        A common use for `fcn_args` and `fcn_kws` would be to pass in other
+        data needed to calculate the residual, including such things as the
+        data array, dependent variable, uncertainties in the data, and other
+        data structures for the model calculation.
+        """
+        self.userfcn = userfcn
+        self.userargs = fcn_args
+        if self.userargs is None:
+            self.userargs = []
+
+        self.userkws = fcn_kws
+        if self.userkws is None:
+            self.userkws = {}
+        self.kws = kws
+        self.iter_cb = iter_cb
+        self.scale_covar = scale_covar
+        self.nfev = 0
+        self.nfree = 0
+        self.ndata = 0
+        self.ier = 0
+        self._abort = False
+        self.success = True
+        self.errorbars = False
+        self.message = None
+        self.lmdif_message = None
+        self.chisqr = None
+        self.redchi = None
+        self.covar = None
+        self.residual = None
+
+        self.params = params
+        self.jacfcn = None
+
+    @property
+    def values(self):
+        """
+        Returns
+        -------
+        param_values : dict
+            Parameter values in a simple dictionary.
+        """
+        return {name: p.value for name, p in self.result.params.items()}
+
+    def __residual(self, fvars):
+        """
+        Residual function used for least-squares fit.
+        With the new, candidate values of fvars (the fitting variables), this
+        evaluates all parameters, including setting bounds and evaluating
+        constraints, and then passes those to the user-supplied function to
+        calculate the residual.
+        """
+        # set parameter values
+        if self._abort:
+            return None
+        params = self.result.params
+        for name, val in zip(self.result.var_names, fvars):
+            params[name].value = params[name].from_internal(val)
+        self.result.nfev += 1
+
+        params.update_constraints()
+        out = self.userfcn(params, *self.userargs, **self.userkws)
+        if callable(self.iter_cb):
+            abort = self.iter_cb(params, self.result.nfev, out,
+                                 *self.userargs, **self.userkws)
+            self._abort = self._abort or abort
+        self._abort = self._abort and self.result.nfev > len(fvars)
+        if not self._abort:
+            return np.asarray(out).ravel()
+
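+    # Shape of the user-supplied objective that __residual drives (an
+    # illustrative sketch; `x` and `data` are hypothetical extra arguments
+    # passed in via fcn_args):
+    #
+    #     def residual(params, x, data):
+    #         model = params['amp'].value * np.exp(-x / params['decay'].value)
+    #         return model - data   # an array; its sum of squares is minimized
+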
+    def __jacobian(self, fvars):
+        """
+        Analytical Jacobian to be used with the Levenberg-Marquardt algorithm.
+
+        modified 02-01-2012 by Glenn Jones, Aberystwyth University
+        modified 06-29-2015 M Newville to apply gradient scaling
+               for bounded variables (thanks to JJ Helmus, N Mayorov)
+        """
+        pars = self.result.params
+        grad_scale = ones_like(fvars)
+        for ivar, name in enumerate(self.result.var_names):
+            val = fvars[ivar]
+            pars[name].value = pars[name].from_internal(val)
+            grad_scale[ivar] = pars[name].scale_gradient(val)
+
+        self.result.nfev += 1
+        pars.update_constraints()
+        # compute the Jacobian for the "internal" unbounded variables,
+        # then rescale it for the bounded "external" variables.
+        jac = self.jacfcn(pars, *self.userargs, **self.userkws)
+        if self.col_deriv:
+            jac = (jac.transpose()*grad_scale).transpose()
+        else:
+            jac *= grad_scale
+        return jac
+
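+    # Supplying an analytic Jacobian for __jacobian to wrap (a hedged
+    # sketch matching the hypothetical `residual` above; with col_deriv=True
+    # the derivatives are stacked one row per varying parameter):
+    #
+    #     def dfunc(params, x, data):
+    #         amp, decay = params['amp'].value, params['decay'].value
+    #         e = np.exp(-x / decay)
+    #         return np.array([e, amp * x * e / decay**2])
+    #
+    #     mini.leastsq(Dfun=dfunc, col_deriv=True)
+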
+    def penalty(self, fvars):
+        """
+        Penalty function for scalar minimizers.
+
+        Parameters
+        ----------
+        fvars : numpy.ndarray
+            Array of values for the variable parameters.
+
+        Returns
+        -------
+        r : float
+            The evaluated user-supplied objective function. If the objective
+            function returns an array, its sum of squares is returned instead.
+        """
+        r = self.__residual(fvars)
+        if isinstance(r, ndarray):
+            r = (r*r).sum()
+        return r
+
+    def prepare_fit(self, params=None):
+        """
+        Prepare parameters for fitting and return a MinimizerResult
+        holding the initial values.
+        """
+        result = self.result = MinimizerResult()
+        if params is not None:
+            self.params = params
+        if isinstance(self.params, Parameters):
+            result.params = deepcopy(self.params)
+        elif isinstance(self.params, (list, tuple)):
+            result.params = Parameters()
+            for par in self.params:
+                if not isinstance(par, Parameter):
+                    raise MinimizerException(self.err_nonparam)
+                else:
+                    result.params[par.name] = par
+        elif self.params is None:
+            raise MinimizerException(self.err_nonparam)
+
+        # determine which parameters are actually variables
+        # and which are defined expressions.
+
+        result.var_names = []  # note that this *does* belong to self...
+        result.init_vals = []
+        result.params.update_constraints()
+        result.nfev = 0
+        result.errorbars = False
+        result.aborted = False
+        for name, par in self.result.params.items():
+            par.stderr = None
+            par.correl = None
+            if par.expr is not None:
+                par.vary = False
+            if par.vary:
+                result.var_names.append(name)
+                result.init_vals.append(par.setup_bounds())
+
+            par.init_value = par.value
+            if par.name is None:
+                par.name = name
+        result.nvarys = len(result.var_names)
+        return result
+
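+    # What prepare_fit returns (a sketch): a fresh MinimizerResult in which
+    # var_names/init_vals cover only the varying parameters, with starting
+    # values mapped onto the internal (bounds-transformed) scale:
+    #
+    #     res = mini.prepare_fit()
+    #     res.var_names   # e.g. ['amp', 'decay']
+    #     res.init_vals   # one internal starting value per name above
+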
+    def unprepare_fit(self):
+        """
+        Unprepare the fit so that subsequent fits are forced to run
+        prepare_fit; removes AST compilations of constraint expressions.
+        """
+        pass
+
+    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
+                       'and method=\'L-BFGS-B\' instead')
+    def lbfgsb(self, **kws):
+        """
+        Use L-BFGS-B minimization.
+
+        Parameters
+        ----------
+        kws : dict
+            Minimizer options to pass to the
+            scipy.optimize.lbfgsb.fmin_l_bfgs_b function.
+
+        """
+        raise NotImplementedError("use scalar_minimize(method='L-BFGS-B')")
+
+    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
+                       'and method=\'Nelder-Mead\' instead')
+    def fmin(self, **kws):
+        """
+        Use Nelder-Mead (simplex) minimization
+
+        Parameters
+        ----------
+        kws : dict
+            Minimizer options to pass to the scipy.optimize.fmin minimizer.
+        """
+        raise NotImplementedError("use scalar_minimize(method='Nelder-Mead')")
+
+    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
+        """
+        Use one of the scalar minimization methods from
+        scipy.optimize.minimize.
+
+        Parameters
+        ----------
+        method : str, optional
+            Name of the fitting method to use.
+            One of:
+                'Nelder-Mead' (default)
+                'L-BFGS-B'
+                'Powell'
+                'CG'
+                'Newton-CG'
+                'COBYLA'
+                'TNC'
+                'trust-ncg'
+                'dogleg'
+                'SLSQP'
+                'differential_evolution'
+
+        params : Parameters, optional
+           Parameters to use as starting points.
+        kws : dict, optional
+            Minimizer options to pass to scipy.optimize.minimize.
+
+        If the objective function returns a numpy array instead
+        of the expected scalar, the sum of squares of the array
+        will be used.
+
+        Note that bounds and constraints can be set on Parameters for any
+        of these methods, so bounds are not passed separately even to the
+        methods designed to accept them. However, if you use the
+        differential_evolution method you must specify finite
+        (min, max) bounds for each varying Parameter.
+
+        Returns
+        -------
+        result : MinimizerResult
+            Object containing the optimized parameters and fit statistics.
+
+        """
+        if not HAS_SCALAR_MIN:
+            raise NotImplementedError('scipy.optimize.minimize is required')
+
+        result = self.prepare_fit(params=params)
+        vars = result.init_vals
+        params = result.params
+
+        fmin_kws = dict(method=method,
+                        options={'maxiter': 1000 * (len(vars) + 1)})
+        fmin_kws.update(self.kws)
+        fmin_kws.update(kws)
+
+        # hess supported only in some methods
+        if 'hess' in fmin_kws and method not in ('Newton-CG',
+                                                 'dogleg', 'trust-ncg'):
+            fmin_kws.pop('hess')
+
+        # jac supported only in some methods (and Dfun could be used...)
+        if 'jac' not in fmin_kws and fmin_kws.get('Dfun', None) is not None:
+            self.jacfcn = fmin_kws.pop('Dfun')
+            fmin_kws['jac'] = self.__jacobian
+
+        if 'jac' in fmin_kws and method not in ('CG', 'BFGS', 'Newton-CG',
+                                                'dogleg', 'trust-ncg'):
+            self.jacfcn = None
+            fmin_kws.pop('jac')
+
+        if method == 'differential_evolution':
+            fmin_kws['method'] = _differential_evolution
+            bounds = [(par.min, par.max) for par in params.values()]
+            if not np.all(np.isfinite(bounds)):
+                raise ValueError('With differential evolution finite bounds '
+                                 'are required for each parameter')
+            bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
+            fmin_kws['bounds'] = bounds
+
+            # scipy >= 0.14 accepts a callable for `method`, so this could
+            # be routed through scipy_minimize; once scipy 0.14 is the
+            # minimum supported version, this call and the else can be removed.
+            ret = _differential_evolution(self.penalty, vars, **fmin_kws)
+        else:
+            ret = scipy_minimize(self.penalty, vars, **fmin_kws)
+
+        result.aborted = self._abort
+        self._abort = False
+        if isinstance(ret, dict):
+            for attr, value in ret.items():
+                setattr(result, attr, value)
+        else:
+            for attr in dir(ret):
+                if not attr.startswith('_'):
+                    setattr(result, attr, getattr(ret, attr))
+
+        result.x = np.atleast_1d(result.x)
+        result.chisqr = result.residual = self.__residual(result.x)
+        result.nvarys = len(vars)
+        result.ndata = 1
+        result.nfree = 1
+        if isinstance(result.residual, ndarray):
+            result.chisqr = (result.chisqr**2).sum()
+            result.ndata = len(result.residual)
+            result.nfree = result.ndata - result.nvarys
+        result.redchi = result.chisqr / result.nfree
+        _log_likelihood = result.ndata * np.log(result.redchi)
+        result.aic = _log_likelihood + 2 * result.nvarys
+        result.bic = _log_likelihood + np.log(result.ndata) * result.nvarys
+
+        return result
+
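+    # Hedged usage sketch for scalar_minimize, reusing the hypothetical
+    # `residual`, `params`, `x` and `data` from the comments above:
+    #
+    #     mini = Minimizer(residual, params, fcn_args=(x, data))
+    #     out = mini.scalar_minimize(method='Nelder-Mead',
+    #                                options={'maxiter': 5000})
+    #     print(out.params['amp'].value, out.redchi)
+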
+    def emcee(self, params=None, steps=1000, nwalkers=100, burn=0, thin=1,
+              ntemps=1, pos=None, reuse_sampler=False, workers=1,
+              float_behavior='posterior', is_weighted=True, seed=None):
+        """
+        Bayesian sampling of the posterior distribution for the parameters
+        using the `emcee` Markov Chain Monte Carlo package. The method assumes
+        that the prior is Uniform. You need to have `emcee` installed to use
+        this method.
+
+        Parameters
+        ----------
+        params : lmfit.Parameters, optional
+            Parameters to use as starting point. If this is not specified
+            then the Parameters used to initialise the Minimizer object are
+            used.
+        steps : int, optional
+            The number of samples to draw from the posterior distribution
+            for each of the walkers.
+        nwalkers : int, optional
+            Should be set so :math:`nwalkers >> nvarys`, where `nvarys` is
+            the number of parameters being varied during the fit.
+            "Walkers are the members of the ensemble. They are almost like
+            separate Metropolis-Hastings chains but, of course, the proposal
+            distribution for a given walker depends on the positions of all
+            the other walkers in the ensemble." - from the `emcee` webpage.
+        burn : int, optional
+            Discard this many samples from the start of the sampling regime.
+        thin : int, optional
+            Only accept 1 in every `thin` samples.
+        ntemps : int, optional
+            If `ntemps > 1`, parallel tempering is performed.
+        pos : np.ndarray, optional
+            Specify the initial positions for the sampler.  If `ntemps == 1`
+            then `pos.shape` should be `(nwalkers, nvarys)`. Otherwise,
+            `(ntemps, nwalkers, nvarys)`. You can also initialise using a
+            previous chain that had the same `ntemps`, `nwalkers` and
+            `nvarys`. Note that `nvarys` may be one larger than you expect it
+            to be if your `userfcn` returns an array and `is_weighted is
+            False`.
+        reuse_sampler : bool, optional
+            If you have already run `emcee` on a given `Minimizer` object then
+            it possesses an internal ``sampler`` attribute. You can continue to
+            draw from the same sampler (retaining the chain history) if you set
+            this option to `True`. Otherwise a new sampler is created. The
+            `nwalkers`, `ntemps`, `pos`, and `params` keywords are ignored with
+            this option.
+            **Important**: the Parameters used to create the sampler must not
+            change in-between calls to `emcee`. Alteration of Parameters
+            would include changed ``min``, ``max``, ``vary`` and ``expr``
+            attributes. This may happen, for example, if you use an altered
+            Parameters object and call the `minimize` method in-between calls
+            to `emcee`.
+        workers : Pool-like or int, optional
+            For parallelization of sampling.  It can be any Pool-like object
+            with a map method that follows the same calling sequence as the
+            built-in `map` function. If an int is given as the argument, then a
+            multiprocessing-based pool is spawned internally with the
+            corresponding number of parallel processes. 'mpi4py'-based
+            parallelization and 'joblib'-based parallelization pools can also
+            be used here. **Note**: because of multiprocessing overhead it may
+            only be worth parallelising if the objective function is expensive
+            to calculate, or if there are a large number of objective
+            evaluations per step (`ntemps * nwalkers * nvarys`).
+        float_behavior : str, optional
+            Specifies meaning of the objective function output if it returns a
+            float. One of:
+
+                'posterior' - objective function returns a log-posterior
+                               probability
+                'chi2' - objective function returns :math:`\chi^2`.
+
+            See Notes for further details.
+        is_weighted : bool, optional
+            Has your objective function been weighted by measurement
+            uncertainties? If `is_weighted is True` then your objective
+            function is assumed to return residuals that have been divided by
+            the true measurement uncertainty `(data - model) / sigma`. If
+            `is_weighted is False` then the objective function is assumed to
+            return unweighted residuals, `data - model`. In this case `emcee`
+            will employ a positive measurement uncertainty during the sampling.
+            This measurement uncertainty will be present in the output params
+            and output chain with the name `__lnsigma`. A side effect of this
+            is that you cannot use this parameter name yourself.
+            **Important** this parameter only has any effect if your objective
+            function returns an array. If your objective function returns a
+            float, then this parameter is ignored. See Notes for more details.
+        seed : int or `np.random.RandomState`, optional
+            If `seed` is an int, a new `np.random.RandomState` instance is used,
+            seeded with `seed`.
+            If `seed` is already a `np.random.RandomState` instance, then that
+            `np.random.RandomState` instance is used.
+            Specify `seed` for repeatable minimizations.
+
+        Returns
+        -------
+        result : MinimizerResult
+            MinimizerResult object containing updated params, statistics,
+            etc. The `MinimizerResult` also contains the ``chain``,
+            ``flatchain`` and ``lnprob`` attributes. The ``chain``
+            and ``flatchain`` attributes contain the samples and have the shape
+            `(nwalkers, (steps - burn) // thin, nvarys)` or
+            `(ntemps, nwalkers, (steps - burn) // thin, nvarys)`,
+            depending on whether Parallel tempering was used or not.
+            `nvarys` is the number of parameters that are allowed to vary.
+            The ``flatchain`` attribute is a `pandas.DataFrame` of the
+            flattened chain, `chain.reshape(-1, nvarys)`. To access flattened
+            chain values for a particular parameter use
+            `result.flatchain[parname]`. The ``lnprob`` attribute contains the
+            log probability for each sample in ``chain``. The sample with the
+            highest probability corresponds to the maximum likelihood estimate.
+
+        Notes
+        -----
+        This method samples the posterior distribution of the parameters using
+        Markov Chain Monte Carlo.  To do so it needs to calculate the
+        log-posterior probability of the model parameters, `F`, given the data,
+        `D`, :math:`\ln p(F_{true} | D)`. This 'posterior probability' is
+        calculated as:
+
+        .. math::
+
+            \ln p(F_{true} | D) \propto \ln p(D | F_{true}) + \ln p(F_{true})
+
+        where :math:`\ln p(D | F_{true})` is the 'log-likelihood' and
+        :math:`\ln p(F_{true})` is the 'log-prior'. The default log-prior
+        encodes prior information already known about the model. This method
+        assumes that the log-prior probability is `-np.inf` (impossible) if any
+        one of the parameters is outside its limits. The log-prior probability
+        term is zero if all the parameters are inside their bounds (known as a
+        uniform prior). The log-likelihood function is given by [1]_:
+
+        .. math::
+
+            \ln p(D|F_{true}) = -\frac{1}{2}\sum_n \left[\frac{\left(g_n(F_{true}) - D_n \right)^2}{s_n^2}+\ln (2\pi s_n^2)\right]
+
+        The first summand in the square brackets represents the residual for a
+        given datapoint (:math:`g` being the generative model). This term
+        represents :math:`\chi^2` when summed over all datapoints.
+        Ideally the objective function used to create `lmfit.Minimizer` should
+        return the log-posterior probability, :math:`\ln p(F_{true} | D)`.
+        However, since the in-built log-prior term is zero, the objective
+        function can also just return the log-likelihood, unless you wish to
+        create a non-uniform prior.
+
+        If a float value is returned by the objective function then this value
+        is assumed by default to be the log-posterior probability, i.e.
+        `float_behavior is 'posterior'`. If your objective function returns
+        :math:`\chi^2`, then you should use a value of `'chi2'` for
+        `float_behavior`. `emcee` will then multiply your :math:`\chi^2` value
+        by -0.5 to obtain the posterior probability.
+
+        However, the default behaviour of many objective functions is to return
+        a vector of (possibly weighted) residuals. Therefore, if your objective
+        function returns a vector, `res`, then the vector is assumed to contain
+        the residuals. If `is_weighted is True` then your residuals are assumed
+        to be correctly weighted by the standard deviation of the data points
+        (`res = (data - model) / sigma`) and the log-likelihood (and
+        log-posterior probability) is calculated as: `-0.5 * np.sum(res **2)`.
+        This ignores the second summand in the square brackets. Consequently,
+        in order to calculate a fully correct log-posterior probability value
+        your objective function should return a single value. If
+        `is_weighted is False` then the data uncertainty, `s_n`, will be
+        treated as a nuisance parameter and will be marginalised out. This is
+        achieved by employing a strictly positive uncertainty
+        (homoscedasticity) for each data point, :math:`s_n = \exp(\_\_lnsigma)`.
+        `__lnsigma` will be present in `MinimizerResult.params`, as well as in
+        `Minimizer.chain`; `nvarys` will also be increased by one.
+
+        References
+        ----------
+        .. [1] http://dan.iel.fm/emcee/current/user/line/
+        """
+        if not HAS_EMCEE:
+            raise NotImplementedError('You must have emcee to use'
+                                      ' the emcee method')
+        tparams = params
+        # if you're reusing the sampler then ntemps, nwalkers have to be
+        # determined from the previous sampling
+        if reuse_sampler:
+            if not hasattr(self, 'sampler') or not hasattr(self, '_lastpos'):
+                raise ValueError("You wanted to use an existing sampler, but"
+                                 "it hasn't been created yet")
+            if len(self._lastpos.shape) == 2:
+                ntemps = 1
+                nwalkers = self._lastpos.shape[0]
+            elif len(self._lastpos.shape) == 3:
+                ntemps = self._lastpos.shape[0]
+                nwalkers = self._lastpos.shape[1]
+            tparams = None
+
+        result = self.prepare_fit(params=tparams)
+        params = result.params
+
+        # check if the userfcn returns a vector of residuals
+        out = self.userfcn(params, *self.userargs, **self.userkws)
+        out = np.asarray(out).ravel()
+        if out.size > 1 and is_weighted is False:
+            # we need to marginalise over a constant data uncertainty
+            if '__lnsigma' not in params:
+                # __lnsigma should already be in params if a previous run
+                # with is_weighted=False added it.
+                params.add('__lnsigma', value=0.01, min=-np.inf, max=np.inf, vary=True)
+                # have to re-prepare the fit
+                result = self.prepare_fit(params)
+                params = result.params
+
+        # Removing internal parameter scaling. We could possibly keep it,
+        # but I don't know how this affects the emcee sampling.
+        bounds = []
+        var_arr = np.zeros(len(result.var_names))
+        i = 0
+        for par in params:
+            param = params[par]
+            if param.expr is not None:
+                param.vary = False
+            if param.vary:
+                var_arr[i] = param.value
+                i += 1
+            else:
+                # don't want to append bounds if they're not being varied.
+                continue
+
+            param.from_internal = lambda val: val
+            lb, ub = param.min, param.max
+            if lb is None or np.isnan(lb):
+                lb = -np.inf
+            if ub is None or np.isnan(ub):
+                ub = np.inf
+            bounds.append((lb, ub))
+        bounds = np.array(bounds)
+
+        self.nvarys = len(result.var_names)
+
+        # set up multiprocessing options for the samplers
+        auto_pool = None
+        sampler_kwargs = {}
+        if isinstance(workers, int) and workers > 1:
+            auto_pool = multiprocessing.Pool(workers)
+            sampler_kwargs['pool'] = auto_pool
+        elif hasattr(workers, 'map'):
+            sampler_kwargs['pool'] = workers
+
+        # function arguments for the log-probability functions
+        # these values are sent to the log-probability functions by the sampler.
+        lnprob_args = (self.userfcn, params, result.var_names, bounds)
+        lnprob_kwargs = {'is_weighted': is_weighted,
+                         'float_behavior': float_behavior,
+                         'userargs': self.userargs,
+                         'userkws': self.userkws}
+
+        if ntemps > 1:
+            # the prior and likelihood function args and kwargs are the same
+            sampler_kwargs['loglargs'] = lnprob_args
+            sampler_kwargs['loglkwargs'] = lnprob_kwargs
+            sampler_kwargs['logpargs'] = (bounds,)
+        else:
+            sampler_kwargs['args'] = lnprob_args
+            sampler_kwargs['kwargs'] = lnprob_kwargs
+
+        # set up the random number generator
+        rng = _make_random_gen(seed)
+
+        # now initialise the samplers
+        if reuse_sampler:
+            if auto_pool is not None:
+                self.sampler.pool = auto_pool
+
+            p0 = self._lastpos
+            if p0.shape[-1] != self.nvarys:
+                raise ValueError("You cannot reuse the sampler if the number"
+                                 "of varying parameters has changed")
+        elif ntemps > 1:
+            # Parallel Tempering
+            # jitter the starting position by scaled Gaussian noise
+            p0 = 1 + rng.randn(ntemps, nwalkers, self.nvarys) * 1.e-4
+            p0 *= var_arr
+            self.sampler = emcee.PTSampler(ntemps, nwalkers, self.nvarys,
+                                           _lnpost, _lnprior, **sampler_kwargs)
+        else:
+            p0 = 1 + rng.randn(nwalkers, self.nvarys) * 1.e-4
+            p0 *= var_arr
+            self.sampler = emcee.EnsembleSampler(nwalkers, self.nvarys,
+                                                 _lnpost, **sampler_kwargs)
+
+        # The user may supply an initialisation position for the chain.
+        # Running the sampler with a p0 of the wrong size raises a
+        # ValueError. Note that you cannot initialise with a position if
+        # you are reusing the sampler.
+        if pos is not None and not reuse_sampler:
+            tpos = np.asfarray(pos)
+            if p0.shape == tpos.shape:
+                pass
+            # trying to initialise with a previous chain
+            elif (tpos.shape[0::2] == (nwalkers, self.nvarys)):
+                tpos = tpos[:, -1, :]
+            # initialising with a PTsampler chain.
+            elif ntemps > 1 and tpos.ndim == 4:
+                tpos_shape = list(tpos.shape)
+                tpos_shape.pop(2)
+                if tpos_shape == (ntemps, nwalkers, self.nvarys):
+                    tpos = tpos[..., -1, :]
+            else:
+                raise ValueError('pos should have shape (nwalkers, nvarys) '
+                                 'or (ntemps, nwalkers, nvarys) if ntemps > 1')
+            p0 = tpos
+
+        # if you specified a seed then you also need to seed the sampler
+        if seed is not None:
+            self.sampler.random_state = rng.get_state()
+
+        # now do a production run, sampling all the time
+        output = self.sampler.run_mcmc(p0, steps)
+        self._lastpos = output[0]
+
+        # discard the burn samples and thin
+        chain = self.sampler.chain[..., burn::thin, :]
+        lnprobability = self.sampler.lnprobability[:, burn::thin]
+
+        flatchain = chain.reshape((-1, self.nvarys))
+
+        quantiles = np.percentile(flatchain, [15.87, 50, 84.13], axis=0)
+
+        for i, var_name in enumerate(result.var_names):
+            std_l, median, std_u = quantiles[:, i]
+            params[var_name].value = median
+            params[var_name].stderr = 0.5 * (std_u - std_l)
+            params[var_name].correl = {}
+
+        params.update_constraints()
+
+        # work out correlation coefficients
+        corrcoefs = np.corrcoef(flatchain.T)
+
+        for i, var_name in enumerate(result.var_names):
+            for j, var_name2 in enumerate(result.var_names):
+                if i != j:
+                    result.params[var_name].correl[var_name2] = corrcoefs[i, j]
+
+        result.chain = np.copy(chain)
+        result.lnprob = np.copy(lnprobability)
+        result.errorbars = True
+        result.nvarys = len(result.var_names)
+
+        if auto_pool is not None:
+            auto_pool.terminate()
+
+        return result
+
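+    # Hedged end-to-end sketch for the emcee method (assumes the emcee and
+    # pandas packages are installed; `residual`, `params`, `x` and `data`
+    # are the hypothetical objects from the comments above):
+    #
+    #     mini = Minimizer(residual, params, fcn_args=(x, data))
+    #     res = mini.emcee(nwalkers=100, steps=1000, burn=200, thin=10,
+    #                      is_weighted=True, seed=42)
+    #     print(res.params)  # medians, with 1-sigma spreads as stderr
+    #     ml = res.flatchain.iloc[np.argmax(res.lnprob)]  # highest-probability sample
+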
+    def leastsq(self, params=None, **kws):
+        """
+        Use Levenberg-Marquardt minimization to perform a fit.
+        This assumes that Parameters have been stored, and a function to
+        minimize has been properly set up.
+
+        This wraps scipy.optimize.leastsq.
+
+        When possible, this calculates the estimated uncertainties and
+        variable correlations from the covariance matrix.
+
+        Writes outputs to many internal attributes.
+
+        Parameters
+        ----------
+        params : Parameters, optional
+           Parameters to use as starting points.
+        kws : dict, optional
+            Minimizer options to pass to scipy.optimize.leastsq.
+
+        Returns
+        -------
+        result : MinimizerResult
+            Object containing the optimized parameters and fit statistics.
+        """
+        result = self.prepare_fit(params=params)
+        vars = result.init_vals
+        nvars = len(vars)
+        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
+                     gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None)
+
+        lskws.update(self.kws)
+        lskws.update(kws)
+
+        self.col_deriv = False
+        if lskws['Dfun'] is not None:
+            self.jacfcn = lskws['Dfun']
+            self.col_deriv = lskws['col_deriv']
+            lskws['Dfun'] = self.__jacobian
+
+        # suppress runtime warnings during fit and error analysis
+        orig_warn_settings = np.geterr()
+        np.seterr(all='ignore')
+
+        lsout = scipy_leastsq(self.__residual, vars, **lskws)
+        _best, _cov, infodict, errmsg, ier = lsout
+        result.aborted = self._abort
+        self._abort = False
+
+        result.residual = resid = infodict['fvec']
+        result.ier = ier
+        result.lmdif_message = errmsg
+        result.message = 'Fit succeeded.'
+        result.success = ier in [1, 2, 3, 4]
+        if result.aborted:
+            result.message = 'Fit aborted by user callback.'
+            result.success = False
+        elif ier == 0:
+            result.message = 'Invalid Input Parameters.'
+        elif ier == 5:
+            result.message = self.err_maxfev % lskws['maxfev']
+        else:
+            result.message = 'Tolerance seems to be too small.'
+
+        result.ndata = len(resid)
+
+        result.chisqr = (resid**2).sum()
+        result.nfree = (result.ndata - nvars)
+        result.redchi = result.chisqr / result.nfree
+        _log_likelihood = result.ndata * np.log(result.redchi)
+        result.aic = _log_likelihood + 2 * nvars
+        result.bic = _log_likelihood + np.log(result.ndata) * nvars
+
+        params = result.params
+
+        # need to map _best values to params, then calculate the
+        # grad for the variable parameters
+        grad = ones_like(_best)
+        vbest = ones_like(_best)
+
+        # ensure that _best, vbest, and grad are not
+        # broken 1-element ndarrays.
+        if len(np.shape(_best)) == 0:
+            _best = np.array([_best])
+        if len(np.shape(vbest)) == 0:
+            vbest = np.array([vbest])
+        if len(np.shape(grad)) == 0:
+            grad = np.array([grad])
+
+        for ivar, name in enumerate(result.var_names):
+            grad[ivar] = params[name].scale_gradient(_best[ivar])
+            vbest[ivar] = params[name].value
+
+        # modified from JJ Helmus' leastsqbound.py
+        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
+                                     take(grad, infodict['ipvt'] - 1))
+        rvec = dot(triu(transpose(infodict['fjac'])[:nvars, :]),
+                   take(eye(nvars), infodict['ipvt'] - 1, 0))
+        try:
+            result.covar = inv(dot(transpose(rvec), rvec))
+        except (LinAlgError, ValueError):
+            result.covar = None
+
+        has_expr = False
+        for par in params.values():
+            par.stderr, par.correl = 0, None
+            has_expr = has_expr or par.expr is not None
+
+        # self.errorbars = error bars were successfully estimated
+        result.errorbars = (result.covar is not None)
+        if result.aborted:
+            result.errorbars = False
+        if result.errorbars:
+            if self.scale_covar:
+                result.covar *= result.redchi
+            for ivar, name in enumerate(result.var_names):
+                par = params[name]
+                par.stderr = sqrt(result.covar[ivar, ivar])
+                par.correl = {}
+                try:
+                    result.errorbars = result.errorbars and (par.stderr > 0.0)
+                    for jvar, varn2 in enumerate(result.var_names):
+                        if jvar != ivar:
+                            par.correl[varn2] = (result.covar[ivar, jvar] /
+                                 (par.stderr * sqrt(result.covar[jvar, jvar])))
+                except Exception:
+                    result.errorbars = False
+
+            if has_expr:
+                # uncertainties on constrained parameters:
+                #   get values with uncertainties (including correlations),
+                #   temporarily set Parameter values to these,
+                #   re-evaluate constrained parameters to extract stderr
+                #   and then set Parameters back to best-fit value
+                try:
+                    uvars = uncertainties.correlated_values(vbest, result.covar)
+                except (LinAlgError, ValueError):
+                    uvars = None
+                if uvars is not None:
+                    for par in params.values():
+                        eval_stderr(par, uvars, result.var_names, params)
+                    # restore nominal values
+                    for v, nam in zip(uvars, result.var_names):
+                        params[nam].value = v.nominal_value
+
+        if not result.errorbars:
+            result.message = '%s. Could not estimate error-bars' % result.message
+
+        np.seterr(**orig_warn_settings)
+        return result
+
+    def minimize(self, method='leastsq', params=None, **kws):
+        """
+        Perform the minimization.
+
+        Parameters
+        ----------
+        method : str, optional
+            Name of the fitting method to use.
+            One of:
+            'leastsq'                -    Levenberg-Marquardt (default)
+            'nelder'                 -    Nelder-Mead
+            'lbfgsb'                 -    L-BFGS-B
+            'powell'                 -    Powell
+            'cg'                     -    Conjugate-Gradient
+            'newton'                 -    Newton-CG
+            'cobyla'                 -    Cobyla
+            'tnc'                    -    Truncated Newton
+            'trust-ncg'              -    Trust region Newton-CG
+            'dogleg'                 -    Dogleg
+            'slsqp'                  -    Sequential Least Squares Programming
+            'differential_evolution' -    differential evolution
+
+        params : Parameters, optional
+            parameters to use as starting values
+
+        Returns
+        -------
+        result : MinimizerResult
+
+            MinimizerResult object contains updated params, fit statistics, etc.
+
+        """
+
+        function = self.leastsq
+        kwargs = {'params': params}
+        kwargs.update(kws)
+
+        user_method = method.lower()
+        if user_method.startswith('least'):
+            function = self.leastsq
+        elif HAS_SCALAR_MIN:
+            function = self.scalar_minimize
+            for key, val in SCALAR_METHODS.items():
+                if (key.lower().startswith(user_method) or
+                    val.lower().startswith(user_method)):
+                    kwargs['method'] = val
+        elif (user_method.startswith('nelder') or
+              user_method.startswith('fmin')):
+            function = self.fmin
+        elif user_method.startswith('lbfgsb'):
+            function = self.lbfgsb
+        return function(**kwargs)
+
+
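+# A minimal usage sketch for Minimizer.minimize() and its method dispatch
+# (illustrative only; the residual function and the x/data arrays below are
+# hypothetical, not part of this module):
+#
+#     import numpy as np
+#     from lmfit import Minimizer, Parameters
+#
+#     def residual(params, x, data):
+#         amp, tau = params['amp'].value, params['tau'].value
+#         return amp * np.exp(-x / tau) - data
+#
+#     params = Parameters()
+#     params.add('amp', value=10.0)
+#     params.add('tau', value=1.0, min=0.0)
+#     x = np.linspace(0.0, 10.0, 101)
+#     data = 7.0 * np.exp(-x / 2.5)
+#     fitter = Minimizer(residual, params, fcn_args=(x, data))
+#     result = fitter.minimize(method='nelder')  # dispatches to scalar_minimize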
+def _lnprior(theta, bounds):
+    """
+    Calculates an improper uniform log-prior probability
+
+    Parameters
+    ----------
+    theta : sequence
+        float parameter values (only those being varied)
+    bounds : np.ndarray
+        Lower and upper bounds of parameters that are varying.
+        Has shape (nvarys, 2).
+
+    Returns
+    -------
+    lnprob : float
+        Log prior probability
+    """
+    if (np.any(theta > bounds[:, 1])
+        or np.any(theta < bounds[:, 0])):
+        return -np.inf
+    else:
+        return 0
+
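+# Example (illustrative): with bounds = np.array([[0.0, 1.0], [-1.0, 1.0]]),
+# where each row holds (lower, upper) for one varying parameter,
+# _lnprior(np.array([0.5, 0.0]), bounds) returns 0, while
+# _lnprior(np.array([2.0, 0.0]), bounds) returns -np.inf.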
+
+def _lnpost(theta, userfcn, params, var_names, bounds, userargs=(),
+            userkws=None, float_behavior='posterior', is_weighted=True):
+    """
+    Calculates the log-posterior probability. See the `Minimizer.emcee` method
+    for more details.
+
+    Parameters
+    ----------
+    theta : sequence
+        float parameter values (only those being varied)
+    userfcn : callable
+        User objective function
+    params : lmfit.Parameters
+        The entire set of Parameters
+    var_names : list
+        The names of the parameters that are varying
+    bounds : np.ndarray
+        Lower and upper bounds of parameters. Has shape (nvarys, 2).
+    userargs : tuple, optional
+        Extra positional arguments required for user objective function
+    userkws : dict, optional
+        Extra keyword arguments required for user objective function
+    float_behavior : str, optional
+        Specifies meaning of objective when it returns a float. One of:
+
+        'posterior' - objective function returns a log-posterior
+                      probability.
+        'chi2' - objective function returns a chi2 value.
+
+    is_weighted : bool
+        If `userfcn` returns a vector of residuals then `is_weighted`
+        specifies if the residuals have been weighted by data uncertainties.
+
+    Returns
+    -------
+    lnprob : float
+        Log posterior probability
+    """
+    # the comparison has to be done on theta and bounds. DO NOT inject theta
+    # values into Parameters, then compare Parameters values to the bounds.
+    # Parameters values are clipped to stay within bounds.
+    if (np.any(theta > bounds[:, 1])
+        or np.any(theta < bounds[:, 0])):
+        return -np.inf
+
+    for name, val in zip(var_names, theta):
+        params[name].value = val
+
+    userkwargs = {}
+    if userkws is not None:
+        userkwargs = userkws
+
+    # update the constraints
+    params.update_constraints()
+
+    # now calculate the log-likelihood
+    out = userfcn(params, *userargs, **userkwargs)
+    lnprob = np.asarray(out).ravel()
+
+    if lnprob.size > 1:
+        # objective function returns a vector of residuals
+        if '__lnsigma' in params and not is_weighted:
+            # marginalise over a constant data uncertainty
+            __lnsigma = params['__lnsigma'].value
+            c = np.log(2 * np.pi) + 2 * __lnsigma
+            lnprob = -0.5 * np.sum((lnprob / np.exp(__lnsigma)) ** 2 + c)
+        else:
+            lnprob = -0.5 * (lnprob * lnprob).sum()
+    else:
+        # objective function returns a single value.
+        # use float_behavior to figure out if the value is posterior or chi2
+        if float_behavior == 'posterior':
+            pass
+        elif float_behavior == 'chi2':
+            lnprob *= -0.5
+        else:
+            raise ValueError("float_behaviour must be either 'posterior' or"
+                             " 'chi2' " + float_behavior)
+
+    return lnprob
+
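+# Note (sketch of the math above): when the objective returns a weighted
+# residual vector r, the value computed is the Gaussian log-likelihood up to
+# an additive constant, lnprob = -0.5 * sum(r**2); with is_weighted=False
+# and a '__lnsigma' parameter, the per-point normalization term
+# log(2*pi) + 2*__lnsigma is included in the sum.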
+
+def _make_random_gen(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None, return the RandomState singleton used by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
+
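+# Example (illustrative): each call below yields a RandomState suitable for
+# seeding the emcee sampler:
+#
+#     _make_random_gen(None)                      # numpy's global RandomState
+#     _make_random_gen(42)                        # new RandomState seeded with 42
+#     _make_random_gen(np.random.RandomState(7))  # returned unchanged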
+
+def minimize(fcn, params, method='leastsq', args=None, kws=None,
+             scale_covar=True, iter_cb=None, **fit_kws):
+    """
+    A general-purpose curve-fitting function.
+
+    The minimize function takes an objective function to be minimized, a
+    dictionary (lmfit.parameter.Parameters) containing the model parameters,
+    and several optional arguments.
+
+    Parameters
+    ----------
+    fcn : callable
+        objective function that returns the residual (difference between
+        model and data) to be minimized in a least squares sense.  The
+        function must have the signature:
+        `fcn(params, *args, **kws)`
+    params : lmfit.parameter.Parameters object.
+        contains the Parameters for the model.
+    method : str, optional
+        Name of the fitting method to use.
+        One of:
+            'leastsq'                -    Levenberg-Marquardt (default)
+            'nelder'                 -    Nelder-Mead
+            'lbfgsb'                 -    L-BFGS-B
+            'powell'                 -    Powell
+            'cg'                     -    Conjugate-Gradient
+            'newton'                 -    Newton-CG
+            'cobyla'                 -    Cobyla
+            'tnc'                    -    Truncated Newton
+            'trust-ncg'              -    Trust region Newton-CG
+            'dogleg'                 -    Dogleg
+            'slsqp'                  -    Sequential Least Squares Programming
+            'differential_evolution' -    differential evolution
+
+    args : tuple, optional
+        Positional arguments to pass to fcn.
+    kws : dict, optional
+        Keyword arguments to pass to fcn.
+    iter_cb : callable, optional
+        Function to be called at each fit iteration. This function should
+        have the signature `iter_cb(params, iter, resid, *args, **kws)`,
+        where `params` will have the current parameter values, `iter`
+        the iteration number, `resid` the current residual array, and `*args`
+        and `**kws` as passed to the objective function.
+    scale_covar : bool, optional
+        Whether to automatically scale the covariance matrix (leastsq
+        only).
+    fit_kws : dict, optional
+        Options to pass to the minimizer being used.
+
+    Notes
+    -----
+    The objective function should return the value to be minimized. For the
+    Levenberg-Marquardt algorithm from leastsq(), this returned value must
+    be an array, with a length greater than or equal to the number of
+    fitting variables in the model. For the other methods, the return value
+    can either be a scalar or an array. If an array is returned, the sum of
+    squares of the array will be sent to the underlying fitting method,
+    effectively doing a least-squares optimization of the return values.
+
+    A common use for `args` and `kws` would be to pass in other
+    data needed to calculate the residual, including such things as the
+    data array, dependent variable, uncertainties in the data, and other
+    data structures for the model calculation.
+    """
+    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
+    return fitter.minimize(method=method)
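+# A minimal usage sketch for minimize() (illustrative only; the residual
+# function and the synthetic x/data arrays below are hypothetical):
+#
+#     import numpy as np
+#     from lmfit import minimize, Parameters
+#
+#     def residual(params, x, data):
+#         amp, tau = params['amp'].value, params['tau'].value
+#         return amp * np.exp(-x / tau) - data
+#
+#     params = Parameters()
+#     params.add('amp', value=10.0)
+#     params.add('tau', value=1.0, min=0.0)
+#     x = np.linspace(0.0, 10.0, 101)
+#     data = 7.0 * np.exp(-x / 2.5)
+#     out = minimize(residual, params, args=(x, data), method='leastsq')
+#     print(out.params['tau'].value, out.redchi)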
diff --git a/lmfit/model.py b/lmfit/model.py
index d627c72..5dd1643 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -1,1031 +1,1067 @@
-"""
-Concise nonlinear curve fitting.
-"""
-from __future__ import print_function
-import warnings
-import inspect
-import operator
-from copy import deepcopy
-import numpy as np
-from . import Parameters, Parameter, Minimizer
-from .printfuncs import fit_report, ci_report
-from .confidence import conf_interval
-
-from collections import MutableSet
-
-try:
-    from collections import OrderedDict
-except ImportError:
-    from ordereddict import OrderedDict
-
-# Use pandas.isnull for aligning missing data if pandas is available,
-# otherwise use numpy.isnan.
-try:
-    from pandas import isnull, Series
-except ImportError:
-    isnull = np.isnan
-    Series = type(NotImplemented)
-
-def _align(var, mask, data):
-    "align missing data, with pandas is available"
-    if isinstance(data, Series) and isinstance(var, Series):
-        return var.reindex_like(data).dropna()
-    elif mask is not None:
-        return var[mask]
-    return var
-
-
-try:
-    from matplotlib import pyplot as plt
-    _HAS_MATPLOTLIB = True
-except ImportError:
-    _HAS_MATPLOTLIB = False
-
-
-def _ensureMatplotlib(function):
-    if _HAS_MATPLOTLIB:
-        return function
-    else:
-        def no_op(*args, **kwargs):
-            print('matplotlib module is required for plotting the results')
-
-        return no_op
-
-
-class Model(object):
-    """Create a model from a user-defined function.
-
-    Parameters
-    ----------
-    func: function to be wrapped
-    independent_vars: list of strings or None (default)
-        arguments to func that are independent variables
-    param_names: list of strings or None (default)
-        names of arguments to func that are to be made into parameters
-    missing: None, 'none', 'drop', or 'raise'
-        'none' or None: Do not check for null or missing values (default)
-        'drop': Drop null or missing observations in data.
-            if pandas is installed, pandas.isnull is used, otherwise
-            numpy.isnan is used.
-        'raise': Raise a (more helpful) exception when data contains null
-            or missing values.
-    name: None or string
-        name for the model. When `None` (default) the name is the same as
-        the model function (`func`).
-
-    Note
-    ----
-    Parameter names are inferred from the function arguments,
-    and a residual function is automatically constructed.
-
-    Example
-    -------
-    >>> def decay(t, tau, N):
-    ...     return N*np.exp(-t/tau)
-    ...
-    >>> my_model = Model(decay, independent_vars=['t'])
-    """
-
-    _forbidden_args = ('data', 'weights', 'params')
-    _invalid_ivar  = "Invalid independent variable name ('%s') for function %s"
-    _invalid_par   = "Invalid parameter name ('%s') for function %s"
-    _invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
-    _valid_missing   = (None, 'none', 'drop', 'raise')
-
-    _invalid_hint = "unknown parameter hint '%s' for param '%s'"
-    _hint_names = ('value', 'vary', 'min', 'max', 'expr')
-
-    def __init__(self, func, independent_vars=None, param_names=None,
-                 missing='none', prefix='', name=None, **kws):
-        self.func = func
-        self._prefix = prefix
-        self._param_root_names = param_names  # will not include prefixes
-        self.independent_vars = independent_vars
-        self._func_allargs = []
-        self._func_haskeywords = False
-        if missing not in self._valid_missing:
-            raise ValueError(self._invalid_missing)
-        self.missing = missing
-        self.opts = kws
-        self.param_hints = OrderedDict()
-        # the following has been changed from OrderedSet for the time being
-        self._param_names = []
-        self._parse_params()
-        if self.independent_vars is None:
-            self.independent_vars = []
-        if name is None and hasattr(self.func, '__name__'):
-            name = self.func.__name__
-        self._name = name
-
-    def _reprstring(self, long=False):
-        out = self._name
-        opts = []
-        if len(self._prefix) > 0:
-            opts.append("prefix='%s'" % (self._prefix))
-        if long:
-            for k, v in self.opts.items():
-                opts.append("%s='%s'" % (k, v))
-        if len(opts) > 0:
-            out = "%s, %s" % (out, ', '.join(opts))
-        return "Model(%s)" % out
-
-    @property
-    def name(self):
-        return self._reprstring(long=False)
-
-    @name.setter
-    def name(self, value):
-        self._name = value
-
-    @property
-    def prefix(self):
-        return self._prefix
-
-    @property
-    def param_names(self):
-        return self._param_names
-
-    def __repr__(self):
-        return "<lmfit.Model: %s>" % (self.name)
-
-    def copy(self, **kwargs):
-        """DOES NOT WORK"""
-        raise NotImplementedError("Model.copy does not work. Make a new Model")
-
-    def _parse_params(self):
-        "build params from function arguments"
-        if self.func is None:
-            return
-        argspec = inspect.getargspec(self.func)
-        pos_args = argspec.args[:]
-        keywords = argspec.keywords
-        kw_args = {}
-        if argspec.defaults is not None:
-            for val in reversed(argspec.defaults):
-                kw_args[pos_args.pop()] = val
-
-        self._func_haskeywords = keywords is not None
-        self._func_allargs = pos_args + list(kw_args.keys())
-        allargs = self._func_allargs
-
-        if len(allargs) == 0 and keywords is not None:
-            return
-
-        # default independent_var = 1st argument
-        if self.independent_vars is None:
-            self.independent_vars = [pos_args[0]]
-
-        # default param names: all positional args
-        # except independent variables
-        self.def_vals = {}
-        might_be_param = []
-        if self._param_root_names is None:
-            self._param_root_names = pos_args[:]
-            for key, val in kw_args.items():
-                if (not isinstance(val, bool) and
-                    isinstance(val, (float, int))):
-                    self._param_root_names.append(key)
-                    self.def_vals[key] = val
-                elif val is None:
-                    might_be_param.append(key)
-            for p in self.independent_vars:
-                if p in self._param_root_names:
-                    self._param_root_names.remove(p)
-
-        new_opts = {}
-        for opt, val in self.opts.items():
-            if (opt in self._param_root_names or opt in might_be_param and
-                isinstance(val, Parameter)):
-                self.set_param_hint(opt, value=val.value,
-                                    min=val.min, max=val.max, expr=val.expr)
-            elif opt in self._func_allargs:
-                new_opts[opt] = val
-        self.opts = new_opts
-
-        names = []
-        if self._prefix is None:
-            self._prefix = ''
-        for pname in self._param_root_names:
-            names.append("%s%s" % (self._prefix, pname))
-
-        # check variable names for validity
-        # The implicit magic in fit() requires us to disallow some
-        fname = self.func.__name__
-        for arg in self.independent_vars:
-            if arg not in allargs or arg in self._forbidden_args:
-                raise ValueError(self._invalid_ivar % (arg, fname))
-        for arg in names:
-            if (self._strip_prefix(arg) not in allargs or
-                arg in self._forbidden_args):
-                raise ValueError(self._invalid_par % (arg, fname))
-        # the following has been changed from OrderedSet for the time being.
-        self._param_names = names[:]
-
-    def set_param_hint(self, name, **kwargs):
-        """set hints for parameter, including optional bounds
-        and constraints  (value, vary, min, max, expr)
-        these will be used by make_params() when building
-        default parameters
-
-        example:
-          model = GaussianModel()
-          model.set_param_hint('amplitude', min=-100.0, max=0.)
-        """
-        npref = len(self._prefix)
-        if npref > 0 and name.startswith(self._prefix):
-            name = name[npref:]
-
-        thishint = {}
-        if name in self.param_hints:
-            thishint = self.param_hints.pop(name)
-        thishint.update(kwargs)
-
-        self.param_hints[name] = OrderedDict()
-        for key, val in thishint.items():
-            if key in self._hint_names:
-                self.param_hints[name][key] = val
-            else:
-                warnings.warn(self._invalid_hint % (key, name))
-
-    def make_params(self, verbose=False, **kwargs):
-        """create and return a Parameters object for a Model.
-        This applies any default values
-        """
-        params = Parameters()
-        # first build parameters defined in param_hints
-        # note that composites may define their own additional
-        # convenience parameters here
-        for basename, hint in self.param_hints.items():
-            name = "%s%s" % (self._prefix, basename)
-            if name in params:
-                par = params[name]
-            else:
-                par = Parameter(name=name)
-            par._delay_asteval = True
-            for item in self._hint_names:
-                if item in hint:
-                    setattr(par, item, hint[item])
-            # Add the new parameter to self._param_names
-            if name not in self._param_names:
-                self._param_names.append(name)
-            params.add(par)
-            if verbose:
-                print(' - Adding parameter for hint "%s"' % name)
-
-        # next, make sure that all named parameters are included
-        for name in self.param_names:
-            if name in params:
-                par = params[name]
-            else:
-                par = Parameter(name=name)
-            par._delay_asteval = True
-            basename = name[len(self._prefix):]
-            # apply defaults from model function definition
-            if basename in self.def_vals:
-                par.value = self.def_vals[basename]
-            # apply defaults from parameter hints
-            if basename in self.param_hints:
-                hint = self.param_hints[basename]
-                for item in self._hint_names:
-                    if item in hint:
-                        setattr(par, item, hint[item])
-            # apply values passed in through kw args
-            if basename in kwargs:
-                # kw parameter names with no prefix
-                par.value = kwargs[basename]
-            if name in kwargs:
-                # kw parameter names with prefix
-                par.value = kwargs[name]
-            params.add(par)
-            if verbose:
-                print(' - Adding parameter "%s"' % name)
-
-        for p in params.values():
-            p._delay_asteval = False
-        return params
-
-    def guess(self, data=None, **kws):
-        """stub for guess starting values --
-        should be implemented for each model subclass to
-        run self.make_params(), update starting values
-        and return a Parameters object"""
-        cname = self.__class__.__name__
-        msg = 'guess() not implemented for %s' % cname
-        raise NotImplementedError(msg)
-
-    def _residual(self, params, data, weights, **kwargs):
-        "default residual:  (data-model)*weights"
-        diff = self.eval(params, **kwargs) - data
-        if weights is not None:
-            diff *= weights
-        return np.asarray(diff).ravel()  # for compatibility with pandas.Series
-
-    def _handle_missing(self, data):
-        "handle missing data"
-        if self.missing == 'raise':
-            if np.any(isnull(data)):
-                raise ValueError("Data contains a null value.")
-        elif self.missing == 'drop':
-            mask = ~isnull(data)
-            if np.all(mask):
-                return None  # short-circuit this -- no missing values
-            mask = np.asarray(mask)  # for compatibility with pandas.Series
-            return mask
-
-    def _strip_prefix(self, name):
-        npref = len(self._prefix)
-        if npref > 0 and name.startswith(self._prefix):
-            name = name[npref:]
-        return name
-
-    def make_funcargs(self, params=None, kwargs=None, strip=True):
-        """convert parameter values and keywords to function arguments"""
-        if params is None: params = {}
-        if kwargs is None: kwargs = {}
-        out = {}
-        out.update(self.opts)
-        for name, par in params.items():
-            if strip:
-                name = self._strip_prefix(name)
-            if name in self._func_allargs or self._func_haskeywords:
-                out[name] = par.value
-
-        # kwargs handled slightly differently -- may set param value too!
-        for name, val in kwargs.items():
-            if strip:
-                name = self._strip_prefix(name)
-            if name in self._func_allargs or self._func_haskeywords:
-                out[name] = val
-                if name in params:
-                    params[name].value = val
-        return out
-
-    def _make_all_args(self, params=None, **kwargs):
-        """generate **all** function args for all functions"""
-        args = {}
-        for key, val in self.make_funcargs(params, kwargs).items():
-            args["%s%s" % (self._prefix, key)] = val
-        return args
-
-    def eval(self, params=None, **kwargs):
-        """evaluate the model with the supplied parameters"""
-        result = self.func(**self.make_funcargs(params, kwargs))
-        # Handle special case of constant result and one
-        # independent variable (of any dimension).
-        if np.ndim(result) == 0 and len(self.independent_vars) == 1:
-            result = np.tile(result, kwargs[self.independent_vars[0]].shape)
-        return result
-
-    @property
-    def components(self):
-        """return components for composite model"""
-        return [self]
-
-    def eval_components(self, params=None, **kwargs):
-        """
-        evaluate the model with the supplied parameters and return an ordered
-        dict containing name, result pairs.
-        """
-        key = self._prefix
-        if len(key) < 1:
-            key = self._name
-        return {key: self.eval(params=params, **kwargs)}
-
-    def fit(self, data, params=None, weights=None, method='leastsq',
-            iter_cb=None, scale_covar=True, verbose=True, fit_kws=None, **kwargs):
-        """Fit the model to the data.
-
-        Parameters
-        ----------
-        data: array-like
-        params: Parameters object
-        weights: array-like of same size as data
-            used for weighted fit
-        method: fitting method to use (default = 'leastsq')
-        iter_cb: None or callable, callback function to call at each iteration.
-        scale_covar: bool (default True) whether to auto-scale the covariance matrix
-        verbose: bool (default True) print a message when a new parameter is
-            added because of a hint.
-        fit_kws: dict
-            default fitting options, such as xtol and maxfev, for scipy optimizer
-        keyword arguments: optional, named like the arguments of the
-            model function, will override params. See examples below.
-
-        Returns
-        -------
-        lmfit.ModelResult
-
-        Examples
-        --------
-        # Take t to be the independent variable and data to be the
-        # curve we will fit.
-
-        # Using keyword arguments to set initial guesses
-        >>> result = my_model.fit(data, tau=5, N=3, t=t)
-
-        # Or, for more control, pass a Parameters object.
-        >>> result = my_model.fit(data, params, t=t)
-
-        # Keyword arguments override Parameters.
-        >>> result = my_model.fit(data, params, tau=5, t=t)
-
-        Note
-        ----
-        All parameters, however passed, are copied on input, so the original
-        Parameter objects are unchanged.
-
-        """
-        if params is None:
-            params = self.make_params(verbose=verbose)
-        else:
-            params = deepcopy(params)
-
-        # If any kwargs match parameter names, override params.
-        param_kwargs = set(kwargs.keys()) & set(self.param_names)
-        for name in param_kwargs:
-            p = kwargs[name]
-            if isinstance(p, Parameter):
-                p.name = name  # allows N=Parameter(value=5) with implicit name
-                params[name] = deepcopy(p)
-            else:
-                params[name].set(value=p)
-            del kwargs[name]
-
-        # All remaining kwargs should correspond to independent variables.
-        for name in kwargs.keys():
-            if name not in self.independent_vars:
-                warnings.warn("The keyword argument %s does not" % name +
-                              "match any arguments of the model function." +
-                              "It will be ignored.", UserWarning)
-
-        # If any parameter is not initialized raise a more helpful error.
-        missing_param = any([p not in params.keys()
-                             for p in self.param_names])
-        blank_param = any([(p.value is None and p.expr is None)
-                           for p in params.values()])
-        if missing_param or blank_param:
-            msg = ('Assign each parameter an initial value by passing '
-                   'Parameters or keyword arguments to fit.\n')
-            missing = [p for p in self.param_names if p not in params.keys()]
-            blank = [name for name, p in params.items()
-                                    if (p.value is None and p.expr is None)]
-            msg += 'Missing parameters: %s\n' % str(missing)
-            msg += 'Non-initialized parameters: %s' % str(blank)
-            raise ValueError(msg)
-
-        # Do not alter anything that implements the array interface (np.array, pd.Series)
-        # but convert other iterables (e.g., Python lists) to numpy arrays.
-        if not hasattr(data, '__array__'):
-            data = np.asfarray(data)
-        for var in self.independent_vars:
-            var_data = kwargs[var]
-            if (not hasattr(var_data, '__array__')) and (not np.isscalar(var_data)):
-                kwargs[var] = np.asfarray(var_data)
-
-        # Handle null/missing values.
-        mask = None
-        if self.missing not in (None, 'none'):
-            mask = self._handle_missing(data)  # This can raise.
-            if mask is not None:
-                data = data[mask]
-            if weights is not None:
-                weights = _align(weights, mask, data)
-
-        # If independent_vars and data are alignable (pandas), align them,
-        # and apply the mask from above if there is one.
-        for var in self.independent_vars:
-            if not np.isscalar(kwargs[var]):
-                kwargs[var] = _align(kwargs[var], mask, data)
-
-        if fit_kws is None:
-            fit_kws = {}
-
-        output = ModelResult(self, params, method=method, iter_cb=iter_cb,
-                             scale_covar=scale_covar, fcn_kws=kwargs,
-                             **fit_kws)
-        output.fit(data=data, weights=weights)
-        output.components = self.components
-        return output
-
-    def __add__(self, other):
-        return CompositeModel(self, other, operator.add)
-
-    def __sub__(self, other):
-        return CompositeModel(self, other, operator.sub)
-
-    def __mul__(self, other):
-        return CompositeModel(self, other, operator.mul)
-
-    def __div__(self, other):
-        return CompositeModel(self, other, operator.truediv)
-
-    def __truediv__(self, other):
-        return CompositeModel(self, other, operator.truediv)
-
-
-class CompositeModel(Model):
-    """Create a composite model -- a binary operator of two Models
-
-    Parameters
-    ----------
-    left:    left-hand side model -- must be a Model()
-    right:   right-hand side model -- must be a Model()
-    op:      callable binary operator (typically operator.add, operator.mul, etc.)
-
-    independent_vars: list of strings or None (default)
-        arguments to func that are independent variables
-    param_names: list of strings or None (default)
-        names of arguments to func that are to be made into parameters
-    missing: None, 'none', 'drop', or 'raise'
-        'none' or None: Do not check for null or missing values (default)
-        'drop': Drop null or missing observations in data.
-            if pandas is installed, pandas.isnull is used, otherwise
-            numpy.isnan is used.
-        'raise': Raise a (more helpful) exception when data contains null
-            or missing values.
-    name: None or string
-        name for the model. When `None` (default) the name is the same as
-        the model function (`func`).
-
-    """
-    _names_collide = ("\nTwo models have parameters named '{clash}'. "
-                      "Use distinct names.")
-    _bad_arg   = "CompositeModel: argument {arg} is not a Model"
-    _bad_op    = "CompositeModel: operator {op} is not callable"
-    _known_ops = {operator.add: '+', operator.sub: '-',
-                  operator.mul: '*', operator.truediv: '/'}
-
-    def __init__(self, left, right, op, **kws):
-        if not isinstance(left, Model):
-            raise ValueError(self._bad_arg.format(arg=left))
-        if not isinstance(right, Model):
-            raise ValueError(self._bad_arg.format(arg=right))
-        if not callable(op):
-            raise ValueError(self._bad_op.format(op=op))
-
-        self.left  = left
-        self.right = right
-        self.op    = op
-
-        name_collisions = set(left.param_names) & set(right.param_names)
-        if len(name_collisions) > 0:
-            msg = ''
-            for collision in name_collisions:
-                msg += self._names_collide.format(clash=collision)
-            raise NameError(msg)
-
-        # we assume that all the sub-models have the same independent vars
-        if 'independent_vars' not in kws:
-            kws['independent_vars'] = self.left.independent_vars
-        if 'missing' not in kws:
-            kws['missing'] = self.left.missing
-
-        def _tmp(self, *args, **kws): pass
-        Model.__init__(self, _tmp, **kws)
-
-        for side in (left, right):
-            prefix = side.prefix
-            for basename, hint in side.param_hints.items():
-                self.param_hints["%s%s" % (prefix, basename)] = hint
-
-    def _parse_params(self):
-        self._func_haskeywords = (self.left._func_haskeywords or
-                                  self.right._func_haskeywords)
-        self._func_allargs = (self.left._func_allargs +
-                              self.right._func_allargs)
-        self.def_vals = deepcopy(self.right.def_vals)
-        self.def_vals.update(self.left.def_vals)
-        self.opts = deepcopy(self.right.opts)
-        self.opts.update(self.left.opts)
-
-    def _reprstring(self, long=False):
-        return "(%s %s %s)" % (self.left._reprstring(long=long),
-                               self._known_ops.get(self.op, self.op),
-                               self.right._reprstring(long=long))
-
-    def eval(self, params=None, **kwargs):
-        return self.op(self.left.eval(params=params, **kwargs),
-                       self.right.eval(params=params, **kwargs))
-
-    def eval_components(self, **kwargs):
-        """return ordered dict of name, results for each component"""
-        out = OrderedDict(self.left.eval_components(**kwargs))
-        out.update(self.right.eval_components(**kwargs))
-        return out
-
-    @property
-    def param_names(self):
-        return self.left.param_names + self.right.param_names
-
-    @property
-    def components(self):
-        """return components for composite model"""
-        return self.left.components + self.right.components
-
-    def _make_all_args(self, params=None, **kwargs):
-        """generate **all** function args for all functions"""
-        out = self.right._make_all_args(params=params, **kwargs)
-        out.update(self.left._make_all_args(params=params, **kwargs))
-        return out
-
-
-class ModelResult(Minimizer):
-    """Result from Model fit
-
-    Attributes
-    ----------
-    model         instance of Model -- the model function
-    params        instance of Parameters -- the fit parameters
-    data          array of data values to compare to model
-    weights       array of weights used in fitting
-    init_params   copy of params, before being updated by fit()
-    init_values   array of parameter values, before being updated by fit()
-    init_fit      model evaluated with init_params.
-    best_fit      model evaluated with params after being updated by fit()
-
-    Methods
-    -------
-    fit(data=None, params=None, weights=None, method=None, **kwargs)
-         fit (or re-fit) model with params to data (with weights)
-         using the supplied method.  The keyword arguments are passed
-         as keyword arguments to the model function.
-
-         all inputs are optional, defaulting to the value used in
-         the previous fit.  This allows easily changing data or
-         parameter settings, or both.
-
-    eval(**kwargs)
-         evaluate the current model, with the current parameter values,
-         with values in kwargs sent to the model function.
-
-    eval_components(**kwargs)
-         evaluate the current model, with the current parameter values,
-         with values in kwargs sent to the model function, and returns
-         an ordered dict with the model names as keys and the component
-         results as values.
-
-    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
-         return a fit report.
-
-    plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
-             numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
-             ax_kws=None)
-         Plot the fit results using matplotlib.
-
-    plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
-                   ax_kws=None)
-         Plot the fit residuals using matplotlib.
-
-    plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
-         data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
-         ax_fit_kws=None, fig_kws=None)
-         Plot the fit results and residuals using matplotlib.
-    """
-    def __init__(self, model, params, data=None, weights=None,
-                 method='leastsq', fcn_args=None, fcn_kws=None,
-                 iter_cb=None, scale_covar=True, **fit_kws):
-        self.model = model
-        self.data = data
-        self.weights = weights
-        self.method = method
-        self.ci_out = None
-        self.init_params = deepcopy(params)
-        Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args,
-                           fcn_kws=fcn_kws, iter_cb=iter_cb,
-                           scale_covar=scale_covar, **fit_kws)
-
-    def fit(self, data=None, params=None, weights=None, method=None, **kwargs):
-        """perform fit for a Model, given data and params"""
-        if data is not None:
-            self.data = data
-        if params is not None:
-            self.init_params = params
-        if weights is not None:
-            self.weights = weights
-        if method is not None:
-            self.method = method
-        self.ci_out = None
-        self.userargs = (self.data, self.weights)
-        self.userkws.update(kwargs)
-        self.init_fit    = self.model.eval(params=self.params, **self.userkws)
-
-        _ret = self.minimize(method=self.method)
-
-        for attr in dir(_ret):
-            if not attr.startswith('_'):
-                try:
-                    setattr(self, attr, getattr(_ret, attr))
-                except AttributeError:
-                    pass
-
-        self.init_values = self.model._make_all_args(self.init_params)
-        self.best_values = self.model._make_all_args(_ret.params)
-        self.best_fit    = self.model.eval(params=_ret.params, **self.userkws)
-
-    def eval(self, **kwargs):
-        self.userkws.update(kwargs)
-        return self.model.eval(params=self.params, **self.userkws)
-
-    def eval_components(self, **kwargs):
-        self.userkws.update(kwargs)
-        return self.model.eval_components(params=self.params, **self.userkws)
-
-    def conf_interval(self, **kwargs):
-        """return explicitly calculated confidence intervals"""
-        if self.ci_out is None:
-            self.ci_out = conf_interval(self, self, **kwargs)
-        return self.ci_out
-
-    def ci_report(self, with_offset=True, ndigits=5, **kwargs):
-        """return nicely formatted report about confidence intervals"""
-        return ci_report(self.conf_interval(**kwargs),
-                         with_offset=with_offset, ndigits=ndigits)
-
-    def fit_report(self, **kwargs):
-        "return fit report"
-        return '[[Model]]\n    %s\n%s\n' % (self.model._reprstring(long=True),
-                                            fit_report(self, **kwargs))
-
-    @_ensureMatplotlib
-    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
-                 numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
-                 ax_kws=None):
-        """Plot the fit results using matplotlib.
-
-        The method will plot results of the fit using matplotlib, including:
-        the data points, the initial fit curve and the fitted curve. If the fit
-        model included weights, errorbars will also be plotted.
-
-        Parameters
-        ----------
-        ax : matplotlib.axes.Axes, optional
-            The axes to plot on. The default is None, which means use the
-            current pyplot axis or create one if there is none.
-        datafmt : string, optional
-            matplotlib format string for data points
-        fitfmt : string, optional
-            matplotlib format string for fitted curve
-        initfmt : string, optional
-            matplotlib format string for initial conditions for the fit
-        yerr : ndarray, optional
-            array of uncertainties for data array
-        numpoints : int, optional
-            If provided, the final and initial fit curves are evaluated not
-            only at data points, but refined to contain `numpoints` points in
-            total.
-        data_kws : dictionary, optional
-            keyword arguments passed on to the plot function for data points
-        fit_kws : dictionary, optional
-            keyword arguments passed on to the plot function for fitted curve
-        init_kws : dictionary, optional
-            keyword arguments passed on to the plot function for the initial
-            conditions of the fit
-        ax_kws : dictionary, optional
-            keyword arguments for a new axis, if there is one being created
-
-        Returns
-        -------
-        matplotlib.axes.Axes
-
-        Notes
-        -----
-        For details about plot format strings and keyword arguments see
-        documentation of matplotlib.axes.Axes.plot.
-
-        If yerr is specified or if the fit model included weights, then
-        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
-        not specified and the fit includes weights, yerr is set to 1/self.weights.
-
-        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
-
-        See Also
-        --------
-        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
-        ModelResult.plot : Plot the fit results and residuals using matplotlib.
-        """
-        if data_kws is None:
-            data_kws = {}
-        if fit_kws is None:
-            fit_kws = {}
-        if init_kws is None:
-            init_kws = {}
-        if ax_kws is None:
-            ax_kws = {}
-
-        if len(self.model.independent_vars) == 1:
-            independent_var = self.model.independent_vars[0]
-        else:
-            print('Fit can only be plotted if the model function has one '
-                  'independent variable.')
-            return False
-
-        if not isinstance(ax, plt.Axes):
-            ax = plt.gca(**ax_kws)
-
-        x_array = self.userkws[independent_var]
-
-        # make a dense array for x-axis if data is not dense
-        if numpoints is not None and len(self.data) < numpoints:
-            x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)
-        else:
-            x_array_dense = x_array
-
-        ax.plot(x_array_dense, self.model.eval(self.init_params,
-                **{independent_var: x_array_dense}), initfmt,
-                label='init', **init_kws)
-        ax.plot(x_array_dense, self.model.eval(self.params,
-                **{independent_var: x_array_dense}), fitfmt,
-                label='best-fit', **fit_kws)
-
-        if yerr is None and self.weights is not None:
-            yerr = 1.0/self.weights
-        if yerr is not None:
-            ax.errorbar(x_array, self.data, yerr=yerr,
-                        fmt=datafmt, label='data', **data_kws)
-        else:
-            ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
-
-        ax.set_title(self.model.name)
-        ax.set_xlabel(independent_var)
-        ax.set_ylabel('y')
-        ax.legend()
-
-        return ax
-
-    @_ensureMatplotlib
-    def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
-                       fit_kws=None, ax_kws=None):
-        """Plot the fit residuals using matplotlib.
-
-        The method will plot residuals of the fit using matplotlib, including:
-        the data points and the fitted curve (as horizontal line). If the fit
-        model included weights, errorbars will also be plotted.
-
-        Parameters
-        ----------
-        ax : matplotlib.axes.Axes, optional
-            The axes to plot on. The default is None, which means use the
-            current pyplot axis or create one if there is none.
-        datafmt : string, optional
-            matplotlib format string for data points
-        yerr : ndarray, optional
-            array of uncertainties for data array
-        data_kws : dictionary, optional
-            keyword arguments passed on to the plot function for data points
-        fit_kws : dictionary, optional
-            keyword arguments passed on to the plot function for fitted curve
-        ax_kws : dictionary, optional
-            keyword arguments for a new axis, if there is one being created
-
-        Returns
-        -------
-        matplotlib.axes.Axes
-
-        Notes
-        -----
-        For details about plot format strings and keyword arguments see
-        documentation of matplotlib.axes.Axes.plot.
-
-        If yerr is specified or if the fit model included weights, then
-        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
-        not specified and the fit includes weights, yerr is set to 1/self.weights.
-
-        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
-
-        See Also
-        --------
-        ModelResult.plot_fit : Plot the fit results using matplotlib.
-        ModelResult.plot : Plot the fit results and residuals using matplotlib.
-        """
-        if data_kws is None:
-            data_kws = {}
-        if fit_kws is None:
-            fit_kws = {}
-        if ax_kws is None:
-            ax_kws = {}
-
-        if len(self.model.independent_vars) == 1:
-            independent_var = self.model.independent_vars[0]
-        else:
-            print('Fit can only be plotted if the model function has one '
-                  'independent variable.')
-            return False
-
-        if not isinstance(ax, plt.Axes):
-            ax = plt.gca(**ax_kws)
-
-        x_array = self.userkws[independent_var]
-
-        ax.axhline(0, **fit_kws)
-
-        if yerr is None and self.weights is not None:
-            yerr = 1.0/self.weights
-        if yerr is not None:
-            ax.errorbar(x_array, self.eval() - self.data, yerr=yerr,
-                        fmt=datafmt, label='residuals', **data_kws)
-        else:
-            ax.plot(x_array, self.eval() - self.data, datafmt,
-                    label='residuals', **data_kws)
-
-        ax.set_title(self.model.name)
-        ax.set_ylabel('residuals')
-        ax.legend()
-
-        return ax
-
-    @_ensureMatplotlib
-    def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
-             numpoints=None, fig=None, data_kws=None, fit_kws=None,
-             init_kws=None, ax_res_kws=None, ax_fit_kws=None,
-             fig_kws=None):
-        """Plot the fit results and residuals using matplotlib.
-
-        The method will produce a matplotlib figure with both results of the
-        fit and the residuals plotted. If the fit model included weights,
-        errorbars will also be plotted.
-
-        Parameters
-        ----------
-        datafmt : string, optional
-            matplotlib format string for data points
-        fitfmt : string, optional
-            matplotlib format string for fitted curve
-        initfmt : string, optional
-            matplotlib format string for initial conditions for the fit
-        yerr : ndarray, optional
-            array of uncertainties for data array
-        numpoints : int, optional
-            If provided, the final and initial fit curves are evaluated not
-            only at data points, but refined to contain `numpoints` points in
-            total.
-        fig : matplotlib.figure.Figure, optional
-            The figure to plot on. The default is None, which means use the
-            current pyplot figure or create one if there is none.
-        data_kws : dictionary, optional
-            keyword arguments passed on to the plot function for data points
-        fit_kws : dictionary, optional
-            keyword arguments passed on to the plot function for fitted curve
-        init_kws : dictionary, optional
-            keyword arguments passed on to the plot function for the initial
-            conditions of the fit
-        ax_res_kws : dictionary, optional
-            keyword arguments for the axes for the residuals plot
-        ax_fit_kws : dictionary, optional
-            keyword arguments for the axes for the fit plot
-        fig_kws : dictionary, optional
-            keyword arguments for a new figure, if there is one being created
-
-        Returns
-        -------
-        matplotlib.figure.Figure
-
-        Notes
-        -----
-        The method combines ModelResult.plot_fit and ModelResult.plot_residuals.
-
-        If yerr is specified or if the fit model included weights, then
-        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
-        not specified and the fit includes weights, yerr is set to 1/self.weights.
-
-        If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.
-
-        See Also
-        --------
-        ModelResult.plot_fit : Plot the fit results using matplotlib.
-        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
-        """
-        if data_kws is None:
-            data_kws = {}
-        if fit_kws is None:
-            fit_kws = {}
-        if init_kws is None:
-            init_kws = {}
-        if ax_res_kws is None:
-            ax_res_kws = {}
-        if ax_fit_kws is None:
-            ax_fit_kws = {}
-        if fig_kws is None:
-            fig_kws = {}
-
-        if len(self.model.independent_vars) != 1:
-            print('Fit can only be plotted if the model function has one '
-                  'independent variable.')
-            return False
-
-        if not isinstance(fig, plt.Figure):
-            fig = plt.figure(**fig_kws)
-
-        gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
-        ax_res = fig.add_subplot(gs[0], **ax_res_kws)
-        ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
-
-        self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
-                      initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
-                      fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
-        self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
-                            data_kws=data_kws, fit_kws=fit_kws,
-                            ax_kws=ax_res_kws)
-
-        return fig
+"""
+Concise nonlinear curve fitting.
+"""
+from __future__ import print_function
+import warnings
+import inspect
+import operator
+from copy import deepcopy
+import numpy as np
+from . import Parameters, Parameter, Minimizer
+from .printfuncs import fit_report, ci_report
+from .confidence import conf_interval
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+# Use pandas.isnull for aligning missing data if pandas is available,
+# otherwise use numpy.isnan.
+try:
+    from pandas import isnull, Series
+except ImportError:
+    isnull = np.isnan
+    Series = type(NotImplemented)
+
+def _align(var, mask, data):
+    "align missing data, with pandas is available"
+    if isinstance(data, Series) and isinstance(var, Series):
+        return var.reindex_like(data).dropna()
+    elif mask is not None:
+        return var[mask]
+    return var
+
+
+try:
+    from matplotlib import pyplot as plt
+    _HAS_MATPLOTLIB = True
+except ImportError:
+    _HAS_MATPLOTLIB = False
+
+
+def _ensureMatplotlib(function):
+    if _HAS_MATPLOTLIB:
+        return function
+    else:
+        def no_op(*args, **kwargs):
+            print('matplotlib module is required for plotting the results')
+
+        return no_op
+
+
+class Model(object):
+    """Create a model from a user-defined function.
+
+    Parameters
+    ----------
+    func: function to be wrapped
+    independent_vars: list of strings or None (default)
+        arguments to func that are independent variables
+    param_names: list of strings or None (default)
+        names of arguments to func that are to be made into parameters
+    missing: None, 'none', 'drop', or 'raise'
+        'none' or None: Do not check for null or missing values (default)
+        'drop': Drop null or missing observations in data.
+            if pandas is installed, pandas.isnull is used, otherwise
+            numpy.isnan is used.
+        'raise': Raise a (more helpful) exception when data contains null
+            or missing values.
+    name: None or string
+        name for the model. When `None` (default) the name is the same as
+        the model function (`func`).
+
+    Note
+    ----
+    Parameter names are inferred from the function arguments,
+    and a residual function is automatically constructed.
+
+    Example
+    -------
+    >>> def decay(t, tau, N):
+    ...     return N*np.exp(-t/tau)
+    ...
+    >>> my_model = Model(decay, independent_vars=['t'])
+    """
+
+    _forbidden_args = ('data', 'weights', 'params')
+    _invalid_ivar  = "Invalid independent variable name ('%s') for function %s"
+    _invalid_par   = "Invalid parameter name ('%s') for function %s"
+    _invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
+    _valid_missing   = (None, 'none', 'drop', 'raise')
+
+    _invalid_hint = "unknown parameter hint '%s' for param '%s'"
+    _hint_names = ('value', 'vary', 'min', 'max', 'expr')
+
+    def __init__(self, func, independent_vars=None, param_names=None,
+                 missing='none', prefix='', name=None, **kws):
+        self.func = func
+        self._prefix = prefix
+        self._param_root_names = param_names  # will not include prefixes
+        self.independent_vars = independent_vars
+        self._func_allargs = []
+        self._func_haskeywords = False
+        if missing not in self._valid_missing:
+            raise ValueError(self._invalid_missing)
+        self.missing = missing
+        self.opts = kws
+        self.param_hints = OrderedDict()
+        # the following has been changed from OrderedSet for the time being
+        self._param_names = []
+        self._parse_params()
+        if self.independent_vars is None:
+            self.independent_vars = []
+        if name is None and hasattr(self.func, '__name__'):
+            name = self.func.__name__
+        self._name = name
+
+    def _reprstring(self, long=False):
+        out = self._name
+        opts = []
+        if len(self._prefix) > 0:
+            opts.append("prefix='%s'" % (self._prefix))
+        if long:
+            for k, v in self.opts.items():
+                opts.append("%s='%s'" % (k, v))
+        if len(opts) > 0:
+            out = "%s, %s" % (out, ', '.join(opts))
+        return "Model(%s)" % out
+
+    @property
+    def name(self):
+        return self._reprstring(long=False)
+
+    @name.setter
+    def name(self, value):
+        self._name = value
+
+    @property
+    def prefix(self):
+        return self._prefix
+
+    @property
+    def param_names(self):
+        return self._param_names
+
+    def __repr__(self):
+        return "<lmfit.Model: %s>" % (self.name)
+
+    def copy(self, **kwargs):
+        """DOES NOT WORK"""
+        raise NotImplementedError("Model.copy does not work. Make a new Model")
+
+    def _parse_params(self):
+        "build params from function arguments"
+        if self.func is None:
+            return
+        argspec = inspect.getargspec(self.func)
+        pos_args = argspec.args[:]
+        keywords = argspec.keywords
+        kw_args = {}
+        if argspec.defaults is not None:
+            for val in reversed(argspec.defaults):
+                kw_args[pos_args.pop()] = val
+
+        self._func_haskeywords = keywords is not None
+        self._func_allargs = pos_args + list(kw_args.keys())
+        allargs = self._func_allargs
+
+        if len(allargs) == 0 and keywords is not None:
+            return
+
+        # default independent_var = 1st argument
+        if self.independent_vars is None:
+            self.independent_vars = [pos_args[0]]
+
+        # default param names: all positional args
+        # except independent variables
+        self.def_vals = {}
+        might_be_param = []
+        if self._param_root_names is None:
+            self._param_root_names = pos_args[:]
+            for key, val in kw_args.items():
+                if (not isinstance(val, bool) and
+                    isinstance(val, (float, int))):
+                    self._param_root_names.append(key)
+                    self.def_vals[key] = val
+                elif val is None:
+                    might_be_param.append(key)
+            for p in self.independent_vars:
+                if p in self._param_root_names:
+                    self._param_root_names.remove(p)
+
+        new_opts = {}
+        for opt, val in self.opts.items():
+            if ((opt in self._param_root_names or opt in might_be_param)
+                    and isinstance(val, Parameter)):
+                self.set_param_hint(opt, value=val.value,
+                                    min=val.min, max=val.max, expr=val.expr)
+            elif opt in self._func_allargs:
+                new_opts[opt] = val
+        self.opts = new_opts
+
+        names = []
+        if self._prefix is None:
+            self._prefix = ''
+        for pname in self._param_root_names:
+            names.append("%s%s" % (self._prefix, pname))
+
+        # check variables names for validity
+        # The implicit magic in fit() requires us to disallow some
+        fname = self.func.__name__
+        for arg in self.independent_vars:
+            if arg not in allargs or arg in self._forbidden_args:
+                raise ValueError(self._invalid_ivar % (arg, fname))
+        for arg in names:
+            if (self._strip_prefix(arg) not in allargs or
+                arg in self._forbidden_args):
+                raise ValueError(self._invalid_par % (arg, fname))
+        # the following has been changed from OrderedSet for the time being.
+        self._param_names = names[:]
+
+    def set_param_hint(self, name, **kwargs):
+        """set hints for parameter, including optional bounds
+        and constraints  (value, vary, min, max, expr)
+        these will be used by make_params() when building
+        default parameters
+
+        example:
+          model = GaussianModel()
+          model.set_param_hint('amplitude', min=-100.0, max=0.)
+        """
+        npref = len(self._prefix)
+        if npref > 0 and name.startswith(self._prefix):
+            name = name[npref:]
+
+        if name not in self.param_hints:
+            self.param_hints[name] = OrderedDict()
+
+        for key, val in kwargs.items():
+            if key in self._hint_names:
+                self.param_hints[name][key] = val
+            else:
+                warnings.warn(self._invalid_hint % (key, name))
+
+    def print_param_hints(self, colwidth=8):
+        """Prints a nicely aligned text-table of parameters hints.
+
+        The argument `colwidth` is the width of each column,
+        except for first and last columns.
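+
+        Example (an illustrative sketch; assumes GaussianModel from
+        lmfit.models):
+        >>> model = GaussianModel()
+        >>> model.set_param_hint('sigma', min=0, max=10)
+        >>> model.print_param_hints()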
+        """
+        name_len = max(len(s) for s in self.param_hints)
+        print('{:{name_len}}  {:>{n}} {:>{n}} {:>{n}} {:>{n}}    {:{n}}'
+              .format('Name', 'Value', 'Min', 'Max', 'Vary', 'Expr',
+                      name_len=name_len, n=colwidth))
+        line = ('{name:<{name_len}}  {value:{n}g} {min:{n}g} {max:{n}g} '
+                '{vary!s:>{n}}    {expr}')
+        for name, values in sorted(self.param_hints.items()):
+            pvalues = dict(name=name, value=np.nan, min=-np.inf, max=np.inf,
+                           vary=True, expr='')
+            pvalues.update(**values)
+            print(line.format(name_len=name_len, n=colwidth, **pvalues))
+
+    def make_params(self, verbose=False, **kwargs):
+        """create and return a Parameters object for a Model.
+        This applies any default values
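+
+        Example (illustrative, reusing the `decay` function from the
+        Model docstring):
+        >>> mod = Model(decay, independent_vars=['t'])
+        >>> params = mod.make_params(tau=2.0, N=10)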
+        """
+        params = Parameters()
+
+        # make sure that all named parameters are in params
+        for name in self.param_names:
+            if name in params:
+                par = params[name]
+            else:
+                par = Parameter(name=name)
+            par._delay_asteval = True
+            basename = name[len(self._prefix):]
+            # apply defaults from model function definition
+            if basename in self.def_vals:
+                par.value = self.def_vals[basename]
+            # apply defaults from parameter hints
+            if basename in self.param_hints:
+                hint = self.param_hints[basename]
+                for item in self._hint_names:
+                    if item in hint:
+                        setattr(par, item, hint[item])
+            # apply values passed in through kw args
+            if basename in kwargs:
+                # kw parameter names with no prefix
+                par.value = kwargs[basename]
+            if name in kwargs:
+                # kw parameter names with prefix
+                par.value = kwargs[name]
+            params.add(par)
+            if verbose:
+                print(' - Adding parameter "%s"' % name)
+
+        # next build parameters defined in param_hints
+        # note that composites may define their own additional
+        # convenience parameters here
+        for basename, hint in self.param_hints.items():
+            name = "%s%s" % (self._prefix, basename)
+            if name in params:
+                par = params[name]
+            else:
+                par = Parameter(name=name)
+                params.add(par)
+                if verbose:
+                    print(' - Adding parameter for hint "%s"' % name)
+            par._delay_asteval = True
+            for item in self._hint_names:
+                if item in hint:
+                    setattr(par, item, hint[item])
+            # Add the new parameter to self._param_names
+            if name not in self._param_names:
+                self._param_names.append(name)
+
+        for p in params.values():
+            p._delay_asteval = False
+        return params
+
+    def guess(self, data=None, **kws):
+        """stub for guess starting values --
+        should be implemented for each model subclass to
+        run self.make_params(), update starting values
+        and return a Parameters object"""
+        cname = self.__class__.__name__
+        msg = 'guess() not implemented for %s' % cname
+        raise NotImplementedError(msg)
+
+    def _residual(self, params, data, weights, **kwargs):
+        """default residual:  (data-model)*weights
+
+        If the model returns complex values, the residual is computed by
+        treating the real and imaginary parts separately.  In this case,
+        if the weights provided are real, they are assumed to apply
+        equally to the real and imaginary parts.  If the weights are
+        complex, the real part of the weights is applied to the real part
+        of the residual and the imaginary part is treated correspondingly.
+
+        Since the underlying scipy.optimize routines expect np.float
+        arrays, the only complex type supported is np.complex.
+
+        The "ravels" throughout are necessary to support pandas.Series.
+        """
+        diff = self.eval(params, **kwargs) - data
+
+        if diff.dtype == np.complex:
+            # data/model are complex
+            diff = diff.ravel().view(np.float)
+            if weights is not None:
+                if weights.dtype == np.complex:
+                    # weights are complex
+                    weights = weights.ravel().view(np.float)
+                else:
+                    # real weights but complex data
+                    weights = (weights + 1j * weights).ravel().view(np.float)
+        if weights is not None:
+            diff *= weights
+        return np.asarray(diff).ravel()  # for compatibility with pandas.Series
+
+    def _handle_missing(self, data):
+        "handle missing data"
+        if self.missing == 'raise':
+            if np.any(isnull(data)):
+                raise ValueError("Data contains a null value.")
+        elif self.missing == 'drop':
+            mask = ~isnull(data)
+            if np.all(mask):
+                return None  # short-circuit this -- no missing values
+            mask = np.asarray(mask)  # for compatibility with pandas.Series
+            return mask
+
+    def _strip_prefix(self, name):
+        npref = len(self._prefix)
+        if npref > 0 and name.startswith(self._prefix):
+            name = name[npref:]
+        return name
+
+    def make_funcargs(self, params=None, kwargs=None, strip=True):
+        """convert parameter values and keywords to function arguments"""
+        if params is None: params = {}
+        if kwargs is None: kwargs = {}
+        out = {}
+        out.update(self.opts)
+        for name, par in params.items():
+            if strip:
+                name = self._strip_prefix(name)
+            if name in self._func_allargs or self._func_haskeywords:
+                out[name] = par.value
+
+        # kwargs handled slightly differently -- may set param value too!
+        for name, val in kwargs.items():
+            if strip:
+                name = self._strip_prefix(name)
+            if name in self._func_allargs or self._func_haskeywords:
+                out[name] = val
+                if name in params:
+                    params[name].value = val
+        return out
+
+    def _make_all_args(self, params=None, **kwargs):
+        """generate **all** function args for all functions"""
+        args = {}
+        for key, val in self.make_funcargs(params, kwargs).items():
+            args["%s%s" % (self._prefix, key)] = val
+        return args
+
+    def eval(self, params=None, **kwargs):
+        """evaluate the model with the supplied parameters"""
+        result = self.func(**self.make_funcargs(params, kwargs))
+        # Handle special case of constant result and one
+        # independent variable (of any dimension).
+        if np.ndim(result) == 0 and len(self.independent_vars) == 1:
+            result = np.tile(result, kwargs[self.independent_vars[0]].shape)
+        return result
+
+    @property
+    def components(self):
+        """return components for composite model"""
+        return [self]
+
+    def eval_components(self, params=None, **kwargs):
+        """
+        evaluate the model with the supplied parameters and return an
+        ordered dict containing name, result pairs.
+        """
+        key = self._prefix
+        if len(key) < 1:
+            key = self._name
+        return {key: self.eval(params=params, **kwargs)}
+
+    def fit(self, data, params=None, weights=None, method='leastsq',
+            iter_cb=None, scale_covar=True, verbose=False, fit_kws=None, **kwargs):
+        """Fit the model to the data.
+
+        Parameters
+        ----------
+        data: array-like
+        params: Parameters object
+        weights: array-like of same size as data
+            used for weighted fit
+        method: fitting method to use (default = 'leastsq')
+        iter_cb: None or callable, callback function to call at each iteration
+        scale_covar: bool (default True) whether to auto-scale covariance matrix
+        verbose: bool (default False) print a message when a new parameter is
+            added because of a hint.
+        fit_kws: dict
+            default fitting options, such as xtol and maxfev, for scipy optimizer
+        keyword arguments: optional, named like the arguments of the
+            model function, will override params. See examples below.
+
+        Returns
+        -------
+        lmfit.ModelResult
+
+        Examples
+        --------
+        # Take t to be the independent variable and data to be the
+        # curve we will fit.
+
+        # Using keyword arguments to set initial guesses
+        >>> result = my_model.fit(data, tau=5, N=3, t=t)
+
+        # Or, for more control, pass a Parameters object.
+        >>> result = my_model.fit(data, params, t=t)
+
+        # Keyword arguments override Parameters.
+        >>> result = my_model.fit(data, params, tau=5, t=t)
+
+        Note
+        ----
+        All parameters, however passed, are copied on input, so the original
+        Parameter objects are unchanged.
+
+        """
+        if params is None:
+            params = self.make_params(verbose=verbose)
+        else:
+            params = deepcopy(params)
+
+        # If any kwargs match parameter names, override params.
+        param_kwargs = set(kwargs.keys()) & set(self.param_names)
+        for name in param_kwargs:
+            p = kwargs[name]
+            if isinstance(p, Parameter):
+                p.name = name  # allows N=Parameter(value=5) with implicit name
+                params[name] = deepcopy(p)
+            else:
+                params[name].set(value=p)
+            del kwargs[name]
+
+        # All remaining kwargs should correspond to independent variables.
+        for name in kwargs.keys():
+            if name not in self.independent_vars:
+                warnings.warn("The keyword argument %s does not" % name +
+                              "match any arguments of the model function." +
+                              "It will be ignored.", UserWarning)
+
+        # If any parameter is not initialized raise a more helpful error.
+        missing_param = any([p not in params.keys()
+                             for p in self.param_names])
+        blank_param = any([(p.value is None and p.expr is None)
+                           for p in params.values()])
+        if missing_param or blank_param:
+            msg = ('Assign each parameter an initial value by passing '
+                   'Parameters or keyword arguments to fit.\n')
+            missing = [p for p in self.param_names if p not in params.keys()]
+            blank = [name for name, p in params.items()
+                                    if (p.value is None and p.expr is None)]
+            msg += 'Missing parameters: %s\n' % str(missing)
+            msg += 'Non-initialized parameters: %s' % str(blank)
+            raise ValueError(msg)
+
+        # Do not alter anything that implements the array interface (np.array, pd.Series)
+        # but convert other iterables (e.g., Python lists) to numpy arrays.
+        if not hasattr(data, '__array__'):
+            data = np.asfarray(data)
+        for var in self.independent_vars:
+            var_data = kwargs[var]
+            if (not hasattr(var_data, '__array__')) and (not np.isscalar(var_data)):
+                kwargs[var] = np.asfarray(var_data)
+
+        # Handle null/missing values.
+        mask = None
+        if self.missing not in (None, 'none'):
+            mask = self._handle_missing(data)  # This can raise.
+            if mask is not None:
+                data = data[mask]
+            if weights is not None:
+                weights = _align(weights, mask, data)
+
+        # If independent_vars and data are alignable (pandas), align them,
+        # and apply the mask from above if there is one.
+        for var in self.independent_vars:
+            if not np.isscalar(kwargs[var]):
+                kwargs[var] = _align(kwargs[var], mask, data)
+
+        if fit_kws is None:
+            fit_kws = {}
+
+        output = ModelResult(self, params, method=method, iter_cb=iter_cb,
+                             scale_covar=scale_covar, fcn_kws=kwargs,
+                             **fit_kws)
+        output.fit(data=data, weights=weights)
+        output.components = self.components
+        return output
+
+    def __add__(self, other):
+        return CompositeModel(self, other, operator.add)
+
+    def __sub__(self, other):
+        return CompositeModel(self, other, operator.sub)
+
+    def __mul__(self, other):
+        return CompositeModel(self, other, operator.mul)
+
+    def __div__(self, other):
+        return CompositeModel(self, other, operator.truediv)
+
+    def __truediv__(self, other):
+        return CompositeModel(self, other, operator.truediv)
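+
+    # Usage note (illustrative): `model_a + model_b` is equivalent to
+    # CompositeModel(model_a, model_b, operator.add); subtraction,
+    # multiplication, and division behave analogously.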
+
+
+class CompositeModel(Model):
+    """Create a composite model -- a binary operator of two Models
+
+    Parameters
+    ----------
+    left_model:    left-hand side model -- must be a Model()
+    right_model:   right-hand side model -- must be a Model()
+    oper:          callable binary operator (typically operator.add, operator.mul, etc.)
+
+    independent_vars: list of strings or None (default)
+        arguments to func that are independent variables
+    param_names: list of strings or None (default)
+        names of arguments to func that are to be made into parameters
+    missing: None, 'none', 'drop', or 'raise'
+        'none' or None: Do not check for null or missing values (default)
+        'drop': Drop null or missing observations in data.
+            if pandas is installed, pandas.isnull is used, otherwise
+            numpy.isnan is used.
+        'raise': Raise a (more helpful) exception when data contains null
+            or missing values.
+    name: None or string
+        name for the model. When `None` (default), the name is taken
+        from the model function (`func`).
+
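+    Example
+    -------
+    A minimal sketch (assumes GaussianModel and LinearModel from
+    lmfit.models):
+
+    >>> import operator
+    >>> from lmfit.models import GaussianModel, LinearModel
+    >>> mod = CompositeModel(GaussianModel(prefix='g_'),
+    ...                      LinearModel(prefix='l_'), operator.add)
+
+    which is equivalent to: GaussianModel(prefix='g_') + LinearModel(prefix='l_')
+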
+    """
+    _names_collide = ("\nTwo models have parameters named '{clash}'. "
+                      "Use distinct names.")
+    _bad_arg   = "CompositeModel: argument {arg} is not a Model"
+    _bad_op    = "CompositeModel: operator {op} is not callable"
+    _known_ops = {operator.add: '+', operator.sub: '-',
+                  operator.mul: '*', operator.truediv: '/'}
+
+    def __init__(self, left, right, op, **kws):
+        if not isinstance(left, Model):
+            raise ValueError(self._bad_arg.format(arg=left))
+        if not isinstance(right, Model):
+            raise ValueError(self._bad_arg.format(arg=right))
+        if not callable(op):
+            raise ValueError(self._bad_op.format(op=op))
+
+        self.left  = left
+        self.right = right
+        self.op    = op
+
+        name_collisions = set(left.param_names) & set(right.param_names)
+        if len(name_collisions) > 0:
+            msg = ''
+            for collision in name_collisions:
+                msg += self._names_collide.format(clash=collision)
+            raise NameError(msg)
+
+        # we assume that all the sub-models have the same independent vars
+        if 'independent_vars' not in kws:
+            kws['independent_vars'] = self.left.independent_vars
+        if 'missing' not in kws:
+            kws['missing'] = self.left.missing
+
+        def _tmp(self, *args, **kws): pass
+        Model.__init__(self, _tmp, **kws)
+
+        for side in (left, right):
+            prefix = side.prefix
+            for basename, hint in side.param_hints.items():
+                self.param_hints["%s%s" % (prefix, basename)] = hint
+
+    def _parse_params(self):
+        self._func_haskeywords = (self.left._func_haskeywords or
+                                  self.right._func_haskeywords)
+        self._func_allargs = (self.left._func_allargs +
+                              self.right._func_allargs)
+        self.def_vals = deepcopy(self.right.def_vals)
+        self.def_vals.update(self.left.def_vals)
+        self.opts = deepcopy(self.right.opts)
+        self.opts.update(self.left.opts)
+
+    def _reprstring(self, long=False):
+        return "(%s %s %s)" % (self.left._reprstring(long=long),
+                               self._known_ops.get(self.op, self.op),
+                               self.right._reprstring(long=long))
+
+    def eval(self, params=None, **kwargs):
+        return self.op(self.left.eval(params=params, **kwargs),
+                       self.right.eval(params=params, **kwargs))
+
+    def eval_components(self, **kwargs):
+        """return ordered dict of name, results for each component"""
+        out = OrderedDict(self.left.eval_components(**kwargs))
+        out.update(self.right.eval_components(**kwargs))
+        return out
+
+    @property
+    def param_names(self):
+        return self.left.param_names + self.right.param_names
+
+    @property
+    def components(self):
+        """return components for composite model"""
+        return self.left.components + self.right.components
+
+    def _make_all_args(self, params=None, **kwargs):
+        """generate **all** function args for all functions"""
+        out = self.right._make_all_args(params=params, **kwargs)
+        out.update(self.left._make_all_args(params=params, **kwargs))
+        return out
+
+
+class ModelResult(Minimizer):
+    """Result from Model fit
+
+    Attributes
+    -----------
+    model         instance of Model -- the model function
+    params        instance of Parameters -- the fit parameters
+    data          array of data values to compare to model
+    weights       array of weights used in fitting
+    init_params   copy of params, before being updated by fit()
+    init_values   dict of parameter values, before being updated by fit()
+    init_fit      model evaluated with init_params.
+    best_fit      model evaluated with params after being updated by fit()
+
+    Methods
+    -------
+    fit(data=None, params=None, weights=None, method=None, **kwargs)
+         fit (or re-fit) model with params to data (with weights)
+         using the supplied method.  The keyword arguments are sent
+         as keyword arguments to the model function.
+
+         all inputs are optional, defaulting to the value used in
+         the previous fit.  This allows easily changing data or
+         parameter settings, or both.
+
+    eval(**kwargs)
+         evaluate the current model, with the current parameter values,
+         with values in kwargs sent to the model function.
+
+    eval_components(**kwargs)
+         evaluate the current model, with the current parameter values,
+         with values in kwargs sent to the model function, and return
+         an ordered dict with the model names as keys and the component
+         results as values.
+
+    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
+         return a fit report.
+
+    plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+             numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
+             ax_kws=None)
+         Plot the fit results using matplotlib.
+
+    plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
+                   ax_kws=None)
+         Plot the fit residuals using matplotlib.
+
+    plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
+         data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
+         ax_fit_kws=None, fig_kws=None)
+         Plot the fit results and residuals using matplotlib.
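+
+    Example (an illustrative sketch, reusing `my_model` from the Model
+    docstring):
+    >>> result = my_model.fit(data, params, t=t)
+    >>> print(result.fit_report())
+    >>> result.plot()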
+    """
+    def __init__(self, model, params, data=None, weights=None,
+                 method='leastsq', fcn_args=None, fcn_kws=None,
+                 iter_cb=None, scale_covar=True, **fit_kws):
+        self.model = model
+        self.data = data
+        self.weights = weights
+        self.method = method
+        self.ci_out = None
+        self.init_params = deepcopy(params)
+        Minimizer.__init__(self, model._residual, params, fcn_args=fcn_args,
+                           fcn_kws=fcn_kws, iter_cb=iter_cb,
+                           scale_covar=scale_covar, **fit_kws)
+
+    def fit(self, data=None, params=None, weights=None, method=None, **kwargs):
+        """perform fit for a Model, given data and params"""
+        if data is not None:
+            self.data = data
+        if params is not None:
+            self.init_params = params
+        if weights is not None:
+            self.weights = weights
+        if method is not None:
+            self.method = method
+        self.ci_out = None
+        self.userargs = (self.data, self.weights)
+        self.userkws.update(kwargs)
+        self.init_fit    = self.model.eval(params=self.params, **self.userkws)
+
+        _ret = self.minimize(method=self.method)
+
+        for attr in dir(_ret):
+            if not attr.startswith('_'):
+                try:
+                    setattr(self, attr, getattr(_ret, attr))
+                except AttributeError:
+                    pass
+
+        self.init_values = self.model._make_all_args(self.init_params)
+        self.best_values = self.model._make_all_args(_ret.params)
+        self.best_fit    = self.model.eval(params=_ret.params, **self.userkws)
+
+    def eval(self, **kwargs):
+        self.userkws.update(kwargs)
+        return self.model.eval(params=self.params, **self.userkws)
+
+    def eval_components(self, **kwargs):
+        self.userkws.update(kwargs)
+        return self.model.eval_components(params=self.params, **self.userkws)
+
+    def conf_interval(self, **kwargs):
+        """return explicitly calculated confidence intervals"""
+        if self.ci_out is None:
+            self.ci_out = conf_interval(self, self, **kwargs)
+        return self.ci_out
+
+    def ci_report(self, with_offset=True, ndigits=5, **kwargs):
+        """return nicely formatted report about confidence intervals"""
+        return ci_report(self.conf_interval(**kwargs),
+                         with_offset=with_offset, ndigits=ndigits)
+
+    def fit_report(self, **kwargs):
+        "return fit report"
+        return '[[Model]]\n    %s\n%s\n' % (self.model._reprstring(long=True),
+                                            fit_report(self, **kwargs))
+
+    @_ensureMatplotlib
+    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
+                 numpoints=None, data_kws=None, fit_kws=None, init_kws=None,
+                 ax_kws=None):
+        """Plot the fit results using matplotlib.
+
+        The method will plot results of the fit using matplotlib, including:
+        the data points, the initial fit curve and the fitted curve. If the fit
+        model included weights, errorbars will also be plotted.
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes, optional
+            The axes to plot on. The default is None, which means use the
+            current pyplot axis or create one if there is none.
+        datafmt : string, optional
+            matplotlib format string for data points
+        fitfmt : string, optional
+            matplotlib format string for fitted curve
+        initfmt : string, optional
+            matplotlib format string for initial conditions for the fit
+        yerr : ndarray, optional
+            array of uncertainties for data array
+        numpoints : int, optional
+            If provided, the final and initial fit curves are evaluated not
+            only at data points, but refined to contain `numpoints` points in
+            total.
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        init_kws : dictionary, optional
+            keyword arguments passed on to the plot function for the initial
+            conditions of the fit
+        ax_kws : dictionary, optional
+            keyword arguments for a new axis, if there is one being created
+
+        Returns
+        -------
+        matplotlib.axes.Axes
+
+        Notes
+        -----
+        For details about plot format strings and keyword arguments see
+        documentation of matplotlib.axes.Axes.plot.
+
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr
+        is not specified and the fit includes weights, yerr is set to
+        1/self.weights.
+
+        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+
+        See Also
+        --------
+        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+        ModelResult.plot : Plot the fit results and residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if init_kws is None:
+            init_kws = {}
+        if ax_kws is None:
+            ax_kws = {}
+
+        if len(self.model.independent_vars) == 1:
+            independent_var = self.model.independent_vars[0]
+        else:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(ax, plt.Axes):
+            ax = plt.gca(**ax_kws)
+
+        x_array = self.userkws[independent_var]
+
+        # make a dense array for x-axis if data is not dense
+        if numpoints is not None and len(self.data) < numpoints:
+            x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)
+        else:
+            x_array_dense = x_array
+
+        ax.plot(x_array_dense, self.model.eval(self.init_params,
+                **{independent_var: x_array_dense}), initfmt,
+                label='init', **init_kws)
+        ax.plot(x_array_dense, self.model.eval(self.params,
+                **{independent_var: x_array_dense}), fitfmt,
+                label='best-fit', **fit_kws)
+
+        if yerr is None and self.weights is not None:
+            yerr = 1.0/self.weights
+        if yerr is not None:
+            ax.errorbar(x_array, self.data, yerr=yerr,
+                        fmt=datafmt, label='data', **data_kws)
+        else:
+            ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
+
+        ax.set_title(self.model.name)
+        ax.set_xlabel(independent_var)
+        ax.set_ylabel('y')
+        ax.legend()
+
+        return ax
+
+    @_ensureMatplotlib
+    def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
+                       fit_kws=None, ax_kws=None):
+        """Plot the fit residuals using matplotlib.
+
+        The method will plot residuals of the fit using matplotlib, including:
+        the data points and the fitted curve (as horizontal line). If the fit
+        model included weights, errorbars will also be plotted.
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes, optional
+            The axes to plot on. The default is None, which means use the
+            current pyplot axis or create one if there is none.
+        datafmt : string, optional
+            matplotlib format string for data points
+        yerr : ndarray, optional
+            array of uncertainties for data array
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        ax_kws : dictionary, optional
+            keyword arguments for a new axis, if there is one being created
+
+        Returns
+        -------
+        matplotlib.axes.Axes
+
+        Notes
+        -----
+        For details about plot format strings and keyword arguments see
+        documentation of matplotlib.axes.Axes.plot.
+
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr
+        is not specified and the fit includes weights, yerr is set to
+        1/self.weights.
+
+        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+
+        See Also
+        --------
+        ModelResult.plot_fit : Plot the fit results using matplotlib.
+        ModelResult.plot : Plot the fit results and residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if ax_kws is None:
+            ax_kws = {}
+
+        if len(self.model.independent_vars) == 1:
+            independent_var = self.model.independent_vars[0]
+        else:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(ax, plt.Axes):
+            ax = plt.gca(**ax_kws)
+
+        x_array = self.userkws[independent_var]
+
+        ax.axhline(0, **fit_kws)
+
+        if yerr is None and self.weights is not None:
+            yerr = 1.0/self.weights
+        if yerr is not None:
+            ax.errorbar(x_array, self.eval() - self.data, yerr=yerr,
+                        fmt=datafmt, label='residuals', **data_kws)
+        else:
+            ax.plot(x_array, self.eval() - self.data, datafmt,
+                    label='residuals', **data_kws)
+
+        ax.set_title(self.model.name)
+        ax.set_ylabel('residuals')
+        ax.legend()
+
+        return ax
+
+    @_ensureMatplotlib
+    def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
+             numpoints=None, fig=None, data_kws=None, fit_kws=None,
+             init_kws=None, ax_res_kws=None, ax_fit_kws=None,
+             fig_kws=None):
+        """Plot the fit results and residuals using matplotlib.
+
+        The method will produce a matplotlib figure with both results of the
+        fit and the residuals plotted. If the fit model included weights,
+        errorbars will also be plotted.
+
+        Parameters
+        ----------
+        datafmt : string, optional
+            matplotlib format string for data points
+        fitfmt : string, optional
+            matplotlib format string for fitted curve
+        initfmt : string, optional
+            matplotlib format string for initial conditions for the fit
+        yerr : ndarray, optional
+            array of uncertainties for data array
+        numpoints : int, optional
+            If provided, the final and initial fit curves are evaluated not
+            only at data points, but refined to contain `numpoints` points in
+            total.
+        fig : matplotlib.figure.Figure, optional
+            The figure to plot on. The default is None, which means use the
+            current pyplot figure or create one if there is none.
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        init_kws : dictionary, optional
+            keyword arguments passed on to the plot function for the initial
+            conditions of the fit
+        ax_res_kws : dictionary, optional
+            keyword arguments for the axes for the residuals plot
+        ax_fit_kws : dictionary, optional
+            keyword arguments for the axes for the fit plot
+        fig_kws : dictionary, optional
+            keyword arguments for a new figure, if there is one being created
+
+        Returns
+        -------
+        matplotlib.figure.Figure
+
+        Notes
+        -----
+        The method combines ModelResult.plot_fit and ModelResult.plot_residuals.
+
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr
+        is not specified and the fit includes weights, yerr is set to
+        1/self.weights.
+
+        If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.
+
+        See Also
+        --------
+        ModelResult.plot_fit : Plot the fit results using matplotlib.
+        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if init_kws is None:
+            init_kws = {}
+        if ax_res_kws is None:
+            ax_res_kws = {}
+        if ax_fit_kws is None:
+            ax_fit_kws = {}
+        if fig_kws is None:
+            fig_kws = {}
+
+        if len(self.model.independent_vars) != 1:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(fig, plt.Figure):
+            fig = plt.figure(**fig_kws)
+
+        gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
+        ax_res = fig.add_subplot(gs[0], **ax_res_kws)
+        ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
+
+        self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
+                      initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
+                      fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
+        self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
+                            data_kws=data_kws, fit_kws=fit_kws,
+                            ax_kws=ax_res_kws)
+
+        return fig
diff --git a/lmfit/models.py b/lmfit/models.py
index 87139b0..78d024d 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -1,454 +1,484 @@
-import numpy as np
-from .model import Model
-
-from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, moffat, pearson7,
-                         step, rectangle, breit_wigner, logistic,
-                         students_t, lognormal, damped_oscillator,
-                         expgaussian, skewed_gaussian, donaich,
-                         skewed_voigt, exponential, powerlaw, linear,
-                         parabolic)
-
-from . import lineshapes
-
-from .asteval import Interpreter
-from .astutils import get_ast_names
-
-class DimensionalError(Exception):
-    pass
-
-def _validate_1d(independent_vars):
-    if len(independent_vars) != 1:
-        raise DimensionalError(
-            "This model requires exactly one independent variable.")
-
-def index_of(arr, val):
-    """return index of array nearest to a value
-    """
-    if val < min(arr):
-        return 0
-    return np.abs(arr-val).argmin()
-
-def fwhm_expr(model):
-    "return constraint expression for fwhm"
-    return "%.7f*%ssigma" % (model.fwhm_factor, model.prefix)
-
-def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
-    "estimate amp, cen, sigma for a peak, create params"
-    if x is None:
-        return 1.0, 0.0, 1.0
-    maxy, miny = max(y), min(y)
-    maxx, minx = max(x), min(x)
-    imaxy = index_of(y, maxy)
-    cen = x[imaxy]
-    amp = (maxy - miny)*2.0
-    sig = (maxx-minx)/6.0
-
-    halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
-    if negative:
-        imaxy = index_of(y, miny)
-        amp = -(maxy - miny)*2.0
-        halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
-    if len(halfmax_vals) > 2:
-        sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
-        cen = x[halfmax_vals].mean()
-    amp = amp*sig*ampscale
-    sig = sig*sigscale
-
-    pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
-    pars['%ssigma' % model.prefix].set(min=0.0)
-    return pars
-
-def update_param_vals(pars, prefix, **kwargs):
-    """convenience function to update parameter values
-    with keyword arguments"""
-    for key, val in kwargs.items():
-        pname = "%s%s" % (prefix, key)
-        if pname in pars:
-            pars[pname].value = val
-    return pars
-
-COMMON_DOC = """
-
-Parameters
-----------
-independent_vars: list of strings to be set as variable names
-missing: None, 'drop', or 'raise'
-    None: Do not check for null or missing values.
-    'drop': Drop null or missing observations in data.
-        Use pandas.isnull if pandas is available; otherwise,
-        silently fall back to numpy.isnan.
-    'raise': Raise a (more helpful) exception when data contains null
-        or missing values.
-prefix: string to prepend to paramter names, needed to add two Models that
-    have parameter names in common. None by default.
-"""
-
-class ConstantModel(Model):
-    __doc__ = "x -> c" + COMMON_DOC
-    def __init__(self, *args, **kwargs):
-        def constant(x, c):
-            return c
-        super(ConstantModel, self).__init__(constant, *args, **kwargs)
-
-    def guess(self, data, **kwargs):
-        pars = self.make_params()
-        pars['%sc' % self.prefix].set(value=data.mean())
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class LinearModel(Model):
-    __doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(LinearModel, self).__init__(linear, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        sval, oval = 0., 0.
-        if x is not None:
-            sval, oval = np.polyfit(x, data, 1)
-        pars = self.make_params(intercept=oval, slope=sval)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class QuadraticModel(Model):
-    __doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        a, b, c = 0., 0., 0.
-        if x is not None:
-            a, b, c = np.polyfit(x, data, 2)
-        pars = self.make_params(a=a, b=b, c=c)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-ParabolicModel = QuadraticModel
-
-class PolynomialModel(Model):
-    __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
-    MAX_DEGREE=7
-    DEGREE_ERR = "degree must be an integer less than %d."
-    def __init__(self, degree, *args, **kwargs):
-        if not isinstance(degree, int)  or degree > self.MAX_DEGREE:
-            raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
-
-        self.poly_degree = degree
-        pnames = ['c%i' % (i) for i in range(degree + 1)]
-        kwargs['param_names'] = pnames
-
-        def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
-            return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
-
-        super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        pars = self.make_params()
-        if x is not None:
-            out = np.polyfit(x, data, self.poly_degree)
-            for i, coef in enumerate(out[::-1]):
-                pars['%sc%i'% (self.prefix, i)].set(value=coef)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class GaussianModel(Model):
-    __doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
-    fwhm_factor = 2.354820
-    def __init__(self, *args, **kwargs):
-        super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
-        self.set_param_hint('sigma', min=0)
-        self.set_param_hint('fwhm', expr=fwhm_expr(self))
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class LorentzianModel(Model):
-    __doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
-    fwhm_factor = 2.0
-    def __init__(self, *args, **kwargs):
-        super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
-        self.set_param_hint('sigma', min=0)
-        self.set_param_hint('fwhm', expr=fwhm_expr(self))
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class VoigtModel(Model):
-    __doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
-    fwhm_factor = 3.60131
-    def __init__(self, *args, **kwargs):
-        super(VoigtModel, self).__init__(voigt, *args, **kwargs)
-        self.set_param_hint('sigma', min=0)
-        self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
-        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative,
-                               ampscale=1.5, sigscale=0.65)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class PseudoVoigtModel(Model):
-    __doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
-    fwhm_factor = 2.0
-    def __init__(self, *args, **kwargs):
-        super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
-        self.set_param_hint('fraction', value=0.5)
-        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
-        pars['%sfraction' % self.prefix].set(value=0.5)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class MoffatModel(Model):
-    __doc__ = moffat.__doc__ + COMMON_DOC if moffat.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(MoffatModel, self).__init__(moffat, *args, **kwargs)
-        self.set_param_hint('fwhm', expr="2*%ssigma*sqrt(2**(1.0/%sbeta)-1)" % (self.prefix, self.prefix))
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class Pearson7Model(Model):
-    __doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
-        self.set_param_hint('expon',  value=1.5)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        pars['%sexpon' % self.prefix].set(value=1.5)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class StudentsTModel(Model):
-    __doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class BreitWignerModel(Model):
-    __doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        pars['%sq' % self.prefix].set(value=1.0)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class LognormalModel(Model):
-    __doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
-        pars['%ssigma' % self.prefix].set(min=0.0)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class DampedOscillatorModel(Model):
-    __doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars =guess_from_peak(self, data, x, negative,
-                              ampscale=0.1, sigscale=0.1)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-class ExponentialGaussianModel(Model):
-    __doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-class SkewedGaussianModel(Model):
-    __doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
-    fwhm_factor = 2.354820
-    def __init__(self, *args, **kwargs):
-        super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
-        self.set_param_hint('sigma', min=0)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-class DonaichModel(Model):
-    __doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(DonaichModel, self).__init__(donaich, *args, **kwargs)
-
-    def guess(self, data, x=None, negative=False, **kwargs):
-        pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class PowerLawModel(Model):
-    __doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        try:
-            expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
-        except:
-            expon, amp = 1, np.log(abs(max(data)+1.e-9))
-
-        pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class ExponentialModel(Model):
-    __doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        try:
-            sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
-        except:
-            sval, oval = 1., np.log(abs(max(data)+1.e-9))
-        pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class StepModel(Model):
-    __doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(StepModel, self).__init__(step, *args, **kwargs)
-
-    def guess(self, data, x=None, **kwargs):
-        if x is None:
-            return
-        ymin, ymax = min(data), max(data)
-        xmin, xmax = min(x), max(x)
-        pars = self.make_params(amplitude=(ymax-ymin),
-                                center=(xmax+xmin)/2.0)
-        pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class RectangleModel(Model):
-    __doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""
-    def __init__(self, *args, **kwargs):
-        super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
-        self.set_param_hint('midpoint',
-                            expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
-                                                                self.prefix))
-    def guess(self, data, x=None, **kwargs):
-        if x is None:
-            return
-        ymin, ymax = min(data), max(data)
-        xmin, xmax = min(x), max(x)
-        pars = self.make_params(amplitude=(ymax-ymin),
-                                center1=(xmax+xmin)/4.0,
-                                center2=3*(xmax+xmin)/4.0)
-        pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
-        pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
-        return update_param_vals(pars, self.prefix, **kwargs)
-
-
-class ExpressionModel(Model):
-    """Model from User-supplied expression
-
-Parameters
-----------
-expr:    string of mathematical expression for model.
-independent_vars: list of strings to be set as variable names
-missing: None, 'drop', or 'raise'
-    None: Do not check for null or missing values.
-    'drop': Drop null or missing observations in data.
-        Use pandas.isnull if pandas is available; otherwise,
-        silently fall back to numpy.isnan.
-    'raise': Raise a (more helpful) exception when data contains null
-        or missing values.
-prefix: NOT supported for ExpressionModel
-"""
-
-    idvar_missing  = "No independent variable found in\n %s"
-    idvar_notfound = "Cannot find independent variables '%s' in\n %s"
-    no_prefix      = "ExpressionModel does not support `prefix` argument"
-    def __init__(self, expr, independent_vars=None, init_script=None,
-                 *args, **kwargs):
-
-        # create ast evaluator, load custom functions
-        self.asteval = Interpreter()
-        for name in lineshapes.functions:
-            self.asteval.symtable[name] = getattr(lineshapes, name, None)
-        if init_script is not None:
-            self.asteval.eval(init_script)
-
-        # save expr as text, parse to ast, save for later use
-        self.expr = expr.strip()
-        self.astcode = self.asteval.parse(self.expr)
-
-        # find all symbol names found in expression
-        sym_names = get_ast_names(self.astcode)
-
-        if independent_vars is None and 'x' in sym_names:
-            independent_vars = ['x']
-        if independent_vars is None:
-            raise ValueError(self.idvar_missing % (self.expr))
-
-        # determine which named symbols are parameter names,
-        # try to find all independent variables
-        idvar_found = [False]*len(independent_vars)
-        param_names = []
-        for name in sym_names:
-            if name in independent_vars:
-                idvar_found[independent_vars.index(name)] = True
-            elif name not in self.asteval.symtable:
-                param_names.append(name)
-
-        # make sure we have all independent parameters
-        if not all(idvar_found):
-            lost = []
-            for ix, found in enumerate(idvar_found):
-                if not found:
-                    lost.append(independent_vars[ix])
-            lost = ', '.join(lost)
-            raise ValueError(self.idvar_notfound % (lost, self.expr))
-
-        kwargs['independent_vars'] = independent_vars
-        if 'prefix' in kwargs:
-            raise Warning(self.no_prefix)
-
-        def _eval(**kwargs):
-            for name, val in kwargs.items():
-                self.asteval.symtable[name] = val
-            return self.asteval.run(self.astcode)
-
-        super(ExpressionModel, self).__init__(_eval, *args, **kwargs)
-
-        # set param names here, and other things normally
-        # set in _parse_params(), which will be short-circuited.
-        self.independent_vars = independent_vars
-        self._func_allargs = independent_vars + param_names
-        self._param_names = set(param_names)
-        self._func_haskeywords = True
-        self.def_vals = {}
-
-    def __repr__(self):
-        return  "<lmfit.ExpressionModel('%s')>" % (self.expr)
-
-    def _parse_params(self):
-        """ExpressionModel._parse_params is over-written (as `pass`)
-        to prevent normal parsing of function for parameter names
-        """
-        pass
+import numpy as np
+from .model import Model
+
+from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, moffat, pearson7,
+                         step, rectangle, breit_wigner, logistic,
+                         students_t, lognormal, damped_oscillator,
+                         expgaussian, skewed_gaussian, donaich,
+                         skewed_voigt, exponential, powerlaw, linear,
+                         parabolic)
+
+from . import lineshapes
+
+from .asteval import Interpreter
+from .astutils import get_ast_names
+
+class DimensionalError(Exception):
+    pass
+
+def _validate_1d(independent_vars):
+    if len(independent_vars) != 1:
+        raise DimensionalError(
+            "This model requires exactly one independent variable.")
+
+def index_of(arr, val):
+    """return index of array nearest to a value
+    """
+    if val < min(arr):
+        return 0
+    return np.abs(arr-val).argmin()
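+
+# For example (illustrative): index_of(np.array([0., 1., 2.]), 1.2)
+# returns 1, the index of the element nearest to the value.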
+
+def fwhm_expr(model):
+    "return constraint expression for fwhm"
+    fmt = "{factor:.7f}*{prefix:s}sigma"
+    return fmt.format(factor=model.fwhm_factor, prefix=model.prefix)
+
+def height_expr(model):
+    "return constraint expression for maximum peak height"
+    fmt = "{factor:.7f}*{prefix:s}amplitude/{prefix:s}sigma"
+    return fmt.format(factor=model.height_factor, prefix=model.prefix)
+
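+# Illustration (hypothetical prefix): for a GaussianModel with prefix 'g1_'
+# (fwhm_factor=2.354820, height_factor=1/sqrt(2*pi)), fwhm_expr(model)
+# returns '2.3548200*g1_sigma' and height_expr(model) returns
+# '0.3989423*g1_amplitude/g1_sigma'; both are used as constraint expressions.
+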
+def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
+    "estimate amp, cen, sigma for a peak, create params"
+    if x is None:
+        return 1.0, 0.0, 1.0
+    maxy, miny = max(y), min(y)
+    maxx, minx = max(x), min(x)
+    imaxy = index_of(y, maxy)
+    cen = x[imaxy]
+    amp = (maxy - miny)*2.0
+    sig = (maxx-minx)/6.0
+
+    halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
+    if negative:
+        imaxy = index_of(y, miny)
+        amp = -(maxy - miny)*2.0
+        halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
+    if len(halfmax_vals) > 2:
+        sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
+        cen = x[halfmax_vals].mean()
+    amp = amp*sig*ampscale
+    sig = sig*sigscale
+
+    pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
+    pars['%ssigma' % model.prefix].set(min=0.0)
+    return pars
+
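+# Worked sketch: for a positive peak with x spanning [0, 10] and the region
+# above half-maximum lying between x=4 and x=6, guess_from_peak() estimates
+# sigma ~ (6-4)/2 = 1.0 and center ~ 5.0, then scales amplitude by sigma.
+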
+def update_param_vals(pars, prefix, **kwargs):
+    """convenience function to update parameter values
+    with keyword arguments"""
+    for key, val in kwargs.items():
+        pname = "%s%s" % (prefix, key)
+        if pname in pars:
+            pars[pname].value = val
+    return pars
+
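+# Usage sketch (hypothetical names): update_param_vals(pars, 'g1_', center=5.0)
+# sets pars['g1_center'].value to 5.0; keywords without a matching prefixed
+# parameter name are silently ignored.
+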
+COMMON_DOC = """
+
+Parameters
+----------
+independent_vars: list of strings to be set as variable names
+missing: None, 'drop', or 'raise'
+    None: Do not check for null or missing values.
+    'drop': Drop null or missing observations in data.
+        Use pandas.isnull if pandas is available; otherwise,
+        silently fall back to numpy.isnan.
+    'raise': Raise a (more helpful) exception when data contains null
+        or missing values.
+prefix: string to prepend to parameter names, needed to add two Models that
+    have parameter names in common. None by default.
+"""
+
+class ConstantModel(Model):
+    __doc__ = "x -> c" + COMMON_DOC
+    def __init__(self, *args, **kwargs):
+        def constant(x, c):
+            return c
+        super(ConstantModel, self).__init__(constant, *args, **kwargs)
+
+    def guess(self, data, **kwargs):
+        pars = self.make_params()
+        pars['%sc' % self.prefix].set(value=data.mean())
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+class ComplexConstantModel(Model):
+    __doc__ = "x -> re+1j*im" + COMMON_DOC
+    def __init__(self, *args, **kwargs):
+        def constant(x, re, im):
+            return re + 1j*im
+        super(ComplexConstantModel, self).__init__(constant, *args, **kwargs)
+
+    def guess(self, data, **kwargs):
+        pars = self.make_params()
+        pars['%sre' % self.prefix].set(value=data.real.mean())
+        pars['%sim' % self.prefix].set(value=data.imag.mean())
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+class LinearModel(Model):
+    __doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(LinearModel, self).__init__(linear, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        sval, oval = 0., 0.
+        if x is not None:
+            sval, oval = np.polyfit(x, data, 1)
+        pars = self.make_params(intercept=oval, slope=sval)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class QuadraticModel(Model):
+    __doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        a, b, c = 0., 0., 0.
+        if x is not None:
+            a, b, c = np.polyfit(x, data, 2)
+        pars = self.make_params(a=a, b=b, c=c)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+ParabolicModel = QuadraticModel
+
+class PolynomialModel(Model):
+    __doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
+    MAX_DEGREE = 7
+    DEGREE_ERR = "degree must be an integer no larger than %d."
+    def __init__(self, degree, *args, **kwargs):
+        if not isinstance(degree, int) or degree > self.MAX_DEGREE:
+            raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
+
+        self.poly_degree = degree
+        pnames = ['c%i' % (i) for i in range(degree + 1)]
+        kwargs['param_names'] = pnames
+
+        def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
+            return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
+
+        super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        pars = self.make_params()
+        if x is not None:
+            out = np.polyfit(x, data, self.poly_degree)
+            for i, coef in enumerate(out[::-1]):
+                pars['%sc%i' % (self.prefix, i)].set(value=coef)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
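+# Example sketch (assumes 1-d numpy arrays x and y):
+#     poly = PolynomialModel(degree=3)
+#     params = poly.guess(y, x=x)        # seeds c0..c3 from np.polyfit
+#     result = poly.fit(y, params, x=x)
+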
+
+class GaussianModel(Model):
+    __doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
+    fwhm_factor = 2.354820
+    height_factor = 1./np.sqrt(2*np.pi)
+    def __init__(self, *args, **kwargs):
+        super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('fwhm', expr=fwhm_expr(self))
+        self.set_param_hint('height', expr=height_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
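+# Fitting sketch shared by the peak models below (assumes numpy arrays x, y):
+#     gmod = GaussianModel(prefix='g1_')
+#     params = gmod.guess(y, x=x)        # data-driven starting values
+#     result = gmod.fit(y, params, x=x)  # result.params holds best-fit values
+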
+
+class LorentzianModel(Model):
+    __doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
+    fwhm_factor = 2.0
+    height_factor = 1./np.pi
+    def __init__(self, *args, **kwargs):
+        super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('fwhm', expr=fwhm_expr(self))
+        self.set_param_hint('height', expr=height_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class VoigtModel(Model):
+    __doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
+    fwhm_factor = 3.60131
+    height_factor = 1./np.sqrt(2*np.pi)
+    def __init__(self, *args, **kwargs):
+        super(VoigtModel, self).__init__(voigt, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
+        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
+        self.set_param_hint('height', expr=height_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative,
+                               ampscale=1.5, sigscale=0.65)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class PseudoVoigtModel(Model):
+    __doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
+    fwhm_factor = 2.0
+    def __init__(self, *args, **kwargs):
+        super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('fraction', value=0.5)
+        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
+        pars['%sfraction' % self.prefix].set(value=0.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class MoffatModel(Model):
+    __doc__ = moffat.__doc__ + COMMON_DOC if moffat.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(MoffatModel, self).__init__(moffat, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+        self.set_param_hint('beta')
+        self.set_param_hint('fwhm', expr="2*%ssigma*sqrt(2**(1.0/%sbeta)-1)" % (self.prefix, self.prefix))
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=0.5, sigscale=1.)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class Pearson7Model(Model):
+    __doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
+        self.set_param_hint('expon',  value=1.5)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        pars['%sexpon' % self.prefix].set(value=1.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class StudentsTModel(Model):
+    __doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class BreitWignerModel(Model):
+    __doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        pars['%sq' % self.prefix].set(value=1.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class LognormalModel(Model):
+    __doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
+        pars['%ssigma' % self.prefix].set(min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class DampedOscillatorModel(Model):
+    __doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative,
+                               ampscale=0.1, sigscale=0.1)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+class ExponentialGaussianModel(Model):
+    __doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+class SkewedGaussianModel(Model):
+    __doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
+    fwhm_factor = 2.354820
+    def __init__(self, *args, **kwargs):
+        super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
+        self.set_param_hint('sigma', min=0)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+class DonaichModel(Model):
+    __doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(DonaichModel, self).__init__(donaich, *args, **kwargs)
+
+    def guess(self, data, x=None, negative=False, **kwargs):
+        pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class PowerLawModel(Model):
+    __doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        try:
+            expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
+        except Exception:
+            expon, amp = 1, np.log(abs(max(data)+1.e-9))
+
+        pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class ExponentialModel(Model):
+    __doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        try:
+            sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
+        except Exception:
+            sval, oval = 1., np.log(abs(max(data)+1.e-9))
+        pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class StepModel(Model):
+    __doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(StepModel, self).__init__(step, *args, **kwargs)
+
+    def guess(self, data, x=None, **kwargs):
+        if x is None:
+            return
+        ymin, ymax = min(data), max(data)
+        xmin, xmax = min(x), max(x)
+        pars = self.make_params(amplitude=(ymax-ymin),
+                                center=(xmax+xmin)/2.0)
+        pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
+
+
+class RectangleModel(Model):
+    __doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""
+    def __init__(self, *args, **kwargs):
+        super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
+
+        self.set_param_hint('center1')
+        self.set_param_hint('center2')
+        self.set_param_hint('midpoint',
+                            expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
+                                                                self.prefix))
+    def guess(self, data, x=None, **kwargs):
+        if x is None:
+            return
+        ymin, ymax = min(data), max(data)
+        xmin, xmax = min(x), max(x)
+        pars = self.make_params(amplitude=(ymax-ymin),
+                                center1=(xmax+xmin)/4.0,
+                                center2=3*(xmax+xmin)/4.0)
+        pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
+        return update_param_vals(pars, self.prefix, **kwargs)
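+
+    # Illustration: with prefix 'r1_', the 'midpoint' hint above expands to
+    # the constraint expression '(r1_center1+r1_center2)/2.0', tying the
+    # midpoint to the two edge centers.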
+
+
+class ExpressionModel(Model):
+    """Model from User-supplied expression
+
+Parameters
+----------
+expr:    string of mathematical expression for model.
+independent_vars: list of strings to be set as variable names
+missing: None, 'drop', or 'raise'
+    None: Do not check for null or missing values.
+    'drop': Drop null or missing observations in data.
+        Use pandas.isnull if pandas is available; otherwise,
+        silently fall back to numpy.isnan.
+    'raise': Raise a (more helpful) exception when data contains null
+        or missing values.
+prefix: NOT supported for ExpressionModel
+"""
+
+    idvar_missing  = "No independent variable found in\n %s"
+    idvar_notfound = "Cannot find independent variables '%s' in\n %s"
+    no_prefix      = "ExpressionModel does not support `prefix` argument"
+    def __init__(self, expr, independent_vars=None, init_script=None,
+                 *args, **kwargs):
+
+        # create ast evaluator, load custom functions
+        self.asteval = Interpreter()
+        for name in lineshapes.functions:
+            self.asteval.symtable[name] = getattr(lineshapes, name, None)
+        if init_script is not None:
+            self.asteval.eval(init_script)
+
+        # save expr as text, parse to ast, save for later use
+        self.expr = expr.strip()
+        self.astcode = self.asteval.parse(self.expr)
+
+        # find all symbol names found in expression
+        sym_names = get_ast_names(self.astcode)
+
+        if independent_vars is None and 'x' in sym_names:
+            independent_vars = ['x']
+        if independent_vars is None:
+            raise ValueError(self.idvar_missing % (self.expr))
+
+        # determine which named symbols are parameter names,
+        # try to find all independent variables
+        idvar_found = [False]*len(independent_vars)
+        param_names = []
+        for name in sym_names:
+            if name in independent_vars:
+                idvar_found[independent_vars.index(name)] = True
+            elif name not in self.asteval.symtable:
+                param_names.append(name)
+
+        # make sure we have all independent parameters
+        if not all(idvar_found):
+            lost = []
+            for ix, found in enumerate(idvar_found):
+                if not found:
+                    lost.append(independent_vars[ix])
+            lost = ', '.join(lost)
+            raise ValueError(self.idvar_notfound % (lost, self.expr))
+
+        kwargs['independent_vars'] = independent_vars
+        if 'prefix' in kwargs:
+            raise Warning(self.no_prefix)
+
+        def _eval(**kwargs):
+            for name, val in kwargs.items():
+                self.asteval.symtable[name] = val
+            return self.asteval.run(self.astcode)
+
+        super(ExpressionModel, self).__init__(_eval, *args, **kwargs)
+
+        # set param names here, and other things normally
+        # set in _parse_params(), which will be short-circuited.
+        self.independent_vars = independent_vars
+        self._func_allargs = independent_vars + param_names
+        self._param_names = set(param_names)
+        self._func_haskeywords = True
+        self.def_vals = {}
+
+    def __repr__(self):
+        return  "<lmfit.ExpressionModel('%s')>" % (self.expr)
+
+    def _parse_params(self):
+        """ExpressionModel._parse_params is over-written (as `pass`)
+        to prevent normal parsing of function for parameter names
+        """
+        pass
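+
+# Usage sketch for ExpressionModel (assumes numpy arrays x and y):
+#     mod = ExpressionModel('amp * exp(-x/tau) + off')
+#     params = mod.make_params(amp=5, tau=2, off=0.5)
+#     result = mod.fit(y, params, x=x)
+# 'x' is taken as the independent variable; 'amp', 'tau', and 'off' become
+# the model parameters.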
diff --git a/lmfit/ordereddict.py b/lmfit/ordereddict.py
index 524a5c9..2d1d813 100644
--- a/lmfit/ordereddict.py
+++ b/lmfit/ordereddict.py
@@ -1,128 +1,128 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-#     The above copyright notice and this permission notice shall be
-#     included in all copies or substantial portions of the Software.
-#
-#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-#     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-#     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-#     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-#     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-#     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-#     OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-
-class OrderedDict(dict, DictMixin):
-
-    def __init__(self, *args, **kwds):
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__end
-        except AttributeError:
-            self.clear()
-        self.update(*args, **kwds)
-
-    def clear(self):
-        self.__end = end = []
-        end += [None, end, end]         # sentinel node for doubly linked list
-        self.__map = {}                 # key --> [key, prev, next]
-        dict.clear(self)
-
-    def __setitem__(self, key, value):
-        if key not in self:
-            end = self.__end
-            curr = end[1]
-            curr[2] = end[1] = self.__map[key] = [key, curr, end]
-        dict.__setitem__(self, key, value)
-
-    def __delitem__(self, key):
-        dict.__delitem__(self, key)
-        key, prev, next = self.__map.pop(key)
-        prev[2] = next
-        next[1] = prev
-
-    def __iter__(self):
-        end = self.__end
-        curr = end[2]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[2]
-
-    def __reversed__(self):
-        end = self.__end
-        curr = end[1]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[1]
-
-    def popitem(self, last=True):
-        if not self:
-            raise KeyError('dictionary is empty')
-        if last:
-            key = reversed(self).next()
-        else:
-            key = iter(self).next()
-        value = self.pop(key)
-        return key, value
-
-    def __reduce__(self):
-        items = [[k, self[k]] for k in self]
-        tmp = self.__map, self.__end
-        del self.__map, self.__end
-        inst_dict = vars(self).copy()
-        self.__map, self.__end = tmp
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def keys(self):
-        return list(self)
-
-    setdefault = DictMixin.setdefault
-    update = DictMixin.update
-    pop = DictMixin.pop
-    values = DictMixin.values
-    items = DictMixin.items
-    iterkeys = DictMixin.iterkeys
-    itervalues = DictMixin.itervalues
-    iteritems = DictMixin.iteritems
-
-    def __repr__(self):
-        if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, self.items())
-
-    def copy(self):
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        d = cls()
-        for key in iterable:
-            d[key] = value
-        return d
-
-    def __eq__(self, other):
-        if isinstance(other, OrderedDict):
-            if len(self) != len(other):
-                return False
-            for p, q in zip(self.items(), other.items()):
-                if p != q:
-                    return False
-            return True
-        return dict.__eq__(self, other)
-
-    def __ne__(self, other):
-        return not self == other
+# Copyright (c) 2009 Raymond Hettinger
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+#     The above copyright notice and this permission notice shall be
+#     included in all copies or substantial portions of the Software.
+#
+#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+#     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+#     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+#     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+#     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+#     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+#     OTHER DEALINGS IN THE SOFTWARE.
+
+from UserDict import DictMixin
+
+
+class OrderedDict(dict, DictMixin):
+
+    def __init__(self, *args, **kwds):
+        if len(args) > 1:
+            raise TypeError('expected at most 1 argument, got %d' % len(args))
+        try:
+            self.__end
+        except AttributeError:
+            self.clear()
+        self.update(*args, **kwds)
+
+    def clear(self):
+        self.__end = end = []
+        end += [None, end, end]         # sentinel node for doubly linked list
+        self.__map = {}                 # key --> [key, prev, next]
+        dict.clear(self)
+
+    def __setitem__(self, key, value):
+        if key not in self:
+            end = self.__end
+            curr = end[1]
+            curr[2] = end[1] = self.__map[key] = [key, curr, end]
+        dict.__setitem__(self, key, value)
+
+    def __delitem__(self, key):
+        dict.__delitem__(self, key)
+        key, prev, next = self.__map.pop(key)
+        prev[2] = next
+        next[1] = prev
+
+    def __iter__(self):
+        end = self.__end
+        curr = end[2]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[2]
+
+    def __reversed__(self):
+        end = self.__end
+        curr = end[1]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[1]
+
+    def popitem(self, last=True):
+        if not self:
+            raise KeyError('dictionary is empty')
+        if last:
+            key = reversed(self).next()
+        else:
+            key = iter(self).next()
+        value = self.pop(key)
+        return key, value
+
+    def __reduce__(self):
+        items = [[k, self[k]] for k in self]
+        tmp = self.__map, self.__end
+        del self.__map, self.__end
+        inst_dict = vars(self).copy()
+        self.__map, self.__end = tmp
+        if inst_dict:
+            return (self.__class__, (items,), inst_dict)
+        return self.__class__, (items,)
+
+    def keys(self):
+        return list(self)
+
+    setdefault = DictMixin.setdefault
+    update = DictMixin.update
+    pop = DictMixin.pop
+    values = DictMixin.values
+    items = DictMixin.items
+    iterkeys = DictMixin.iterkeys
+    itervalues = DictMixin.itervalues
+    iteritems = DictMixin.iteritems
+
+    def __repr__(self):
+        if not self:
+            return '%s()' % (self.__class__.__name__,)
+        return '%s(%r)' % (self.__class__.__name__, self.items())
+
+    def copy(self):
+        return self.__class__(self)
+
+    @classmethod
+    def fromkeys(cls, iterable, value=None):
+        d = cls()
+        for key in iterable:
+            d[key] = value
+        return d
+
+    def __eq__(self, other):
+        if isinstance(other, OrderedDict):
+            if len(self) != len(other):
+                return False
+            for p, q in zip(self.items(), other.items()):
+                if p != q:
+                    return False
+            return True
+        return dict.__eq__(self, other)
+
+    def __ne__(self, other):
+        return not self == other
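+
+# Minimal usage sketch (Python 2, where this backport is used):
+#     d = OrderedDict([('a', 1), ('b', 2)])
+#     d['c'] = 3
+#     assert d.keys() == ['a', 'b', 'c']   # insertion order is preserved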
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index b90d009..0581148 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -1,725 +1,818 @@
-"""
-Parameter class
-"""
-from __future__ import division
-from numpy import arcsin, cos, sin, sqrt, inf, nan
-import json
-from copy import deepcopy
-try:
-    from collections import OrderedDict
-except ImportError:
-    from ordereddict import OrderedDict
-
-from . import uncertainties
-
-from .asteval import Interpreter
-from .astutils import get_ast_names, valid_symbol_name
-
-
-def check_ast_errors(expr_eval):
-    """check for errors derived from asteval"""
-    if len(expr_eval.error) > 0:
-        expr_eval.raise_exception(None)
-
-
-class Parameters(OrderedDict):
-    """
-    A dictionary of all the Parameters required to specify a fit model.
-
-    All keys must be strings, and valid Python symbol names, and all values
-    must be Parameters.
-
-    Custom methods:
-    ---------------
-
-    add()
-    add_many()
-    dumps() / dump()
-    loads() / load()
-    """
-    def __init__(self, asteval=None, *args, **kwds):
-        super(Parameters, self).__init__(self)
-        self._asteval = asteval
-
-        if asteval is None:
-            self._asteval = Interpreter()
-        self.update(*args, **kwds)
-
-    def __deepcopy__(self, memo):
-        _pars = Parameters()
-
-        # find the symbols that were added by users, not during construction
-        sym_unique = self._asteval.user_defined_symbols()
-        unique_symbols = {key: deepcopy(self._asteval.symtable[key], memo)
-                          for key in sym_unique}
-        _pars._asteval.symtable.update(unique_symbols)
-
-        # we're just about to add a lot of Parameter objects to the newly
-        parameter_list = []
-        for key, par in self.items():
-            if isinstance(par, Parameter):
-                param = Parameter(name=par.name,
-                                  value=par.value,
-                                  min=par.min,
-                                  max=par.max)
-                param.vary = par.vary
-                param.stderr = par.stderr
-                param.correl = par.correl
-                param.init_value = par.init_value
-                param.expr = par.expr
-                parameter_list.append(param)
-
-        _pars.add_many(*parameter_list)
-
-        return _pars
-
-    def __setitem__(self, key, par):
-        if key not in self:
-            if not valid_symbol_name(key):
-                raise KeyError("'%s' is not a valid Parameters name" % key)
-        if par is not None and not isinstance(par, Parameter):
-            raise ValueError("'%s' is not a Parameter" % par)
-        OrderedDict.__setitem__(self, key, par)
-        par.name = key
-        par._expr_eval = self._asteval
-        self._asteval.symtable[key] = par.value
-
-    def __add__(self, other):
-        "add Parameters objects"
-        if not isinstance(other, Parameters):
-            raise ValueError("'%s' is not a Parameters object" % other)
-        out = deepcopy(self)
-        params = other.values()
-        out.add_many(*params)
-        return out
-
-    def __iadd__(self, other):
-        """
-        add/assign Parameters objects
-        """
-        if not isinstance(other, Parameters):
-            raise ValueError("'%s' is not a Parameters object" % other)
-        params = other.values()
-        self.add_many(*params)
-        return self
-
-    def __reduce__(self):
-        """
-        Required to pickle a Parameters instance.
-        """
-        # make a list of all the parameters
-        params = [self[k] for k in self]
-
-        # find the symbols from _asteval.symtable, that need to be remembered.
-        sym_unique = self._asteval.user_defined_symbols()
-        unique_symbols = {key: deepcopy(self._asteval.symtable[key])
-                          for key in sym_unique}
-
-        return self.__class__, (), {'unique_symbols': unique_symbols,
-                                    'params': params}
-
-    def __setstate__(self, state):
-        """
-        Unpickle a Parameters instance.
-
-        Parameters
-        ----------
-        state : list
-            state[0] is a dictionary containing symbols that need to be
-            injected into _asteval.symtable
-            state[1:] are the Parameter instances to be added
-         is list of parameters
-        """
-        # first add all the parameters
-        self.add_many(*state['params'])
-
-        # now update the Interpreter symbol table
-        self._asteval.symtable.update(state['unique_symbols'])
-
-    def update_constraints(self):
-        """
-        Update all constrained parameters, checking that dependencies are
-        evaluated as needed.
-        """
-        _updated = [name for name,par in self.items() if par._expr is None]
-
-        def _update_param(name):
-            """
-            Update a parameter value, including setting bounds.
-            For a constrained parameter (one with an expr defined),
-            this first updates (recursively) all parameters on which
-            the parameter depends (using the 'deps' field).
-            """
-            # Has this param already been updated?
-            if name in _updated:
-                return
-            par = self.__getitem__(name)
-            if par._expr_eval is None:
-                par._expr_eval = self._asteval
-            if par._expr is not None:
-                par.expr = par._expr
-            if par._expr_ast is not None:
-                for dep in par._expr_deps:
-                    if dep in self.keys():
-                        _update_param(dep)
-            self._asteval.symtable[name] = par.value
-            _updated.append(name)
-
-        for name in self.keys():
-            if name not in _updated:
-                _update_param(name)
-
-    def pretty_repr(self, oneline=False):
-        if oneline:
-            return super(Parameters, self).__repr__()
-        s = "Parameters({\n"
-        for key in self.keys():
-            s += "    '%s': %s, \n" % (key, self[key])
-        s += "    })\n"
-        return s
-
-    def pretty_print(self, oneline=False):
-        print(self.pretty_repr(oneline=oneline))
-
-    def add(self, name, value=None, vary=True, min=None, max=None, expr=None):
-        """
-        Convenience function for adding a Parameter:
-
-        Example
-        -------
-        p = Parameters()
-        p.add(name, value=XX, ...)
-
-        is equivalent to:
-        p[name] = Parameter(name=name, value=XX, ....
-        """
-        if isinstance(name, Parameter):
-            self.__setitem__(name.name, name)
-        else:
-            self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
-                                             min=min, max=max, expr=expr))
-
-    def add_many(self, *parlist):
-        """
-        Convenience function for adding a list of Parameters.
-
-        Parameters
-        ----------
-        parlist : sequence
-            A sequence of tuples, or a sequence of `Parameter` instances. If it
-            is a sequence of tuples, then each tuple must contain at least the
-            name. The order in each tuple is the following:
-
-                name, value, vary, min, max, expr
-
-        Example
-        -------
-        p = Parameters()
-        # add a sequence of tuples
-        p.add_many( (name1, val1, True, None, None, None),
-                    (name2, val2, True,  0.0, None, None),
-                    (name3, val3, False, None, None, None),
-                    (name4, val4))
-
-        # add a sequence of Parameter
-        f = Parameter('name5', val5)
-        g = Parameter('name6', val6)
-        p.add_many(f, g)
-        """
-        for para in parlist:
-            if isinstance(para, Parameter):
-                self.__setitem__(para.name, para)
-            else:
-                param = Parameter(*para)
-                self.__setitem__(param.name, param)
-
-    def valuesdict(self):
-        """
-        Returns
-        -------
-        An ordered dictionary of name:value pairs for each Parameter.
-        This is distinct from the Parameters itself, as it has values of
-        the Parameter values, not the full Parameter object.
-        """
-
-        return OrderedDict(((p.name, p.value) for p in self.values()))
-
-    def dumps(self, **kws):
-        """represent Parameters as a JSON string.
-
-        all keyword arguments are passed to `json.dumps()`
-
-        Returns
-        -------
-        json string representation of Parameters
-
-        See Also
-        --------
-        dump(), loads(), load(), json.dumps()
-        """
-        out = [p.__getstate__() for p in self.values()]
-        return json.dumps(out, **kws)
-
-    def loads(self, s, **kws):
-        """load Parameters from a JSON string.
-
-        current Parameters will be cleared before loading.
-
-        all keyword arguments are passed to `json.loads()`
-
-        Returns
-        -------
-        None.   Parameters are updated as a side-effect
-
-        See Also
-        --------
-        dump(), dumps(), load(), json.loads()
-
-        """
-        self.clear()
-        for parstate in json.loads(s, **kws):
-            _par = Parameter()
-            _par.__setstate__(parstate)
-            self.__setitem__(parstate[0], _par)
-
-    def dump(self, fp, **kws):
-        """write JSON representation of Parameters to a file
-        or file-like object (must have a `write()` method).
-
-        Arguments
-        ---------
-        fp         open file-like object with `write()` method.
-
-        all keyword arguments are passed to `dumps()`
-
-        Returns
-        -------
-        return value from `fp.write()`
-
-        See Also
-        --------
-        dump(), load(), json.dump()
-        """
-        return fp.write(self.dumps(**kws))
-
-    def load(self, fp, **kws):
-        """load JSON representation of Parameters from a file
-        or file-like object (must have a `read()` method).
-
-        Arguments
-        ---------
-        fp         open file-like object with `read()` method.
-
-        all keyword arguments are passed to `loads()`
-
-        Returns
-        -------
-        None.   Parameters are updated as a side-effect
-
-        See Also
-        --------
-        dump(), loads(), json.load()
-        """
-        return self.loads(fp.read(), **kws)
-
-
-class Parameter(object):
-    """
-    A Parameter is an object used to define a Fit Model.
-    Attributes
-    ----------
-    name : str
-        Parameter name.
-    value : float
-        The numerical value of the Parameter.
-    vary : bool
-        Whether the Parameter is fixed during a fit.
-    min : float
-        Lower bound for value (None = no lower bound).
-    max : float
-        Upper bound for value (None = no upper bound).
-    expr : str
-        An expression specifying constraints for the parameter.
-    stderr : float
-        The estimated standard error for the best-fit value.
-    correl : dict
-        Specifies correlation with the other fitted Parameter after a fit.
-        Of the form `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}`
-    """
-    def __init__(self, name=None, value=None, vary=True,
-                 min=None, max=None, expr=None):
-        """
-        Parameters
-        ----------
-        name : str, optional
-            Name of the parameter.
-        value : float, optional
-            Numerical Parameter value.
-        vary : bool, optional
-            Whether the Parameter is fixed during a fit.
-        min : float, optional
-            Lower bound for value (None = no lower bound).
-        max : float, optional
-            Upper bound for value (None = no upper bound).
-        expr : str, optional
-            Mathematical expression used to constrain the value during the fit.
-        """
-        self.name = name
-        self._val = value
-        self.user_value = value
-        self.init_value = value
-        self.min = min
-        self.max = max
-        self.vary = vary
-        self._expr = expr
-        self._expr_ast = None
-        self._expr_eval = None
-        self._expr_deps = []
-        self._delay_asteval = False
-        self.stderr = None
-        self.correl = None
-        self.from_internal = lambda val: val
-        self._init_bounds()
-
-    def set(self, value=None, vary=None, min=None, max=None, expr=None):
-        """
-        Set or update Parameter attributes.
-
-        Parameters
-        ----------
-        value : float, optional
-            Numerical Parameter value.
-        vary : bool, optional
-            Whether the Parameter is fixed during a fit.
-        min : float, optional
-            Lower bound for value. To remove a lower bound you must use -np.inf
-        max : float, optional
-            Upper bound for value. To remove an upper bound you must use np.inf
-        expr : str, optional
-            Mathematical expression used to constrain the value during the fit.
-            To remove a constraint you must supply an empty string.
-        """
-
-        self.__set_expression(expr)
-        if value is not None:
-            self._val = value
-        if vary is not None:
-            self.vary = vary
-        if min is not None:
-            self.min = min
-        if max is not None:
-            self.max = max
-
-    def _init_bounds(self):
-        """make sure initial bounds are self-consistent"""
-        #_val is None means - infinity.
-        if self._val is not None:
-            if self.max is not None and self._val > self.max:
-                self._val = self.max
-            if self.min is not None and self._val < self.min:
-                self._val = self.min
-        elif self.min is not None and self._expr is None:
-            self._val = self.min
-        elif self.max is not None and self._expr is None:
-            self._val = self.max
-        self.setup_bounds()
-
-    def __getstate__(self):
-        """get state for pickle"""
-        return (self.name, self.value, self.vary, self.expr, self.min,
-                self.max, self.stderr, self.correl, self.init_value)
-
-    def __setstate__(self, state):
-        """set state for pickle"""
-        (self.name, self.value, self.vary, self.expr, self.min,
-         self.max, self.stderr, self.correl, self.init_value) = state
-        self._expr_ast = None
-        self._expr_eval = None
-        self._expr_deps = []
-        self._delay_asteval = False
-        self._init_bounds()
-
-    def __repr__(self):
-        s = []
-        if self.name is not None:
-            s.append("'%s'" % self.name)
-        sval = repr(self._getval())
-        if not self.vary and self._expr is None:
-            sval = "value=%s (fixed)" % (sval)
-        elif self.stderr is not None:
-            sval = "value=%s +/- %.3g" % (sval, self.stderr)
-        s.append(sval)
-        s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
-        if self._expr is not None:
-            s.append("expr='%s'" % (self.expr))
-        return "<Parameter %s>" % ', '.join(s)
-
-    def setup_bounds(self):
-        """
-        Set up Minuit-style internal/external parameter transformation
-        of min/max bounds.
-
-        As a side-effect, this also defines the self.from_internal method
-        used to re-calculate self.value from the internal value, applying
-        the inverse Minuit-style transformation.  This method should be
-        called prior to passing a Parameter to the user-defined objective
-        function.
-
-        This code borrows heavily from JJ Helmus' leastsqbound.py
-
-        Returns
-        -------
-        The internal value for parameter from self.value (which holds
-        the external, user-expected value).   This internal value should
-        actually be used in a fit.
-        """
-        if self.min in (None, -inf) and self.max in (None, inf):
-            self.from_internal = lambda val: val
-            _val  = self._val
-        elif self.max in (None, inf):
-            self.from_internal = lambda val: self.min - 1.0 + sqrt(val*val + 1)
-            _val  = sqrt((self._val - self.min + 1.0)**2 - 1)
-        elif self.min in (None, -inf):
-            self.from_internal = lambda val: self.max + 1 - sqrt(val*val + 1)
-            _val  = sqrt((self.max - self._val + 1.0)**2 - 1)
-        else:
-            self.from_internal = lambda val: self.min + (sin(val) + 1) * \
-                                 (self.max - self.min) / 2.0
-            _val  = arcsin(2*(self._val - self.min)/(self.max - self.min) - 1)
-        return _val
-
-    def scale_gradient(self, val):
-        """
-        Returns
-        -------
-        scaling factor for gradient the according to Minuit-style
-        transformation.
-        """
-        if self.min in (None, -inf) and self.max in (None, inf):
-            return 1.0
-        elif self.max in (None, inf):
-            return val / sqrt(val*val + 1)
-        elif self.min in (None, -inf):
-            return -val / sqrt(val*val + 1)
-        else:
-            return cos(val) * (self.max - self.min) / 2.0
-
-
-    def _getval(self):
-        """get value, with bounds applied"""
-
-        # Note assignment to self._val has been changed to self.value
-        # The self.value property setter makes sure that the
-        # _expr_eval.symtable is kept updated.
-        # If you just assign to self._val then
-        # _expr_eval.symtable[self.name]
-        # becomes stale if parameter.expr is not None.
-        if (isinstance(self._val, uncertainties.Variable)
-            and self._val is not nan):
-            try:
-                self.value = self._val.nominal_value
-            except AttributeError:
-                pass
-        if not self.vary and self._expr is None:
-            return self._val
-        if not hasattr(self, '_expr_eval'):
-            self._expr_eval = None
-        if not hasattr(self, '_expr_ast'):
-            self._expr_ast = None
-        if self._expr_ast is None and self._expr is not None:
-            self.__set_expression(self._expr)
-
-        if self._expr_ast is not None and self._expr_eval is not None:
-            if not self._delay_asteval:
-                self.value = self._expr_eval(self._expr_ast)
-                check_ast_errors(self._expr_eval)
-
-        if self.min is None:
-            self.min = -inf
-        if self.max is None:
-            self.max = inf
-        if self.max < self.min:
-            self.max, self.min = self.min, self.max
-        if (abs((1.0 * self.max - self.min)/
-                max(abs(self.max), abs(self.min), 1.e-13)) < 1.e-13):
-            raise ValueError("Parameter '%s' has min == max" % self.name)
-        try:
-            self.value = max(self.min, min(self._val, self.max))
-        except(TypeError, ValueError):
-            self.value = nan
-        return self._val
-
-    def set_expr_eval(self, evaluator):
-        "set expression evaluator instance"
-        self._expr_eval = evaluator
-
-    @property
-    def value(self):
-        "The numerical value of the Parameter, with bounds applied"
-        return self._getval()
-
-    @value.setter
-    def value(self, val):
-        "Set the numerical Parameter value."
-        self._val = val
-        if not hasattr(self, '_expr_eval'):  self._expr_eval = None
-        if self._expr_eval is not None:
-            self._expr_eval.symtable[self.name] = val
-
-    @property
-    def expr(self):
-        """
-        The mathematical expression used to constrain the value during the fit.
-        """
-        return self._expr
-
-    @expr.setter
-    def expr(self, val):
-        """
-        The mathematical expression used to constrain the value during the fit.
-        To remove a constraint you must supply an empty string.
-        """
-        self.__set_expression(val)
-
-    def __set_expression(self, val):
-        if val == '':
-            val = None
-        self._expr = val
-        if val is not None:
-            self.vary = False
-        if not hasattr(self, '_expr_eval'):  self._expr_eval = None
-        if val is None: self._expr_ast = None
-        if val is not None and self._expr_eval is not None:
-            self._expr_ast = self._expr_eval.parse(val)
-            check_ast_errors(self._expr_eval)
-            self._expr_deps = get_ast_names(self._expr_ast)
-
-    def __str__(self):
-        "string"
-        return self.__repr__()
-
-    def __abs__(self):
-        "abs"
-        return abs(self._getval())
-
-    def __neg__(self):
-        "neg"
-        return -self._getval()
-
-    def __pos__(self):
-        "positive"
-        return +self._getval()
-
-    def __nonzero__(self):
-        "not zero"
-        return self._getval() != 0
-
-    def __int__(self):
-        "int"
-        return int(self._getval())
-
-    def __long__(self):
-        "long"
-        return long(self._getval())
-
-    def __float__(self):
-        "float"
-        return float(self._getval())
-
-    def __trunc__(self):
-        "trunc"
-        return self._getval().__trunc__()
-
-    def __add__(self, other):
-        "+"
-        return self._getval() + other
-
-    def __sub__(self, other):
-        "-"
-        return self._getval() - other
-
-    def __div__(self, other):
-        "/"
-        return self._getval() / other
-    __truediv__ = __div__
-
-    def __floordiv__(self, other):
-        "//"
-        return self._getval() // other
-
-    def __divmod__(self, other):
-        "divmod"
-        return divmod(self._getval(), other)
-
-    def __mod__(self, other):
-        "%"
-        return self._getval() % other
-
-    def __mul__(self, other):
-        "*"
-        return self._getval() * other
-
-    def __pow__(self, other):
-        "**"
-        return self._getval() ** other
-
-    def __gt__(self, other):
-        ">"
-        return self._getval() > other
-
-    def __ge__(self, other):
-        ">="
-        return self._getval() >= other
-
-    def __le__(self, other):
-        "<="
-        return self._getval() <= other
-
-    def __lt__(self, other):
-        "<"
-        return self._getval() < other
-
-    def __eq__(self, other):
-        "=="
-        return self._getval() == other
-    def __ne__(self, other):
-        "!="
-        return self._getval() != other
-
-    def __radd__(self, other):
-        "+ (right)"
-        return other + self._getval()
-
-    def __rdiv__(self, other):
-        "/ (right)"
-        return other / self._getval()
-    __rtruediv__ = __rdiv__
-
-    def __rdivmod__(self, other):
-        "divmod (right)"
-        return divmod(other, self._getval())
-
-    def __rfloordiv__(self, other):
-        "// (right)"
-        return other // self._getval()
-
-    def __rmod__(self, other):
-        "% (right)"
-        return other % self._getval()
-
-    def __rmul__(self, other):
-        "* (right)"
-        return other * self._getval()
-
-    def __rpow__(self, other):
-        "** (right)"
-        return other ** self._getval()
-
-    def __rsub__(self, other):
-        "- (right)"
-        return other - self._getval()
-
-def isParameter(x):
-    "test for Parameter-ness"
-    return (isinstance(x, Parameter) or
-            x.__class__.__name__ == 'Parameter')
+"""
+Parameter class
+"""
+from __future__ import division
+from numpy import arcsin, cos, sin, sqrt, inf, nan, isfinite
+import json
+from copy import deepcopy
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+from . import uncertainties
+
+from .asteval import Interpreter
+from .astutils import get_ast_names, valid_symbol_name
+
+
+def check_ast_errors(expr_eval):
+    """check for errors derived from asteval"""
+    if len(expr_eval.error) > 0:
+        expr_eval.raise_exception(None)
+
+
+def isclose(x, y, rtol=1e-5, atol=1e-8):
+    """
+    The truth whether two numbers are the same, within an absolute and
+    relative tolerance.
+
+    i.e. abs(`x` - `y`) <= (`atol` + `rtol` * absolute(`y`))
+
+    Parameters
+    ----------
+    x, y : float
+        Input values
+    rtol : float
+        The relative tolerance parameter (see Notes).
+    atol : float
+        The absolute tolerance parameter (see Notes).
+
+    Returns
+    -------
+    y : bool
+        Are `x` and `x` are equal within tolerance?
+    """
+    def within_tol(x, y, atol, rtol):
+        return abs(x - y) <= atol + rtol * abs(y)
+
+    xfin = isfinite(x)
+    yfin = isfinite(y)
+
+    # both are finite
+    if xfin and yfin:
+        return within_tol(x, y, atol, rtol)
+    elif x == y:
+        return True
+    else:
+        return False
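+# e.g. isclose(1.0, 1.0 + 5e-6) is True; isclose(inf, inf) is True, since the
+# values compare equal; isclose(nan, nan) is False (nan != nan).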
+
+
+class Parameters(OrderedDict):
+    """
+    A dictionary of all the Parameters required to specify a fit model.
+
+    All keys must be strings, and valid Python symbol names, and all values
+    must be Parameters.
+
+    Custom methods:
+    ---------------
+
+    add()
+    add_many()
+    dumps() / dump()
+    loads() / load()
+    """
+    def __init__(self, asteval=None, *args, **kwds):
+        super(Parameters, self).__init__(self)
+        self._asteval = asteval
+
+        if asteval is None:
+            self._asteval = Interpreter()
+        self.update(*args, **kwds)
+
+    def copy(self):
+        """Parameters.copy() should always be a deepcopy"""
+        return self.__deepcopy__(None)
+
+    def __copy__(self):
+        """Parameters.copy() should always be a deepcopy"""
+        return self.__deepcopy__(None)
+
+    def __deepcopy__(self, memo):
+        """Parameters deepcopy needs to make sure that
+        asteval is available and that all individula
+        parameter objects are copied"""
+        _pars = Parameters(asteval=None)
+
+        # find the symbols that were added by users, not during construction
+        sym_unique = self._asteval.user_defined_symbols()
+        unique_symbols = {key: deepcopy(self._asteval.symtable[key], memo)
+                          for key in sym_unique}
+        _pars._asteval.symtable.update(unique_symbols)
+
+        # we're just about to add a lot of Parameter objects to the newly
+        # created Parameters
+        parameter_list = []
+        for key, par in self.items():
+            if isinstance(par, Parameter):
+                param = Parameter(name=par.name,
+                                  value=par.value,
+                                  min=par.min,
+                                  max=par.max)
+                param.vary = par.vary
+                param.stderr = par.stderr
+                param.correl = par.correl
+                param.init_value = par.init_value
+                param.expr = par.expr
+                parameter_list.append(param)
+
+        _pars.add_many(*parameter_list)
+
+        return _pars
+
+    def __setitem__(self, key, par):
+        if key not in self:
+            if not valid_symbol_name(key):
+                raise KeyError("'%s' is not a valid Parameters name" % key)
+        if par is not None and not isinstance(par, Parameter):
+            raise ValueError("'%s' is not a Parameter" % par)
+        OrderedDict.__setitem__(self, key, par)
+        par.name = key
+        par._expr_eval = self._asteval
+        self._asteval.symtable[key] = par.value
+
+    def __add__(self, other):
+        """
+        Add Parameters objects
+        """
+        if not isinstance(other, Parameters):
+            raise ValueError("'%s' is not a Parameters object" % other)
+        out = deepcopy(self)
+        params = other.values()
+        out.add_many(*params)
+        return out
+
+    def __iadd__(self, other):
+        """
+        Add/assign Parameters objects
+        """
+        if not isinstance(other, Parameters):
+            raise ValueError("'%s' is not a Parameters object" % other)
+        params = other.values()
+        self.add_many(*params)
+        return self
+
+    def __reduce__(self):
+        """
+        Required to pickle a Parameters instance.
+        """
+        # make a list of all the parameters
+        params = [self[k] for k in self]
+
+        # find the symbols from _asteval.symtable, that need to be remembered.
+        sym_unique = self._asteval.user_defined_symbols()
+        unique_symbols = {key: deepcopy(self._asteval.symtable[key])
+                          for key in sym_unique}
+
+        return self.__class__, (), {'unique_symbols': unique_symbols,
+                                    'params': params}
+
+    def __setstate__(self, state):
+        """
+        Unpickle a Parameters instance.
+
+        Parameters
+        ----------
+        state : dict
+            state['unique_symbols'] is a dictionary containing symbols that
+            need to be injected into _asteval.symtable
+            state['params'] is a list of Parameter instances to be added
+        """
+        # first update the Interpreter symbol table. This must happen first
+        # because Parameters early in the list may depend on later ones.
+        # Otherwise add_many would eventually retrieve a Parameter value via
+        # _getval, which raises an error if a dependency is not yet in the
+        # symtable. An alternative would be to strip the expr from each
+        # Parameter before adding it, then restore them afterwards.
+        self._asteval.symtable.update(state['unique_symbols'])
+
+        # then add all the parameters
+        self.add_many(*state['params'])
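+
+        # Illustrative sketch (not part of the API): because __reduce__
+        # and __setstate__ capture both the Parameter list and the
+        # user-defined asteval symbols, a Parameters instance survives a
+        # pickle round-trip, e.g.
+        #     import pickle
+        #     restored = pickle.loads(pickle.dumps(params))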
+
+
+    def update_constraints(self):
+        """
+        Update all constrained parameters, checking that dependencies are
+        evaluated as needed.
+        """
+        requires_update = {name for name, par in self.items()
+                           if par._expr is not None}
+        updated_tracker = set(requires_update)
+
+        def _update_param(name):
+            """
+            Update a parameter value, including setting bounds.
+            For a constrained parameter (one with an expr defined),
+            this first updates (recursively) all parameters on which
+            the parameter depends (using the 'deps' field).
+            """
+            par = self.__getitem__(name)
+            if par._expr_eval is None:
+                par._expr_eval = self._asteval
+            for dep in par._expr_deps:
+                if dep in updated_tracker:
+                    _update_param(dep)
+            self._asteval.symtable[name] = par.value
+            updated_tracker.discard(name)
+
+        for name in requires_update:
+            _update_param(name)
+
+    def pretty_repr(self, oneline=False):
+        if oneline:
+            return super(Parameters, self).__repr__()
+        s = "Parameters({\n"
+        for key in self.keys():
+            s += "    '%s': %s, \n" % (key, self[key])
+        s += "    })\n"
+        return s
+
+    def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
+                     columns=['value', 'min', 'max', 'stderr', 'vary', 'expr']):
+        """Pretty-print parameters data.
+
+        Parameters
+        ----------
+        oneline : boolean
+            If True, print a one-line parameters representation. Default False.
+        colwidth : int
+            Column width for all columns except the first (i.e. name) column.
+        precision : int
+            Number of digits to be printed after the decimal point.
+        fmt : string
+            Single-character numeric formatter. Valid values: 'f' floating
+            point, 'g' floating point and exponential, 'e' exponential.
+        columns : list of strings
+            List of column names to print. All values must be valid
+            :class:`Parameter` attributes.
+        """
+        if oneline:
+            print(self.pretty_repr(oneline=oneline))
+            return
+
+        name_len = max(len(s) for s in self)
+        allcols = ['name'] + columns
+        title = '{:{name_len}} ' + len(columns) * ' {:>{n}}'
+        print(title.format(*allcols, name_len=name_len, n=colwidth).title())
+        numstyle = '{%s:>{n}.{p}{f}}'  # format for numeric columns
+        otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
+                           vary='{vary!s:>{n}}', expr='{expr!s:>{n}}')
+        line = ' '.join([otherstyles.get(k, numstyle % k) for k in allcols])
+        for name, values in sorted(self.items()):
+            pvalues = {k: getattr(values, k) for k in columns}
+            pvalues['name'] = name
+            # stderr is a special case: either numeric, or None (rendered via str)
+            if 'stderr' in columns and pvalues['stderr'] is not None:
+                pvalues['stderr'] = (numstyle % '').format(
+                    pvalues['stderr'], n=colwidth, p=precision, f=fmt)
+            print(line.format(name_len=name_len, n=colwidth, p=precision, f=fmt,
+                              **pvalues))
+
+    def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None):
+        """
+        Convenience function for adding a Parameter:
+
+        Example
+        -------
+        p = Parameters()
+        p.add(name, value=XX, ...)
+
+        is equivalent to:
+        p[name] = Parameter(name=name, value=XX, ...)
+        """
+        if isinstance(name, Parameter):
+            self.__setitem__(name.name, name)
+        else:
+            self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
+                                             min=min, max=max, expr=expr))
+
+    def add_many(self, *parlist):
+        """
+        Convenience function for adding a list of Parameters.
+
+        Parameters
+        ----------
+        parlist : sequence
+            A sequence of tuples, or a sequence of `Parameter` instances. If it
+            is a sequence of tuples, then each tuple must contain at least the
+            name. The order in each tuple is the following:
+
+                name, value, vary, min, max, expr
+
+        Example
+        -------
+        p = Parameters()
+        # add a sequence of tuples
+        p.add_many( (name1, val1, True, None, None, None),
+                    (name2, val2, True,  0.0, None, None),
+                    (name3, val3, False, None, None, None),
+                    (name4, val4))
+
+        # add a sequence of Parameter
+        f = Parameter('name5', val5)
+        g = Parameter('name6', val6)
+        p.add_many(f, g)
+        """
+        for para in parlist:
+            if isinstance(para, Parameter):
+                self.__setitem__(para.name, para)
+            else:
+                param = Parameter(*para)
+                self.__setitem__(param.name, param)
+
+    def valuesdict(self):
+        """
+        Returns
+        -------
+        An ordered dictionary of name:value pairs for each Parameter.
+        This is distinct from the Parameters object itself, as it holds
+        only the Parameter values, not the full Parameter objects.
+        """
+
+        return OrderedDict(((p.name, p.value) for p in self.values()))
+
+    def dumps(self, **kws):
+        """represent Parameters as a JSON string.
+
+        all keyword arguments are passed to `json.dumps()`
+
+        Returns
+        -------
+        json string representation of Parameters
+
+        See Also
+        --------
+        dump(), loads(), load(), json.dumps()
+        """
+        out = [p.__getstate__() for p in self.values()]
+        return json.dumps(out, **kws)
+
+    def loads(self, s, **kws):
+        """load Parameters from a JSON string.
+
+        current Parameters will be cleared before loading.
+
+        all keyword arguments are passed to `json.loads()`
+
+        Returns
+        -------
+        None.   Parameters are updated as a side-effect
+
+        See Also
+        --------
+        dump(), dumps(), load(), json.loads()
+
+        """
+        self.clear()
+        for parstate in json.loads(s, **kws):
+            _par = Parameter()
+            _par.__setstate__(parstate)
+            self.__setitem__(parstate[0], _par)
+
+    def dump(self, fp, **kws):
+        """write JSON representation of Parameters to a file
+        or file-like object (must have a `write()` method).
+
+        Arguments
+        ---------
+        fp         open file-like object with `write()` method.
+
+        all keyword arguments are passed to `dumps()`
+
+        Returns
+        -------
+        return value from `fp.write()`
+
+        See Also
+        --------
+        dump(), load(), json.dump()
+        """
+        return fp.write(self.dumps(**kws))
+
+    def load(self, fp, **kws):
+        """load JSON representation of Parameters from a file
+        or file-like object (must have a `read()` method).
+
+        Arguments
+        ---------
+        fp         open file-like object with `read()` method.
+
+        all keyword arguments are passed to `loads()`
+
+        Returns
+        -------
+        None.   Parameters are updated as a side-effect
+
+        See Also
+        --------
+        dump(), loads(), json.load()
+        """
+        return self.loads(fp.read(), **kws)
+
+
+class Parameter(object):
+    """
+    A Parameter is an object used to define a Fit Model.
+
+    Attributes
+    ----------
+    name : str
+        Parameter name.
+    value : float
+        The numerical value of the Parameter.
+    vary : bool
+        Whether the Parameter is varied during a fit.
+    min : float
+        Lower bound for value (None or -inf means no lower bound).
+    max : float
+        Upper bound for value (None or inf means no upper bound).
+    expr : str
+        An expression specifying constraints for the parameter.
+    stderr : float
+        The estimated standard error for the best-fit value.
+    correl : dict
+        Specifies correlation with other fitted Parameters after a fit.
+        Of the form `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}`
+    """
+    def __init__(self, name=None, value=None, vary=True,
+                 min=-inf, max=inf, expr=None):
+        """
+        Parameters
+        ----------
+        name : str, optional
+            Name of the parameter.
+        value : float, optional
+            Numerical Parameter value.
+        vary : bool, optional
+            Whether the Parameter is varied during a fit.
+        min : float, optional
+            Lower bound for value (None or -inf means no lower bound).
+        max : float, optional
+            Upper bound for value (None or inf means no upper bound).
+        expr : str, optional
+            Mathematical expression used to constrain the value during the fit.
+        """
+        self.name = name
+        self._val = value
+        self.user_value = value
+        self.init_value = value
+        self.min = min
+        self.max = max
+        self.vary = vary
+        self._expr = expr
+        self._expr_ast = None
+        self._expr_eval = None
+        self._expr_deps = []
+        self._delay_asteval = False
+        self.stderr = None
+        self.correl = None
+        self.from_internal = lambda val: val
+        self._init_bounds()
+
+    def set(self, value=None, vary=None, min=-inf, max=inf, expr=None):
+        """
+        Set or update Parameter attributes.
+
+        Parameters
+        ----------
+        value : float, optional
+            Numerical Parameter value.
+        vary : bool, optional
+            Whether the Parameter is varied during a fit.
+        min : float, optional
+            Lower bound for value. To remove a lower bound you must use -np.inf
+        max : float, optional
+            Upper bound for value. To remove an upper bound you must use np.inf
+        expr : str, optional
+            Mathematical expression used to constrain the value during the fit.
+            To remove a constraint you must supply an empty string.
+        """
+
+        self.__set_expression(expr)
+        if value is not None:
+            self._val = value
+        if vary is not None:
+            self.vary = vary
+        if min is None:
+            min = -inf
+        if max is None:
+            max = inf
+        self.min = min
+        self.max = max
+
+    def _init_bounds(self):
+        """make sure initial bounds are self-consistent"""
+        # min/max of None are interpreted as -inf / +inf
+        if self.max is None:
+            self.max = inf
+        if self.min is None:
+            self.min = -inf
+        if self._val is not None:
+            if self.min > self.max:
+                self.min, self.max = self.max, self.min
+            if isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
+                raise ValueError("Parameter '%s' has min == max" % self.name)
+
+            if self._val > self.max:
+                self._val = self.max
+            if self._val < self.min:
+                self._val = self.min
+        elif self._expr is None:
+            self._val = self.min
+        self.setup_bounds()
+
+    def __getstate__(self):
+        """get state for pickle"""
+        return (self.name, self.value, self.vary, self.expr, self.min,
+                self.max, self.stderr, self.correl, self.init_value)
+
+    def __setstate__(self, state):
+        """set state for pickle"""
+        (self.name, self.value, self.vary, self.expr, self.min,
+         self.max, self.stderr, self.correl, self.init_value) = state
+        self._expr_ast = None
+        self._expr_eval = None
+        self._expr_deps = []
+        self._delay_asteval = False
+        self._init_bounds()
+
+    def __repr__(self):
+        s = []
+        if self.name is not None:
+            s.append("'%s'" % self.name)
+        sval = repr(self._getval())
+        if not self.vary and self._expr is None:
+            sval = "value=%s (fixed)" % sval
+        elif self.stderr is not None:
+            sval = "value=%s +/- %.3g" % (sval, self.stderr)
+        s.append(sval)
+        s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
+        if self._expr is not None:
+            s.append("expr='%s'" % self.expr)
+        return "<Parameter %s>" % ', '.join(s)
+
+    def setup_bounds(self):
+        """
+        Set up Minuit-style internal/external parameter transformation
+        of min/max bounds.
+
+        As a side-effect, this also defines the self.from_internal method
+        used to re-calculate self.value from the internal value, applying
+        the inverse Minuit-style transformation.  This method should be
+        called prior to passing a Parameter to the user-defined objective
+        function.
+
+        This code borrows heavily from JJ Helmus' leastsqbound.py
+
+        Returns
+        -------
+        The internal value of the parameter, derived from self.value
+        (which holds the external, user-expected value).  This internal
+        value is what should actually be used in a fit.
+        """
+        if self.min is None:
+            self.min = -inf
+        if self.max is None:
+            self.max = inf
+        if self.min == -inf and self.max == inf:
+            self.from_internal = lambda val: val
+            _val = self._val
+        elif self.max == inf:
+            self.from_internal = lambda val: self.min - 1.0 + sqrt(val*val + 1)
+            _val = sqrt((self._val - self.min + 1.0)**2 - 1)
+        elif self.min == -inf:
+            self.from_internal = lambda val: self.max + 1 - sqrt(val*val + 1)
+            _val = sqrt((self.max - self._val + 1.0)**2 - 1)
+        else:
+            self.from_internal = lambda val: self.min + (sin(val) + 1) * \
+                                 (self.max - self.min) / 2.0
+            _val = arcsin(2*(self._val - self.min)/(self.max - self.min) - 1)
+        return _val
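+
+        # For example (illustrative): with min=0 and max=inf, an
+        # unconstrained internal value i maps to the external value
+        # sqrt(i*i + 1) - 1 >= 0, so the optimizer can vary i freely
+        # while the external value respects the bound.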
+
+    def scale_gradient(self, val):
+        """
+        Returns
+        -------
+        scaling factor for the gradient, according to the Minuit-style
+        transformation.
+        """
+        if self.min == -inf and self.max == inf:
+            return 1.0
+        elif self.max == inf:
+            return val / sqrt(val*val + 1)
+        elif self.min == -inf:
+            return -val / sqrt(val*val + 1)
+        else:
+            return cos(val) * (self.max - self.min) / 2.0
+
+    def _getval(self):
+        """get value, with bounds applied"""
+
+        # Note assignment to self._val has been changed to self.value
+        # The self.value property setter makes sure that the
+        # _expr_eval.symtable is kept updated.
+        # If you just assign to self._val then
+        # _expr_eval.symtable[self.name]
+        # becomes stale if parameter.expr is not None.
+        if (isinstance(self._val, uncertainties.Variable)
+            and self._val is not nan):
+            try:
+                self.value = self._val.nominal_value
+            except AttributeError:
+                pass
+        if not self.vary and self._expr is None:
+            return self._val
+
+        if self._expr is not None:
+            if self._expr_ast is None:
+                self.__set_expression(self._expr)
+
+            if self._expr_eval is not None:
+                if not self._delay_asteval:
+                    self.value = self._expr_eval(self._expr_ast)
+                    check_ast_errors(self._expr_eval)
+
+        v = self._val
+        if v > self.max:
+            v = self.max
+        if v < self.min:
+            v = self.min
+        self.value = self._val = v
+        return self._val
+
+    def set_expr_eval(self, evaluator):
+        """set expression evaluator instance"""
+        self._expr_eval = evaluator
+
+    @property
+    def value(self):
+        """The numerical value of the Parameter, with bounds applied"""
+        return self._getval()
+
+    @value.setter
+    def value(self, val):
+        """
+        Set the numerical Parameter value.
+        """
+        self._val = val
+        if not hasattr(self, '_expr_eval'):
+            self._expr_eval = None
+        if self._expr_eval is not None:
+            self._expr_eval.symtable[self.name] = val
+
+    @property
+    def expr(self):
+        """
+        The mathematical expression used to constrain the value during the fit.
+        """
+        return self._expr
+
+    @expr.setter
+    def expr(self, val):
+        """
+        The mathematical expression used to constrain the value during the fit.
+        To remove a constraint you must supply an empty string.
+        """
+        self.__set_expression(val)
+
+    def __set_expression(self, val):
+        if val == '':
+            val = None
+        self._expr = val
+        if val is not None:
+            self.vary = False
+        if not hasattr(self, '_expr_eval'):
+            self._expr_eval = None
+        if val is None:
+            self._expr_ast = None
+        if val is not None and self._expr_eval is not None:
+            self._expr_ast = self._expr_eval.parse(val)
+            check_ast_errors(self._expr_eval)
+            self._expr_deps = get_ast_names(self._expr_ast)
+
+    def __str__(self):
+        """string"""
+        return self.__repr__()
+
+    def __abs__(self):
+        """abs"""
+        return abs(self._getval())
+
+    def __neg__(self):
+        """neg"""
+        return -self._getval()
+
+    def __pos__(self):
+        """positive"""
+        return +self._getval()
+
+    def __nonzero__(self):
+        """not zero"""
+        return self._getval() != 0
+
+    def __int__(self):
+        """int"""
+        return int(self._getval())
+
+    def __float__(self):
+        """float"""
+        return float(self._getval())
+
+    def __trunc__(self):
+        """trunc"""
+        return self._getval().__trunc__()
+
+    def __add__(self, other):
+        """+"""
+        return self._getval() + other
+
+    def __sub__(self, other):
+        """-"""
+        return self._getval() - other
+
+    def __div__(self, other):
+        """/"""
+        return self._getval() / other
+    __truediv__ = __div__
+
+    def __floordiv__(self, other):
+        """//"""
+        return self._getval() // other
+
+    def __divmod__(self, other):
+        """divmod"""
+        return divmod(self._getval(), other)
+
+    def __mod__(self, other):
+        """%"""
+        return self._getval() % other
+
+    def __mul__(self, other):
+        """*"""
+        return self._getval() * other
+
+    def __pow__(self, other):
+        """**"""
+        return self._getval() ** other
+
+    def __gt__(self, other):
+        """>"""
+        return self._getval() > other
+
+    def __ge__(self, other):
+        """>="""
+        return self._getval() >= other
+
+    def __le__(self, other):
+        """<="""
+        return self._getval() <= other
+
+    def __lt__(self, other):
+        """<"""
+        return self._getval() < other
+
+    def __eq__(self, other):
+        """=="""
+        return self._getval() == other
+
+    def __ne__(self, other):
+        """!="""
+        return self._getval() != other
+
+    def __radd__(self, other):
+        """+ (right)"""
+        return other + self._getval()
+
+    def __rdiv__(self, other):
+        """/ (right)"""
+        return other / self._getval()
+    __rtruediv__ = __rdiv__
+
+    def __rdivmod__(self, other):
+        """divmod (right)"""
+        return divmod(other, self._getval())
+
+    def __rfloordiv__(self, other):
+        """// (right)"""
+        return other // self._getval()
+
+    def __rmod__(self, other):
+        """% (right)"""
+        return other % self._getval()
+
+    def __rmul__(self, other):
+        """* (right)"""
+        return other * self._getval()
+
+    def __rpow__(self, other):
+        """** (right)"""
+        return other ** self._getval()
+
+    def __rsub__(self, other):
+        """- (right)"""
+        return other - self._getval()
+
+
+def isParameter(x):
+    """Test for Parameter-ness"""
+    return (isinstance(x, Parameter) or
+            x.__class__.__name__ == 'Parameter')
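
A minimal usage sketch of the Parameters API above (illustrative only: the
parameter names and values are arbitrary, and lmfit 0.9.3 is assumed to be
importable):

    from lmfit import Parameters

    p = Parameters()
    p.add('amp', value=10.0, min=0)                 # a single Parameter
    p.add_many(('decay', 0.5, True, 0, None, None),
               ('phase', 0.1))                      # (name, value, vary, ...)
    p.add('twice_amp', expr='2*amp')                # constrained by expression

    print(p.valuesdict())     # OrderedDict of name: value pairs
    q = Parameters()
    q.loads(p.dumps())        # JSON round-trip via dumps()/loads()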
diff --git a/lmfit/printfuncs.py b/lmfit/printfuncs.py
index 76537bc..4b279b0 100644
--- a/lmfit/printfuncs.py
+++ b/lmfit/printfuncs.py
@@ -1,227 +1,229 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Fri Apr 20 19:24:21 2012
-
-@author: Tillsten
-
-Changes:
-  -  13-Feb-2013 M Newville
-     complemented  "report_errors" and "report_ci" with
-     "error_report" and "ci_report" (respectively) which
-     return the text of the report.  Thus report_errors()
-     is simply:
-        def report_errors(params, modelpars=None, show_correl=True):
-            print error_report(params, modelpars=modelpars,
-                               show_correl=show_correl)
-     and similar for report_ci() / ci_report()
-
-"""
-
-from __future__ import print_function
-from .parameter import Parameters
-import re
-
-def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')):
-    return [int(text) if text.isdigit() else text.lower()
-            for text in re.split(_nsre, s)]
-
-def getfloat_attr(obj, attr, fmt='%.3f'):
-    "format an attribute of an object for printing"
-    val = getattr(obj, attr, None)
-    if val is None:
-        return 'unknown'
-    if isinstance(val, int):
-        return '%d' % val
-    if isinstance(val, float):
-        return fmt % val
-    else:
-        return repr(val)
-
-def gformat(val, length=11):
-    """format a number with '%g'-like format, except that
-    the returned string will have length ``length`` (default 11)
-    and at least length-6 significant digits
-    """
-    length = max(length, 7)
-    fmt = '{: .%ig}' % (length-6)
-    if isinstance(val, int):
-        out = ('{: .%ig}' % (length-2)).format(val)
-        if len(out) > length:
-            out = fmt.format(val)
-    else:
-        out = fmt.format(val)
-    if len(out) < length:
-        if 'e' in out:
-            ie = out.find('e')
-            if '.' not in out[:ie]:
-                out = out[:ie] + '.' + out[ie:]
-            out = out.replace('e', '0'*(length-len(out))+'e')
-        else:
-            fmt = '{: .%ig}' % (length-1)
-            out = fmt.format(val)[:length]
-            if len(out) < length:
-                pad = '0' if '.' in  out else ' '
-                out += pad*(length-len(out))
-    return out
-
-CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
-
-def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
-               sort_pars=False):
-    """return text of a report for fitted params best-fit values,
-    uncertainties and correlations
-
-    arguments
-    ----------
-       inpars       Parameters from a fit, or Minimizer object returned from a fit.
-       modelpars    Optional Known Model Parameters [None]
-       show_correl  whether to show list of sorted correlations [True]
-       min_correl   smallest correlation absolute value to show [0.1]
-       sort_pars    If True, then fit_report will show parameter names
-                    sorted in alphanumerical order.  If False, then the
-                    parameters will be listed in the order they were added to
-                    the Parameters dictionary. If sort_pars is callable, then
-                    this (one argument) function is used to extract a
-                    comparison key from each list element.
-    """
-    if isinstance(inpars, Parameters):
-        result, params = None, inpars
-    if hasattr(inpars, 'params'):
-        result = inpars
-        params = inpars.params
-
-    if sort_pars:
-        if callable(sort_pars):
-            key = sort_pars
-        else:
-            key = alphanumeric_sort
-        parnames = sorted(params, key=key)
-    else:
-        # dict.keys() returns a KeysView in py3, and they're indexed further
-        # down
-        parnames = list(params.keys())
-
-    buff = []
-    add = buff.append
-    if result is not None:
-        add("[[Fit Statistics]]")
-        add("    # function evals   = %s" % getfloat_attr(result, 'nfev'))
-        add("    # data points      = %s" % getfloat_attr(result, 'ndata'))
-        add("    # variables        = %s" % getfloat_attr(result, 'nvarys'))
-        add("    chi-square         = %s" % getfloat_attr(result, 'chisqr'))
-        add("    reduced chi-square = %s" % getfloat_attr(result, 'redchi'))
-
-    namelen = max([len(n) for n in parnames])
-    add("[[Variables]]")
-    for name in parnames:
-        par = params[name]
-        space = ' '*(namelen+1-len(name))
-        nout = "%s:%s" % (name, space)
-        inval = '(init= ?)'
-        if par.init_value is not None:
-            inval = '(init=% .7g)' % par.init_value
-        if modelpars is not None and name in modelpars:
-            inval = '%s, model_value =% .7g' % (inval, modelpars[name].value)
-        try:
-            sval = gformat(par.value)
-        except (TypeError, ValueError):
-            sval = 'Non Numeric Value?'
-
-        if par.stderr is not None:
-            serr = gformat(par.stderr, length=9)
-
-            try:
-                spercent = '({:.2%})'.format(abs(par.stderr/par.value))
-            except ZeroDivisionError:
-                spercent = ''
-            sval = '%s +/-%s %s' % (sval, serr, spercent)
-
-        if par.vary:
-            add("    %s %s %s" % (nout, sval, inval))
-        elif par.expr is not None:
-            add("    %s %s  == '%s'" % (nout, sval, par.expr))
-        else:
-            add("    %s % .7g (fixed)" % (nout, par.value))
-
-    if show_correl:
-        add(CORREL_HEAD % min_correl)
-        correls = {}
-        for i, name in enumerate(parnames):
-            par = params[name]
-            if not par.vary:
-                continue
-            if hasattr(par, 'correl') and par.correl is not None:
-                for name2 in parnames[i+1:]:
-                    if (name != name2 and name2 in par.correl and
-                        abs(par.correl[name2]) > min_correl):
-                        correls["%s, %s" % (name, name2)] = par.correl[name2]
-
-        sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
-        sort_correl.reverse()
-        for name, val in sort_correl:
-            lspace = max(1, 25 - len(name))
-            add('    C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))
-    return '\n'.join(buff)
-
-
-def report_errors(params, **kws):
-    """print a report for fitted params:  see error_report()"""
-    print(fit_report(params, **kws))
-
-
-def report_fit(params, **kws):
-    """print a report for fitted params:  see error_report()"""
-    print(fit_report(params, **kws))
-
-
-def ci_report(ci, with_offset=True, ndigits=5):
-    """return text of a report for confidence intervals
-
-    Parameters
-    ----------
-    with_offset : bool (default `True`)
-        whether to subtract best value from all other values.
-    ndigits : int (default 5)
-        number of significant digits to show
-
-    Returns
-    -------
-       text of formatted report on confidence intervals.
-    """
-    maxlen = max([len(i) for i in ci])
-    buff = []
-    add = buff.append
-    def convp(x):
-        if abs(x[0]) < 1.e-2:
-            return "_BEST_"
-        return "%.2f%%" % (x[0]*100)
-
-    title_shown = False
-    fmt_best = fmt_diff  = "{0:.%if}" % ndigits
-    if with_offset:
-        fmt_diff = "{0:+.%if}" % ndigits
-    for name, row in ci.items():
-        if not title_shown:
-            add("".join([''.rjust(maxlen+1)]+[i.rjust(ndigits+5)
-                                            for i in map(convp, row)]))
-            title_shown = True
-        thisrow = [" %s:" % name.ljust(maxlen)]
-        offset = 0.0
-        if with_offset:
-            for cval, val in row:
-                if abs(cval) < 1.e-2:
-                    offset = val
-        for cval, val in row:
-            if cval < 1.e-2:
-                sval = fmt_best.format(val)
-            else:
-                sval = fmt_diff.format(val-offset)
-            thisrow.append(sval.rjust(ndigits+5))
-        add("".join(thisrow))
-
-    return '\n'.join(buff)
-
-
-def report_ci(ci):
-    """print a report for confidence intervals"""
-    print(ci_report(ci))
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Apr 20 19:24:21 2012
+
+@author: Tillsten
+
+Changes:
+  -  13-Feb-2013 M Newville
+     complemented  "report_errors" and "report_ci" with
+     "error_report" and "ci_report" (respectively) which
+     return the text of the report.  Thus report_errors()
+     is simply:
+        def report_errors(params, modelpars=None, show_correl=True):
+            print error_report(params, modelpars=modelpars,
+                               show_correl=show_correl)
+     and similar for report_ci() / ci_report()
+
+"""
+
+from __future__ import print_function
+from .parameter import Parameters
+import re
+
+def alphanumeric_sort(s, _nsre=re.compile('([0-9]+)')):
+    return [int(text) if text.isdigit() else text.lower()
+            for text in re.split(_nsre, s)]
+
+def getfloat_attr(obj, attr, fmt='%.3f'):
+    "format an attribute of an object for printing"
+    val = getattr(obj, attr, None)
+    if val is None:
+        return 'unknown'
+    if isinstance(val, int):
+        return '%d' % val
+    if isinstance(val, float):
+        return fmt % val
+    else:
+        return repr(val)
+
+def gformat(val, length=11):
+    """format a number with '%g'-like format, except that
+    the returned string will have length ``length`` (default 11)
+    and at least length-6 significant digits
+    """
+    length = max(length, 7)
+    fmt = '{: .%ig}' % (length-6)
+    if isinstance(val, int):
+        out = ('{: .%ig}' % (length-2)).format(val)
+        if len(out) > length:
+            out = fmt.format(val)
+    else:
+        out = fmt.format(val)
+    if len(out) < length:
+        if 'e' in out:
+            ie = out.find('e')
+            if '.' not in out[:ie]:
+                out = out[:ie] + '.' + out[ie:]
+            out = out.replace('e', '0'*(length-len(out))+'e')
+        else:
+            fmt = '{: .%ig}' % (length-1)
+            out = fmt.format(val)[:length]
+            if len(out) < length:
+                pad = '0' if '.' in  out else ' '
+                out += pad*(length-len(out))
+    return out
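+
+# For example (illustrative): gformat(1234.5678) returns an 11-character
+# string such as ' 1234.56780'.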
+
+CORREL_HEAD = '[[Correlations]] (unreported correlations are < % .3f)'
+
+def fit_report(inpars, modelpars=None, show_correl=True, min_correl=0.1,
+               sort_pars=False):
+    """return text of a report for fitted params best-fit values,
+    uncertainties and correlations
+
+    arguments
+    ----------
+       inpars       Parameters from a fit, or Minimizer object returned from a fit.
+       modelpars    Optional Known Model Parameters [None]
+       show_correl  whether to show list of sorted correlations [True]
+       min_correl   smallest correlation absolute value to show [0.1]
+       sort_pars    If True, then fit_report will show parameter names
+                    sorted in alphanumerical order.  If False, then the
+                    parameters will be listed in the order they were added to
+                    the Parameters dictionary. If sort_pars is callable, then
+                    this (one argument) function is used to extract a
+                    comparison key from each list element.
+    """
+    if isinstance(inpars, Parameters):
+        result, params = None, inpars
+    if hasattr(inpars, 'params'):
+        result = inpars
+        params = inpars.params
+
+    if sort_pars:
+        if callable(sort_pars):
+            key = sort_pars
+        else:
+            key = alphanumeric_sort
+        parnames = sorted(params, key=key)
+    else:
+        # dict.keys() returns a KeysView in py3, and they're indexed further
+        # down
+        parnames = list(params.keys())
+
+    buff = []
+    add = buff.append
+    if result is not None:
+        add("[[Fit Statistics]]")
+        add("    # function evals   = %s" % getfloat_attr(result, 'nfev'))
+        add("    # data points      = %s" % getfloat_attr(result, 'ndata'))
+        add("    # variables        = %s" % getfloat_attr(result, 'nvarys'))
+        add("    chi-square         = %s" % getfloat_attr(result, 'chisqr'))
+        add("    reduced chi-square = %s" % getfloat_attr(result, 'redchi'))
+        add("    Akaike info crit   = %s" % getfloat_attr(result, 'aic'))
+        add("    Bayesian info crit = %s" % getfloat_attr(result, 'bic'))
+
+    namelen = max([len(n) for n in parnames])
+    add("[[Variables]]")
+    for name in parnames:
+        par = params[name]
+        space = ' '*(namelen+1-len(name))
+        nout = "%s:%s" % (name, space)
+        inval = '(init= ?)'
+        if par.init_value is not None:
+            inval = '(init=% .7g)' % par.init_value
+        if modelpars is not None and name in modelpars:
+            inval = '%s, model_value =% .7g' % (inval, modelpars[name].value)
+        try:
+            sval = gformat(par.value)
+        except (TypeError, ValueError):
+            sval = 'Non Numeric Value?'
+
+        if par.stderr is not None:
+            serr = gformat(par.stderr, length=9)
+
+            try:
+                spercent = '({:.2%})'.format(abs(par.stderr/par.value))
+            except ZeroDivisionError:
+                spercent = ''
+            sval = '%s +/-%s %s' % (sval, serr, spercent)
+
+        if par.vary:
+            add("    %s %s %s" % (nout, sval, inval))
+        elif par.expr is not None:
+            add("    %s %s  == '%s'" % (nout, sval, par.expr))
+        else:
+            add("    %s % .7g (fixed)" % (nout, par.value))
+
+    if show_correl:
+        add(CORREL_HEAD % min_correl)
+        correls = {}
+        for i, name in enumerate(parnames):
+            par = params[name]
+            if not par.vary:
+                continue
+            if hasattr(par, 'correl') and par.correl is not None:
+                for name2 in parnames[i+1:]:
+                    if (name != name2 and name2 in par.correl and
+                        abs(par.correl[name2]) > min_correl):
+                        correls["%s, %s" % (name, name2)] = par.correl[name2]
+
+        sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
+        sort_correl.reverse()
+        for name, val in sort_correl:
+            lspace = max(1, 25 - len(name))
+            add('    C(%s)%s = % .3f ' % (name, (' '*30)[:lspace], val))
+    return '\n'.join(buff)
+
+
+def report_errors(params, **kws):
+    """print a report for fitted params:  see error_report()"""
+    print(fit_report(params, **kws))
+
+
+def report_fit(params, **kws):
+    """print a report for fitted params:  see error_report()"""
+    print(fit_report(params, **kws))
+
+
+def ci_report(ci, with_offset=True, ndigits=5):
+    """return text of a report for confidence intervals
+
+    Parameters
+    ----------
+    with_offset : bool (default `True`)
+        whether to subtract best value from all other values.
+    ndigits : int (default 5)
+        number of significant digits to show
+
+    Returns
+    -------
+       text of formatted report on confidence intervals.
+    """
+    maxlen = max([len(i) for i in ci])
+    buff = []
+    add = buff.append
+    def convp(x):
+        if abs(x[0]) < 1.e-2:
+            return "_BEST_"
+        return "%.2f%%" % (x[0]*100)
+
+    title_shown = False
+    fmt_best = fmt_diff  = "{0:.%if}" % ndigits
+    if with_offset:
+        fmt_diff = "{0:+.%if}" % ndigits
+    for name, row in ci.items():
+        if not title_shown:
+            add("".join([''.rjust(maxlen+1)]+[i.rjust(ndigits+5)
+                                            for i in map(convp, row)]))
+            title_shown = True
+        thisrow = [" %s:" % name.ljust(maxlen)]
+        offset = 0.0
+        if with_offset:
+            for cval, val in row:
+                if abs(cval) < 1.e-2:
+                    offset = val
+        for cval, val in row:
+            if cval < 1.e-2:
+                sval = fmt_best.format(val)
+            else:
+                sval = fmt_diff.format(val-offset)
+            thisrow.append(sval.rjust(ndigits+5))
+        add("".join(thisrow))
+
+    return '\n'.join(buff)
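+
+# Illustrative sketch: the `ci` argument above is the mapping produced by
+# lmfit.conf_interval(), i.e. {param_name: [(probability, value), ...]},
+# so a typical call chain (assuming a completed fit) is
+#     ci = lmfit.conf_interval(minimizer, result)
+#     print(ci_report(ci))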
+
+
+def report_ci(ci):
+    """print a report for confidence intervals"""
+    print(ci_report(ci))
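
A short end-to-end sketch of fit_report() (illustrative: the model, data,
and parameter names are invented, and lmfit 0.9.3 plus numpy are assumed):

    import numpy as np
    from lmfit import Parameters, fit_report, minimize

    def residual(pars, x, data):
        v = pars.valuesdict()
        return v['a'] * np.exp(-x / v['tau']) - data

    x = np.linspace(0, 10, 101)
    data = 3.0 * np.exp(-x / 2.0) + np.random.normal(scale=0.05, size=x.size)

    pars = Parameters()
    pars.add('a', value=1.0, min=0)
    pars.add('tau', value=1.0, min=0)

    result = minimize(residual, pars, args=(x, data))
    print(fit_report(result, min_correl=0.25))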
diff --git a/lmfit/ui/__init__.py b/lmfit/ui/__init__.py
index b6e3431..e835e21 100644
--- a/lmfit/ui/__init__.py
+++ b/lmfit/ui/__init__.py
@@ -1,48 +1,48 @@
-# These variables are used at the end of the module to decide
-# which BaseFitter subclass the Fitter will point to.
-import warnings
-
-has_ipython, has_matplotlib = False, False
-
-try:
-    import matplotlib
-except ImportError:
-    pass
-else:
-    has_matplotlib = True
-
-try:
-    import IPython
-except ImportError:
-    warnings.warn("lmfit.Fitter will use basic mode, not IPython: need matplotlib")
-else:
-    _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
-    _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
-    _ipy_msg3 = "lmfit.Fitter will use basic mode, not IPython: need ipywidgets."
-    try:
-        major_version = IPython.release.version_info[0]
-        if major_version < 2:
-            warnings.warn(_ipy_msg1)
-        elif major_version > 3:
-            # After IPython 3, widgets were moved to a separate package.
-            # There is a shim to allow the old import, but the package has to be
-            # installed for that to work.
-            try:
-                import ipywidgets
-            except ImportError:
-                warnings.warn(_ipy_msg3)
-        else:
-            # has_ipython = IPython is installed and we are in an IPython session.
-            has_ipython = IPython.get_ipython() is not None
-    except Exception as e:
-        warnings.warn(_ipy_msg2)
-
-from .basefitter import BaseFitter
-Fitter = BaseFitter
-if has_matplotlib:
-    from .basefitter import MPLFitter
-    Fitter = MPLFitter
-
-if has_ipython:
-    from .ipy_fitter import NotebookFitter
-    Fitter = NotebookFitter
+# These variables are used at the end of the module to decide
+# which BaseFitter subclass the Fitter will point to.
+import warnings
+
+has_ipython, has_matplotlib = False, False
+
+try:
+    import matplotlib
+except ImportError:
+    pass
+else:
+    has_matplotlib = True
+
+try:
+    import IPython
+except ImportError:
+    warnings.warn("lmfit.Fitter will use basic mode, not IPython: need matplotlib")
+else:
+    _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
+    _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
+    _ipy_msg3 = "lmfit.Fitter will use basic mode, not IPython: need ipywidgets."
+    try:
+        major_version = IPython.release.version_info[0]
+        if major_version < 2:
+            warnings.warn(_ipy_msg1)
+        elif major_version > 3:
+            # After IPython 3, widgets were moved to a separate package.
+            # There is a shim to allow the old import, but the package has to be
+            # installed for that to work.
+            try:
+                import ipywidgets
+            except ImportError:
+                warnings.warn(_ipy_msg3)
+        else:
+            # has_ipython = IPython is installed and we are in an IPython session.
+            has_ipython = IPython.get_ipython() is not None
+    except Exception as e:
+        warnings.warn(_ipy_msg2)
+
+from .basefitter import BaseFitter
+Fitter = BaseFitter
+if has_matplotlib:
+    from .basefitter import MPLFitter
+    Fitter = MPLFitter
+
+if has_ipython:
+    from .ipy_fitter import NotebookFitter
+    Fitter = NotebookFitter
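
The net effect of the fallback logic above can be checked interactively; a
small sketch (which name is printed depends on what is installed):

    import lmfit.ui
    # One of BaseFitter, MPLFitter, or NotebookFitter, depending on whether
    # matplotlib and IPython (plus ipywidgets on IPython >= 4) are available.
    print(lmfit.ui.Fitter.__name__)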
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
index 8b4eace..1f8f815 100644
--- a/lmfit/ui/basefitter.py
+++ b/lmfit/ui/basefitter.py
@@ -1,320 +1,320 @@
-import warnings
-import numpy as np
-
-from ..model import Model
-from ..models import ExponentialModel  # arbitrary default
-from ..asteval import Interpreter
-from ..astutils import NameFinder
-from ..parameter import check_ast_errors
-
-
-_COMMON_DOC = """
-    This is an interactive container for fitting models to particular data.
-
-    It maintains the attributes `current_params` and `current_result`. When
-    its fit() method is called, the best fit becomes the new `current_params`.
-    The most basic usage is iteratively fitting data, taking advantage of
-    this stateful memory that keeps the parameters between fits.
-"""
-
-_COMMON_EXAMPLES_DOC = """
-
-    Examples
-    --------
-    >>> fitter = Fitter(data, model=SomeModel, x=x)
-
-    >>> fitter.model
-    # This property can be changed, to try different models on the same
-    # data with the same independent vars.
-    # (This is especially handy in the notebook.)
-
-    >>> fitter.current_params
-    # This copy of the model's Parameters is updated after each fit.
-
-    >>> fitter.fit()
-    # Perform a fit using fitter.current_params as a guess.
-    # Optionally, pass a params argument or individual keyword arguments
-    # to override current_params.
-
-    >>> fitter.current_result
-    # This is the result of the latest fit. It contains the usual
-    # copies of the Parameters, in the attributes params and init_params.
-
-    >>> fitter.data = new_data
-    # If this property is updated, the `current_params` are retained and used
-    # as an initial guess if fit() is called again.
-    """
-
-
-class BaseFitter(object):
-    __doc__ = _COMMON_DOC + """
-
-    Parameters
-    ----------
-    data : array-like
-    model : lmfit.Model
-        optional initial Model to use; may be set or changed later
-    """ + _COMMON_EXAMPLES_DOC
-    def __init__(self, data, model=None, **kwargs):
-        self._data = data
-        self.kwargs = kwargs
-
-        # GUI-based subclasses need a default value for the menu of models,
-        # and so an arbitrary default is applied here, for uniformity
-        # among the subclasses.
-        if model is None:
-            model = ExponentialModel
-        self.model = model
-
-    def _on_model_value_change(self, name, value):
-        self.model = value
-
-    def _on_fit_button_click(self, b):
-        self.fit()
-
-    def _on_guess_button_click(self, b):
-        self.guess()
-
-    @property
-    def data(self):
-        return self._data
-
-    @data.setter
-    def data(self, value):
-        self._data = value
-
-    @property
-    def model(self):
-        return self._model
-
-    @model.setter
-    def model(self, value):
-        if callable(value):
-            model = value()
-        else:
-            model = value
-        self._model = model
-        self.current_result = None
-        self._current_params = model.make_params()
-
-        # Use these to evaluate any Parameters that use expressions.
-        self.asteval = Interpreter()
-        self.namefinder = NameFinder()
-
-        self._finalize_model(value)
-
-        self.guess()
-
-    def _finalize_model(self, value):
-        # subclasses optionally override to update display here
-        pass
-
-    @property
-    def current_params(self):
-        """Each time fit() is called, these will be updated to reflect
-        the latest best params. They will be used as the initial guess
-        for the next fit, unless overridden by arguments to fit()."""
-        return self._current_params
-
-    @current_params.setter
-    def current_params(self, new_params):
-        # Copy contents, but retain original params objects.
-        for name, par in new_params.items():
-            self._current_params[name].value = par.value
-            self._current_params[name].expr = par.expr
-            self._current_params[name].vary = par.vary
-            self._current_params[name].min = par.min
-            self._current_params[name].max = par.max
-
-        # Compute values for expression-based Parameters.
-        self.__assign_deps(self._current_params)
-        for _, par in self._current_params.items():
-            if par.value is None:
-                self.__update_paramval(self._current_params, par.name)
-
-        self._finalize_params()
-
-    def _finalize_params(self):
-        # subclasses can override this to pass params to display
-        pass
-
-    def guess(self):
-        count_indep_vars = len(self.model.independent_vars)
-        guessing_successful = True
-        try:
-            if count_indep_vars == 0:
-                guess = self.model.guess(self._data)
-            elif count_indep_vars == 1:
-                key = self.model.independent_vars[0]
-                val = self.kwargs[key]
-                d = {key: val}
-                guess = self.model.guess(self._data, **d)
-        except NotImplementedError:
-            guessing_successful = False
-        if guessing_successful:
-            self.current_params = guess
-        return guessing_successful
-
-    def __assign_deps(self, params):
-        # N.B. This does not use self.current_params but rather
-        # new Parameters that are being built by self.guess().
-        for name, par in params.items():
-            if par.expr is not None:
-                par.ast = self.asteval.parse(par.expr)
-                check_ast_errors(self.asteval.error)
-                par.deps = []
-                self.namefinder.names = []
-                self.namefinder.generic_visit(par.ast)
-                for symname in self.namefinder.names:
-                    if (symname in self.current_params and
-                        symname not in par.deps):
-                        par.deps.append(symname)
-                self.asteval.symtable[name] = par.value
-                if par.name is None:
-                    par.name = name
-
-    def __update_paramval(self, params, name):
-        # N.B. This does not use self.current_params but rather
-        # new Parameters that are being built by self.guess().
-        par = params[name]
-        if getattr(par, 'expr', None) is not None:
-            if getattr(par, 'ast', None) is None:
-                par.ast = self.asteval.parse(par.expr)
-            if par.deps is not None:
-                for dep in par.deps:
-                    self.__update_paramval(params, dep)
-            par.value = self.asteval.run(par.ast)
-            out = check_ast_errors(self.asteval.error)
-            if out is not None:
-                self.asteval.raise_exception(None)
-        self.asteval.symtable[name] = par.value
-
-    def fit(self, *args, **kwargs):
-        "Use current_params unless overridden by arguments passed here."
-        guess = dict(self.current_params)
-        guess.update(self.kwargs)  # from __init__, e.g. x=x
-        guess.update(kwargs)
-        self.current_result = self.model.fit(self._data, *args, **guess)
-        self.current_params = self.current_result.params
-
-
-class MPLFitter(BaseFitter):
-    # This is a small elaboration on BaseFitter; it adds a plot()
-    # method that depends on matplotlib. It adds several plot-
-    # styling arguments to the signature.
-    __doc__ = _COMMON_DOC + """
-
-    Parameters
-    ----------
-    data : array-like
-    model : lmfit.Model
-        optional initial Model to use; may be set or changed later
-
-    Additional Parameters
-    ---------------------
-    axes_style : dictionary representing style keyword arguments to be
-        passed through to `Axes.set(...)`
-    data_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the data points
-    init_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the initial fit
-        line
-    best_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the best fit
-        line
-    **kwargs : independent variables or extra arguments, passed like `x=x`
-        """ + _COMMON_EXAMPLES_DOC
-    def __init__(self, data, model=None, axes_style={},
-                data_style={}, init_style={}, best_style={}, **kwargs):
-        self.axes_style = axes_style
-        self.data_style = data_style
-        self.init_style = init_style
-        self.best_style = best_style
-        super(MPLFitter, self).__init__(data, model, **kwargs)
-
-    def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
-             ax=None):
-        """Plot data, initial guess fit, and best fit.
-
-    Optional style arguments pass keyword dictionaries through to their
-    respective components of the matplotlib plot.
-
-    Precedence is:
-    1. arguments passed to this function, plot()
-    2. arguments passed to the Fitter when it was first declared
-    3. hard-coded defaults
-
-    Parameters
-    ---------------------
-    axes_style : dictionary representing style keyword arguments to be
-        passed through to `Axes.set(...)`
-    data_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the data points
-    init_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the initial fit
-        line
-    best_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the best fit
-        line
-    ax : matplotlib.Axes
-            optional `Axes` object. Axes will be generated if not provided.
-        """
-        try:
-            import matplotlib.pyplot as plt
-        except ImportError:
-            raise ImportError("Matplotlib is required to use this Fitter. "
-                              "Use BaseFitter or a subclass thereof "
-                              "that does not depend on matplotlib.")
-
-        # Configure style
-        _axes_style= dict()  # none, but this is here for possible future use
-        _axes_style.update(self.axes_style)
-        _axes_style.update(axes_style)
-        _data_style= dict(color='blue', marker='o', linestyle='none')
-        _data_style.update(**_normalize_kwargs(self.data_style, 'line2d'))
-        _data_style.update(**_normalize_kwargs(data_style, 'line2d'))
-        _init_style = dict(color='gray')
-        _init_style.update(**_normalize_kwargs(self.init_style, 'line2d'))
-        _init_style.update(**_normalize_kwargs(init_style, 'line2d'))
-        _best_style= dict(color='red')
-        _best_style.update(**_normalize_kwargs(self.best_style, 'line2d'))
-        _best_style.update(**_normalize_kwargs(best_style, 'line2d'))
-
-        if ax is None:
-            fig, ax = plt.subplots()
-        count_indep_vars = len(self.model.independent_vars)
-        if count_indep_vars == 0:
-            ax.plot(self._data, **_data_style)
-        elif count_indep_vars == 1:
-            indep_var = self.kwargs[self.model.independent_vars[0]]
-            ax.plot(indep_var, self._data, **_data_style)
-        else:
-            raise NotImplementedError("Cannot plot models with more than one "
-                                      "indepedent variable.")
-        result = self.current_result  # alias for brevity
-        if not result:
-            ax.set(**_axes_style)
-            return  # short-circuit the rest of the plotting
-        if count_indep_vars == 0:
-            ax.plot(result.init_fit, **_init_style)
-            ax.plot(result.best_fit, **_best_style)
-        elif count_indep_vars == 1:
-            ax.plot(indep_var, result.init_fit, **_init_style)
-            ax.plot(indep_var, result.best_fit, **_best_style)
-        ax.set(**_axes_style)
-
-
-def _normalize_kwargs(kwargs, kind='patch'):
-    """Convert matplotlib keywords from short to long form."""
-    # Source:
-    # github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
-    if kind == 'line2d':
-        long_names = dict(c='color', ls='linestyle', lw='linewidth',
-                          mec='markeredgecolor', mew='markeredgewidth',
-                          mfc='markerfacecolor', ms='markersize',)
-    elif kind == 'patch':
-        long_names = dict(c='color', ls='linestyle', lw='linewidth',
-                          ec='edgecolor', fc='facecolor',)
-    for short_name in long_names:
-        if short_name in kwargs:
-            kwargs[long_names[short_name]] = kwargs.pop(short_name)
-    return kwargs
+import warnings
+import numpy as np
+
+from ..model import Model
+from ..models import ExponentialModel  # arbitrary default
+from ..asteval import Interpreter
+from ..astutils import NameFinder
+from ..parameter import check_ast_errors
+
+
+_COMMON_DOC = """
+    This is an interactive container for fitting models to particular data.
+
+    It maintains the attributes `current_params` and `current_result`. When
+    its fit() method is called, the best fit becomes the new `current_params`.
+    The most basic usage is iteratively fitting data, taking advantage of
+    this stateful memory that keeps the parameters between fits.
+"""
+
+_COMMON_EXAMPLES_DOC = """
+
+    Examples
+    --------
+    >>> fitter = Fitter(data, model=SomeModel, x=x)
+
+    >>> fitter.model
+    # This property can be changed, to try different models on the same
+    # data with the same independent vars.
+    # (This is especially handy in the notebook.)
+
+    >>> fitter.current_params
+    # This copy of the model's Parameters is updated after each fit.
+
+    >>> fitter.fit()
+    # Perform a fit using fitter.current_params as a guess.
+    # Optionally, pass a params argument or individual keyword arguments
+    # to override current_params.
+
+    >>> fitter.current_result
+    # This is the result of the latest fit. It contains the usual
+    # copies of the Parameters, in the attributes params and init_params.
+
+    >>> fitter.data = new_data
+    # If this property is updated, the `current_params` are retained and used
+    # as an initial guess if fit() is called again.
+    """
+
+
+class BaseFitter(object):
+    __doc__ = _COMMON_DOC + """
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use; may be set or changed later
+    """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, **kwargs):
+        self._data = data
+        self.kwargs = kwargs
+
+        # GUI-based subclasses need a default value for the menu of models,
+        # and so an arbitrary default is applied here, for uniformity
+        # among the subclasses.
+        if model is None:
+            model = ExponentialModel
+        self.model = model
+
+    def _on_model_value_change(self, name, value):
+        self.model = value
+
+    def _on_fit_button_click(self, b):
+        self.fit()
+
+    def _on_guess_button_click(self, b):
+        self.guess()
+
+    @property
+    def data(self):
+        return self._data
+
+    @data.setter
+    def data(self, value):
+        self._data = value
+
+    @property
+    def model(self):
+        return self._model
+
+    @model.setter
+    def model(self, value):
+        if callable(value):
+            model = value()
+        else:
+            model = value
+        self._model = model
+        self.current_result = None
+        self._current_params = model.make_params()
+
+        # Use these to evaluate any Parameters that use expressions.
+        self.asteval = Interpreter()
+        self.namefinder = NameFinder()
+
+        self._finalize_model(value)
+
+        self.guess()
+
+    def _finalize_model(self, value):
+        # subclasses optionally override to update display here
+        pass
+
+    @property
+    def current_params(self):
+        """Each time fit() is called, these will be updated to reflect
+        the latest best params. They will be used as the initial guess
+        for the next fit, unless overridden by arguments to fit()."""
+        return self._current_params
+
+    @current_params.setter
+    def current_params(self, new_params):
+        # Copy contents, but retain original params objects.
+        for name, par in new_params.items():
+            self._current_params[name].value = par.value
+            self._current_params[name].expr = par.expr
+            self._current_params[name].vary = par.vary
+            self._current_params[name].min = par.min
+            self._current_params[name].max = par.max
+
+        # Compute values for expression-based Parameters.
+        self.__assign_deps(self._current_params)
+        for _, par in self._current_params.items():
+            if par.value is None:
+                self.__update_paramval(self._current_params, par.name)
+
+        self._finalize_params()
+
+    def _finalize_params(self):
+        # subclasses can override this to pass params to display
+        pass
+
+    def guess(self):
+        count_indep_vars = len(self.model.independent_vars)
+        guessing_successful = True
+        try:
+            if count_indep_vars == 0:
+                guess = self.model.guess(self._data)
+            elif count_indep_vars == 1:
+                key = self.model.independent_vars[0]
+                val = self.kwargs[key]
+                d = {key: val}
+                guess = self.model.guess(self._data, **d)
+            else:
+                # Guessing is only supported for models with zero or one
+                # independent variable.
+                guessing_successful = False
+        except NotImplementedError:
+            guessing_successful = False
+        if guessing_successful:
+            # Only overwrite current_params when a guess was produced;
+            # otherwise `guess` would be unbound here.
+            self.current_params = guess
+        return guessing_successful
+
+    def __assign_deps(self, params):
+        # N.B. This does not use self.current_params but rather
+        # new Parameters that are being built by self.guess().
+        for name, par in params.items():
+            if par.expr is not None:
+                par.ast = self.asteval.parse(par.expr)
+                check_ast_errors(self.asteval.error)
+                par.deps = []
+                self.namefinder.names = []
+                self.namefinder.generic_visit(par.ast)
+                for symname in self.namefinder.names:
+                    if (symname in self.current_params and
+                        symname not in par.deps):
+                        par.deps.append(symname)
+                self.asteval.symtable[name] = par.value
+                if par.name is None:
+                    par.name = name
+
+    def __update_paramval(self, params, name):
+        # N.B. This does not use self.current_params but rather
+        # new Parameters that are being built by self.guess().
+        par = params[name]
+        if getattr(par, 'expr', None) is not None:
+            if getattr(par, 'ast', None) is None:
+                par.ast = self.asteval.parse(par.expr)
+            if par.deps is not None:
+                for dep in par.deps:
+                    self.__update_paramval(params, dep)
+            par.value = self.asteval.run(par.ast)
+            out = check_ast_errors(self.asteval.error)
+            if out is not None:
+                self.asteval.raise_exception(None)
+        self.asteval.symtable[name] = par.value
+
+    def fit(self, *args, **kwargs):
+        "Use current_params unless overridden by arguments passed here."
+        guess = dict(self.current_params)
+        guess.update(self.kwargs)  # from __init__, e.g. x=x
+        guess.update(kwargs)
+        self.current_result = self.model.fit(self._data, *args, **guess)
+        self.current_params = self.current_result.params
+
+
+class MPLFitter(BaseFitter):
+    # This is a small elaboration on BaseFitter; it adds a plot()
+    # method that depends on matplotlib. It adds several plot-
+    # styling arguments to the signature.
+    __doc__ = _COMMON_DOC + """
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use, may be set or changed later
+
+    Additional Parameters
+    ---------------------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    **kwargs : independent variables or extra arguments, passed like `x=x`
+        """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, axes_style={},
+                data_style={}, init_style={}, best_style={}, **kwargs):
+        self.axes_style = axes_style
+        self.data_style = data_style
+        self.init_style = init_style
+        self.best_style = best_style
+        super(MPLFitter, self).__init__(data, model, **kwargs)
+
+    def plot(self, axes_style={}, data_style={}, init_style={}, best_style={},
+             ax=None):
+        """Plot data, initial guess fit, and best fit.
+
+    Optional style arguments pass keyword dictionaries through to their
+    respective components of the matplotlib plot.
+
+    Precedence is:
+    1. arguments passed to this function, plot()
+    2. arguments passed to the Fitter when it was first declared
+    3. hard-coded defaults
+
+    Parameters
+    ----------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    ax : matplotlib.Axes
+        optional `Axes` object. Axes will be generated if not provided.
+        """
+        try:
+            import matplotlib.pyplot as plt
+        except ImportError:
+            raise ImportError("Matplotlib is required to use this Fitter. "
+                              "Use BaseFitter or a subclass thereof "
+                              "that does not depend on matplotlib.")
+
+        # Configure style
+        _axes_style = dict()  # none, but this is here for possible future use
+        _axes_style.update(self.axes_style)
+        _axes_style.update(axes_style)
+        _data_style = dict(color='blue', marker='o', linestyle='none')
+        _data_style.update(**_normalize_kwargs(self.data_style, 'line2d'))
+        _data_style.update(**_normalize_kwargs(data_style, 'line2d'))
+        _init_style = dict(color='gray')
+        _init_style.update(**_normalize_kwargs(self.init_style, 'line2d'))
+        _init_style.update(**_normalize_kwargs(init_style, 'line2d'))
+        _best_style = dict(color='red')
+        _best_style.update(**_normalize_kwargs(self.best_style, 'line2d'))
+        _best_style.update(**_normalize_kwargs(best_style, 'line2d'))
+
+        if ax is None:
+            fig, ax = plt.subplots()
+        count_indep_vars = len(self.model.independent_vars)
+        if count_indep_vars == 0:
+            ax.plot(self._data, **_data_style)
+        elif count_indep_vars == 1:
+            indep_var = self.kwargs[self.model.independent_vars[0]]
+            ax.plot(indep_var, self._data, **_data_style)
+        else:
+            raise NotImplementedError("Cannot plot models with more than one "
+                                      "independent variable.")
+        result = self.current_result  # alias for brevity
+        if not result:
+            ax.set(**_axes_style)
+            return  # short-circuit the rest of the plotting
+        if count_indep_vars == 0:
+            ax.plot(result.init_fit, **_init_style)
+            ax.plot(result.best_fit, **_best_style)
+        elif count_indep_vars == 1:
+            ax.plot(indep_var, result.init_fit, **_init_style)
+            ax.plot(indep_var, result.best_fit, **_best_style)
+        ax.set(**_axes_style)
+
+
+def _normalize_kwargs(kwargs, kind='patch'):
+    """Convert matplotlib keywords from short to long form."""
+    # Source:
+    # github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
+    if kind == 'line2d':
+        long_names = dict(c='color', ls='linestyle', lw='linewidth',
+                          mec='markeredgecolor', mew='markeredgewidth',
+                          mfc='markerfacecolor', ms='markersize',)
+    elif kind == 'patch':
+        long_names = dict(c='color', ls='linestyle', lw='linewidth',
+                          ec='edgecolor', fc='facecolor',)
+    for short_name in long_names:
+        if short_name in kwargs:
+            kwargs[long_names[short_name]] = kwargs.pop(short_name)
+    return kwargs
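+
+# As a hedged illustration of the styling precedence documented in plot()
+# above -- call arguments beat constructor arguments beat defaults -- here
+# is a minimal usage sketch; the synthetic data, the GaussianModel choice,
+# and the style values are assumptions made only for this example:
+#
+#     >>> import numpy as np
+#     >>> from lmfit.models import GaussianModel
+#     >>> x = np.linspace(-5, 5, 101)
+#     >>> y = 3.0 * np.exp(-x**2 / 2) + np.random.normal(0, 0.1, x.size)
+#     >>> fitter = MPLFitter(y, model=GaussianModel, x=x,
+#     ...                    data_style={'color': 'black'})
+#     >>> fitter.fit()
+#     >>> fitter.plot(data_style={'c': 'green'})
+#
+# Here 'c' is expanded to 'color' by _normalize_kwargs(), and the value
+# passed to plot() wins over the 'black' given to the constructor.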
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
index 29c9446..80edda5 100644
--- a/lmfit/ui/ipy_fitter.py
+++ b/lmfit/ui/ipy_fitter.py
@@ -1,282 +1,282 @@
-import warnings
-import numpy as np
-
-from ..model import Model
-
-from .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC
-
-# Note: If IPython is not available or the version is < 2,
-# this module will not be imported, and a different Fitter will be used.
-
-import IPython
-from IPython.display import display, clear_output
-# Widgets were only experimental in IPython 2.x, but this does work there.
-# Handle the change in naming from 2.x to 3.x.
-IPY2 = IPython.release.version_info[0] == 2
-IPY3 = IPython.release.version_info[0] == 3
-if IPY2:
-    from IPython.html.widgets import DropdownWidget as Dropdown
-    from IPython.html.widgets import ButtonWidget as Button
-    from IPython.html.widgets import ContainerWidget
-    from IPython.html.widgets import FloatTextWidget as FloatText
-    from IPython.html.widgets import CheckboxWidget as Checkbox
-    class HBox(ContainerWidget):
-        def __init__(self, *args, **kwargs):
-            self.add_class('hbox')
-            super(HBox, self).__init__(*args, **kwargs)
-elif IPY3:
-    # as of IPython 3.x:
-    from IPython.html.widgets import Dropdown
-    from IPython.html.widgets import Button
-    from IPython.html.widgets import HBox
-    from IPython.html.widgets import FloatText
-    from IPython.html.widgets import Checkbox
-else:
-    # as of IPython 4.x+:
-    from ipywidgets import Dropdown
-    from ipywidgets import Button
-    from ipywidgets import HBox
-    from ipywidgets import FloatText
-    from ipywidgets import Checkbox
-
-
-class ParameterWidgetGroup(object):
-    """Construct several widgets that together represent a Parameter.
-
-    This will only be used if IPython is available."""
-    def __init__(self, par):
-        self.par = par
-
-        # Define widgets.
-        self.value_text = FloatText(description=par.name,
-                                    min=self.par.min, max=self.par.max)
-        self.value_text.width = 100
-        self.min_text = FloatText(description='min', max=self.par.max)
-        self.min_text.width = 100
-        self.max_text = FloatText(description='max', min=self.par.min)
-        self.max_text.width = 100
-        self.min_checkbox = Checkbox(description='min')
-        self.max_checkbox = Checkbox(description='max')
-        self.vary_checkbox = Checkbox(description='vary')
-
-        # Set widget values and visibility.
-        if par.value is not None:
-            self.value_text.value = self.par.value
-        min_unset = self.par.min is None or self.par.min == -np.inf
-        max_unset = self.par.max is None or self.par.max == np.inf
-        self.min_checkbox.value = not min_unset
-        self.min_text.visible = not min_unset
-        self.min_text.value = self.par.min
-        self.max_checkbox.value = not max_unset
-        self.max_text.visible = not max_unset
-        self.max_text.value = self.par.max
-        self.vary_checkbox.value = self.par.vary
-
-        # Configure widgets to sync with par attributes.
-        self.value_text.on_trait_change(self._on_value_change, 'value')
-        self.min_text.on_trait_change(self._on_min_value_change, 'value')
-        self.max_text.on_trait_change(self._on_max_value_change, 'value')
-        self.min_checkbox.on_trait_change(self._on_min_checkbox_change,
-                                          'value')
-        self.max_checkbox.on_trait_change(self._on_max_checkbox_change,
-                                          'value')
-        self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')
-
-    def _on_value_change(self, name, value):
-        self.par.value = value
-
-    def _on_min_checkbox_change(self, name, value):
-        self.min_text.visible = value
-        if value:
-            # -np.inf does not play well with a numerical text field,
-            # so set min to -1 if activated (and back to -inf if deactivated).
-            self.min_text.value = -1
-            self.par.min = self.min_text.value
-            self.value_text.min = self.min_text.value
-        else:
-            self.par.min = None
-
-    def _on_max_checkbox_change(self, name, value):
-        self.max_text.visible = value
-        if value:
-            # np.inf does not play well with a numerical text field,
-            # so set max to 1 if activated (and back to inf if deactivated).
-            self.max_text.value = 1
-            self.par.max = self.max_text.value
-            self.value_text.max = self.max_text.value
-        else:
-            self.par.max = None
-
-    def _on_min_value_change(self, name, value):
-        self.par.min = value
-        self.value_text.min = value
-        self.max_text.min = value
-
-    def _on_max_value_change(self, name, value):
-        self.par.max = value
-        self.value_text.max = value
-        self.min_text.max = value
-
-    def _on_vary_change(self, name, value):
-        self.par.vary = value
-        # self.value_text.disabled = not value
-
-    def close(self):
-        # one convenience method to close (i.e., hide and disconnect) all
-        # widgets in this group
-        self.value_text.close()
-        self.min_text.close()
-        self.max_text.close()
-        self.vary_checkbox.close()
-        self.min_checkbox.close()
-        self.max_checkbox.close()
-
-    def _repr_html_(self):
-        box = HBox()
-        box.children = [self.value_text, self.vary_checkbox,
-                        self.min_checkbox, self.min_text,
-                        self.max_checkbox, self.max_text]
-        display(box)
-
-    # Make it easy to set the widget attributes directly.
-    @property
-    def value(self):
-        return self.value_text.value
-
-    @value.setter
-    def value(self, value):
-        self.value_text.value = value
-
-    @property
-    def vary(self):
-        return self.vary_checkbox.value
-
-    @vary.setter
-    def vary(self, value):
-        self.vary_checkbox.value = value
-
-    @property
-    def min(self):
-        return self.min_text.value
-
-    @min.setter
-    def min(self, value):
-        self.min_text.value = value
-
-    @property
-    def max(self):
-        return self.max_text.value
-
-    @max.setter
-    def max(self, value):
-        self.max_text.value = value
-
-    @property
-    def name(self):
-        return self.par.name
-
-
-class NotebookFitter(MPLFitter):
-    __doc__ = _COMMON_DOC + """
-    If IPython is available, it uses the IPython notebook's rich display
-    to fit data interactively in a web-based GUI. The Parameters are
-    represented in a web-based form that is kept in sync with `current_params`.
-    All subclasses of Model, including user-defined ones, are shown in a
-    drop-down menu.
-
-    Clicking the "Fit" button updates a plot, as above, and updates the
-    Parameters in the form to reflect the best fit.
-
-    Parameters
-    ----------
-    data : array-like
-    model : lmfit.Model
-        optional initial Model to use, may be set or changed later
-    all_models : list
-        optional list of Models to populate drop-down menu, by default
-        all built-in and user-defined subclasses of Model are used
-
-    Additional Parameters
-    ---------------------
-    axes_style : dictionary representing style keyword arguments to be
-        passed through to `Axes.set(...)`
-    data_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the data points
-    init_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the initial fit
-        line
-    best_style : dictionary representing style keyword arguments to be passed
-        through to the matplotlib `plot()` command that plots the best fit
-        line
-    **kwargs : independent variables or extra arguments, passed like `x=x`
-    """ + _COMMON_EXAMPLES_DOC
-    def __init__(self, data, model=None, all_models=None, axes_style={},
-                data_style={}, init_style={}, best_style={}, **kwargs):
-        # Dropdown menu of all subclasses of Model, incl. user-defined.
-        self.models_menu = Dropdown()
-        # Dropdown API is very different between IPy 2.x and 3.x.
-        if IPY2:
-            if all_models is None:
-                all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
-            self.models_menu.values = all_models
-        else:
-            if all_models is None:
-                all_models = [(m.__name__, m) for m in Model.__subclasses__()]
-            self.models_menu.options = all_models
-        self.models_menu.on_trait_change(self._on_model_value_change,
-                                             'value')
-        # Button to trigger fitting.
-        self.fit_button = Button(description='Fit')
-        self.fit_button.on_click(self._on_fit_button_click)
-
-        # Button to trigger guessing.
-        self.guess_button = Button(description='Auto-Guess')
-        self.guess_button.on_click(self._on_guess_button_click)
-
-        # Parameter widgets are not built here. They are (re-)built when
-        # the model is (re-)set.
-        super(NotebookFitter, self).__init__(data, model, axes_style,
-                                             data_style, init_style,
-                                             best_style, **kwargs)
-
-    def _repr_html_(self):
-        display(self.models_menu)
-        button_box = HBox()
-        button_box.children = [self.fit_button, self.guess_button]
-        display(button_box)
-        for pw in self.param_widgets:
-            display(pw)
-        self.plot()
-
-    def guess(self):
-        guessing_successful = super(NotebookFitter, self).guess()
-        self.guess_button.disabled = not guessing_successful
-        return guessing_successful
-
-    def _finalize_model(self, value):
-        first_run = not hasattr(self, 'param_widgets')
-        if not first_run:
-            # Remove all Parameter widgets, and replace them with widgets
-            # for the new model.
-            for pw in self.param_widgets:
-                pw.close()
-        self.models_menu.value = value
-        self.param_widgets = [ParameterWidgetGroup(p)
-                              for _, p in self._current_params.items()]
-        if not first_run:
-            for pw in self.param_widgets:
-                display(pw)
-
-    def _finalize_params(self):
-        for pw in self.param_widgets:
-            pw.value = self._current_params[pw.name].value
-            pw.min = self._current_params[pw.name].min
-            pw.max = self._current_params[pw.name].max
-            pw.vary = self._current_params[pw.name].vary
-
-    def plot(self):
-        clear_output(wait=True)
-        super(NotebookFitter, self).plot()
-
-    def fit(self):
-        super(NotebookFitter, self).fit()
-        self.plot()
+import warnings
+import numpy as np
+
+from ..model import Model
+
+from .basefitter import MPLFitter, _COMMON_DOC, _COMMON_EXAMPLES_DOC
+
+# Note: If IPython is not available or the version is < 2,
+# this module will not be imported, and a different Fitter will be used.
+
+import IPython
+from IPython.display import display, clear_output
+# Widgets were only experimental in IPython 2.x, but this does work there.
+# Handle the change in naming from 2.x to 3.x.
+IPY2 = IPython.release.version_info[0] == 2
+IPY3 = IPython.release.version_info[0] == 3
+if IPY2:
+    from IPython.html.widgets import DropdownWidget as Dropdown
+    from IPython.html.widgets import ButtonWidget as Button
+    from IPython.html.widgets import ContainerWidget
+    from IPython.html.widgets import FloatTextWidget as FloatText
+    from IPython.html.widgets import CheckboxWidget as Checkbox
+    class HBox(ContainerWidget):
+        def __init__(self, *args, **kwargs):
+            self.add_class('hbox')
+            super(HBox, self).__init__(*args, **kwargs)
+elif IPY3:
+    # as of IPython 3.x:
+    from IPython.html.widgets import Dropdown
+    from IPython.html.widgets import Button
+    from IPython.html.widgets import HBox
+    from IPython.html.widgets import FloatText
+    from IPython.html.widgets import Checkbox
+else:
+    # as of IPython 4.x+:
+    from ipywidgets import Dropdown
+    from ipywidgets import Button
+    from ipywidgets import HBox
+    from ipywidgets import FloatText
+    from ipywidgets import Checkbox
+
+
+class ParameterWidgetGroup(object):
+    """Construct several widgets that together represent a Parameter.
+
+    This will only be used if IPython is available."""
+    def __init__(self, par):
+        self.par = par
+
+        # Define widgets.
+        self.value_text = FloatText(description=par.name,
+                                    min=self.par.min, max=self.par.max)
+        self.value_text.width = 100
+        self.min_text = FloatText(description='min', max=self.par.max)
+        self.min_text.width = 100
+        self.max_text = FloatText(description='max', min=self.par.min)
+        self.max_text.width = 100
+        self.min_checkbox = Checkbox(description='min')
+        self.max_checkbox = Checkbox(description='max')
+        self.vary_checkbox = Checkbox(description='vary')
+
+        # Set widget values and visibility.
+        if par.value is not None:
+            self.value_text.value = self.par.value
+        min_unset = self.par.min is None or self.par.min == -np.inf
+        max_unset = self.par.max is None or self.par.max == np.inf
+        self.min_checkbox.value = not min_unset
+        self.min_text.visible = not min_unset
+        self.min_text.value = self.par.min
+        self.max_checkbox.value = not max_unset
+        self.max_text.visible = not max_unset
+        self.max_text.value = self.par.max
+        self.vary_checkbox.value = self.par.vary
+
+        # Configure widgets to sync with par attributes.
+        self.value_text.on_trait_change(self._on_value_change, 'value')
+        self.min_text.on_trait_change(self._on_min_value_change, 'value')
+        self.max_text.on_trait_change(self._on_max_value_change, 'value')
+        self.min_checkbox.on_trait_change(self._on_min_checkbox_change,
+                                          'value')
+        self.max_checkbox.on_trait_change(self._on_max_checkbox_change,
+                                          'value')
+        self.vary_checkbox.on_trait_change(self._on_vary_change, 'value')
+
+    def _on_value_change(self, name, value):
+        self.par.value = value
+
+    def _on_min_checkbox_change(self, name, value):
+        self.min_text.visible = value
+        if value:
+            # -np.inf does not play well with a numerical text field,
+            # so set min to -1 if activated (and back to -inf if deactivated).
+            self.min_text.value = -1
+            self.par.min = self.min_text.value
+            self.value_text.min = self.min_text.value
+        else:
+            self.par.min = None
+
+    def _on_max_checkbox_change(self, name, value):
+        self.max_text.visible = value
+        if value:
+            # np.inf does not play well with a numerical text field,
+            # so set max to 1 if activated (and back to inf if deactivated).
+            self.max_text.value = 1
+            self.par.max = self.max_text.value
+            self.value_text.max = self.max_text.value
+        else:
+            self.par.max = None
+
+    def _on_min_value_change(self, name, value):
+        self.par.min = value
+        self.value_text.min = value
+        self.max_text.min = value
+
+    def _on_max_value_change(self, name, value):
+        self.par.max = value
+        self.value_text.max = value
+        self.min_text.max = value
+
+    def _on_vary_change(self, name, value):
+        self.par.vary = value
+        # self.value_text.disabled = not value
+
+    def close(self):
+        # one convenience method to close (i.e., hide and disconnect) all
+        # widgets in this group
+        self.value_text.close()
+        self.min_text.close()
+        self.max_text.close()
+        self.vary_checkbox.close()
+        self.min_checkbox.close()
+        self.max_checkbox.close()
+
+    def _repr_html_(self):
+        box = HBox()
+        box.children = [self.value_text, self.vary_checkbox,
+                        self.min_checkbox, self.min_text,
+                        self.max_checkbox, self.max_text]
+        display(box)
+
+    # Make it easy to set the widget attributes directly.
+    @property
+    def value(self):
+        return self.value_text.value
+
+    @value.setter
+    def value(self, value):
+        self.value_text.value = value
+
+    @property
+    def vary(self):
+        return self.vary_checkbox.value
+
+    @vary.setter
+    def vary(self, value):
+        self.vary_checkbox.value = value
+
+    @property
+    def min(self):
+        return self.min_text.value
+
+    @min.setter
+    def min(self, value):
+        self.min_text.value = value
+
+    @property
+    def max(self):
+        return self.max_text.value
+
+    @max.setter
+    def max(self, value):
+        self.max_text.value = value
+
+    @property
+    def name(self):
+        return self.par.name
+
+
+class NotebookFitter(MPLFitter):
+    __doc__ = _COMMON_DOC + """
+    If IPython is available, it uses the IPython notebook's rich display
+    to fit data interactively in a web-based GUI. The Parameters are
+    represented in a web-based form that is kept in sync with `current_params`.
+    All subclasses of Model, including user-defined ones, are shown in a
+    drop-down menu.
+
+    Clicking the "Fit" button updates a plot, as above, and updates the
+    Parameters in the form to reflect the best fit.
+
+    Parameters
+    ----------
+    data : array-like
+    model : lmfit.Model
+        optional initial Model to use, may be set or changed later
+    all_models : list
+        optional list of Models to populate drop-down menu, by default
+        all built-in and user-defined subclasses of Model are used
+
+    Additional Parameters
+    ---------------------
+    axes_style : dictionary representing style keyword arguments to be
+        passed through to `Axes.set(...)`
+    data_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the data points
+    init_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the initial fit
+        line
+    best_style : dictionary representing style keyword arguments to be passed
+        through to the matplotlib `plot()` command that plots the best fit
+        line
+    **kwargs : independent variables or extra arguments, passed like `x=x`
+    """ + _COMMON_EXAMPLES_DOC
+    def __init__(self, data, model=None, all_models=None, axes_style={},
+                data_style={}, init_style={}, best_style={}, **kwargs):
+        # Dropdown menu of all subclasses of Model, incl. user-defined.
+        self.models_menu = Dropdown()
+        # Dropdown API is very different between IPy 2.x and 3.x.
+        if IPY2:
+            if all_models is None:
+                all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
+            self.models_menu.values = all_models
+        else:
+            if all_models is None:
+                all_models = [(m.__name__, m) for m in Model.__subclasses__()]
+            self.models_menu.options = all_models
+        self.models_menu.on_trait_change(self._on_model_value_change,
+                                             'value')
+        # Button to trigger fitting.
+        self.fit_button = Button(description='Fit')
+        self.fit_button.on_click(self._on_fit_button_click)
+
+        # Button to trigger guessing.
+        self.guess_button = Button(description='Auto-Guess')
+        self.guess_button.on_click(self._on_guess_button_click)
+
+        # Parameter widgets are not built here. They are (re-)built when
+        # the model is (re-)set.
+        super(NotebookFitter, self).__init__(data, model, axes_style,
+                                             data_style, init_style,
+                                             best_style, **kwargs)
+
+    def _repr_html_(self):
+        display(self.models_menu)
+        button_box = HBox()
+        button_box.children = [self.fit_button, self.guess_button]
+        display(button_box)
+        for pw in self.param_widgets:
+            display(pw)
+        self.plot()
+
+    def guess(self):
+        guessing_successful = super(NotebookFitter, self).guess()
+        self.guess_button.disabled = not guessing_successful
+        return guessing_successful
+
+    def _finalize_model(self, value):
+        first_run = not hasattr(self, 'param_widgets')
+        if not first_run:
+            # Remove all Parameter widgets, and replace them with widgets
+            # for the new model.
+            for pw in self.param_widgets:
+                pw.close()
+        self.models_menu.value = value
+        self.param_widgets = [ParameterWidgetGroup(p)
+                              for _, p in self._current_params.items()]
+        if not first_run:
+            for pw in self.param_widgets:
+                display(pw)
+
+    def _finalize_params(self):
+        for pw in self.param_widgets:
+            pw.value = self._current_params[pw.name].value
+            pw.min = self._current_params[pw.name].min
+            pw.max = self._current_params[pw.name].max
+            pw.vary = self._current_params[pw.name].vary
+
+    def plot(self):
+        clear_output(wait=True)
+        super(NotebookFitter, self).plot()
+
+    def fit(self):
+        super(NotebookFitter, self).fit()
+        self.plot()
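+
+# A hedged sketch of driving this class from a notebook cell; the synthetic
+# data and the `x` array are assumptions made only for this illustration:
+#
+#     >>> import numpy as np
+#     >>> from lmfit.ui.ipy_fitter import NotebookFitter
+#     >>> x = np.linspace(0, 10, 201)
+#     >>> y = np.exp(-x / 3.0) + np.random.normal(0, 0.02, x.size)
+#     >>> fitter = NotebookFitter(y, x=x)  # model defaults to ExponentialModel
+#     >>> fitter  # rich display: model menu, Fit/Auto-Guess buttons, widgets
+#
+# Clicking "Fit" (or calling fitter.fit()) refreshes the plot and the
+# parameter widgets, which stay in sync with fitter.current_params.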
diff --git a/lmfit/uncertainties/__init__.py b/lmfit/uncertainties/__init__.py
index 829a283..5c2ec34 100644
--- a/lmfit/uncertainties/__init__.py
+++ b/lmfit/uncertainties/__init__.py
@@ -1,1645 +1,1645 @@
-#!! Whenever the documentation below is updated, setup.py should be
-# checked for consistency.
-
-'''
-Calculations with full error propagation for quantities with uncertainties.
-Derivatives can also be calculated.
-
-Web user guide: http://packages.python.org/uncertainties/.
-
-Example of possible calculation: (0.2 +/- 0.01)**2 = 0.04 +/- 0.004.
-
-Correlations between expressions are correctly taken into account (for
-instance, with x = 0.2+/-0.01, 2*x-x-x is exactly zero, as is y-x-x
-with y = 2*x).
-
-Examples:
-
-  import uncertainties
-  from uncertainties import ufloat
-  from uncertainties.umath import *  # sin(), etc.
-
-  # Mathematical operations:
-  x = ufloat((0.20, 0.01))  # x = 0.20+/-0.01
-  x = ufloat("0.20+/-0.01")  # Other representation
-  x = ufloat("0.20(1)")  # Other representation
-  x = ufloat("0.20")  # Implicit uncertainty of +/-1 on the last digit
-  print x**2  # Square: prints "0.04+/-0.004"
-  print sin(x**2)  # Prints "0.0399...+/-0.00399..."
-
-  print x.std_score(0.17)  # Prints "-3.0": deviation of -3 sigmas
-
-  # Access to the nominal value, and to the uncertainty:
-  square = x**2  # Square
-  print square  # Prints "0.04+/-0.004"
-  print square.nominal_value  # Prints "0.04"
-  print square.std_dev()  # Prints "0.004..."
-
-  print square.derivatives[x]  # Partial derivative: 0.4 (= 2*0.20)
-
-  # Correlations:
-  u = ufloat((1, 0.05), "u variable")  # Tag
-  v = ufloat((10, 0.1), "v variable")
-  sum_value = u+v
-
-  u.set_std_dev(0.1)  # Standard deviations can be updated on the fly
-  print sum_value - u - v  # Prints "0.0" (exact result)
-
-  # List of all sources of error:
-  print sum_value  # Prints "11+/-0.1414..."
-  for (var, error) in sum_value.error_components().items():
-      print "%s: %f" % (var.tag, error)  # Individual error components
-
-  # Covariance matrices:
-  cov_matrix = uncertainties.covariance_matrix([u, v, sum_value])
-  print cov_matrix  # 3x3 matrix
-
-  # Correlated variables can be constructed from a covariance matrix, if
-  # NumPy is available:
-  (u2, v2, sum2) = uncertainties.correlated_values([1, 10, 11],
-                                                   cov_matrix)
-  print u2  # Value and uncertainty of u: correctly recovered (1+/-0.1)
-  print uncertainties.covariance_matrix([u2, v2, sum2])  # == cov_matrix
-
-- The main function provided by this module is ufloat, which creates
-numbers with uncertainties (Variable objects).  Variable objects can
-be used as if they were regular Python numbers.  The main attributes
-and methods of Variable objects are defined in the documentation of
-the Variable class.
-
-- Valid operations on numbers with uncertainties include basic
-mathematical functions (addition, etc.).
-
-Most operations from the standard math module (sin, etc.) can be applied
-on numbers with uncertainties by using their generalization from the
-uncertainties.umath module:
-
-  from uncertainties.umath import sin
-  print sin(ufloat("1+/-0.01"))  # 0.841...+/-0.005...
-  print sin(1)  # umath.sin() also works on floats, exactly like math.sin()
-
-Logical operations (>, ==, etc.) are also supported.
-
-Basic operations on NumPy arrays or matrices of numbers with
-uncertainties can be performed:
-
-  2*numpy.array([ufloat((1, 0.01)), ufloat((2, 0.1))])
-
-More complex operations on NumPy arrays can be performed through the
-dedicated uncertainties.unumpy sub-module (see its documentation).
-
-Calculations that are performed through non-Python code (Fortran, C,
-etc.) can handle numbers with uncertainties instead of floats through
-the provided wrap() wrapper:
-
-  import uncertainties
-
-  # wrapped_f is a version of f that can take arguments with
-  # uncertainties, even if f only takes floats:
-  wrapped_f = uncertainties.wrap(f)
-
-If some derivatives of the wrapped function f are known (analytically,
-or numerically), they can be given to wrap()--see the documentation
-for wrap().
-
-- Utility functions are also provided: the covariance matrix between
-random variables can be calculated with covariance_matrix(), or used
-as input for the definition of correlated quantities (correlated_values()
-function--defined only if the NumPy module is available).
-
-- Mathematical expressions involving numbers with uncertainties
-generally return AffineScalarFunc objects, which also print as a value
-with uncertainty.  Their most useful attributes and methods are
-described in the documentation for AffineScalarFunc.  Note that
-Variable objects are also AffineScalarFunc objects.  UFloat is an
-alias for AffineScalarFunc, provided as a convenience: testing whether
-a value carries an uncertainty handled by this module should be done
-with isinstance(my_value, UFloat).
-
-- Mathematically, numbers with uncertainties are, in this package,
-probability distributions.  These probabilities are reduced to two
-numbers: a nominal value and an uncertainty.  Thus, both variables
-(Variable objects) and the result of mathematical operations
-(AffineScalarFunc objects) contain these two values (respectively in
-their nominal_value attribute and through their std_dev() method).
-
-The uncertainty of a number with uncertainty is simply defined in
-this package as the standard deviation of the underlying probability
-distribution.
-
-The numbers with uncertainties manipulated by this package are assumed
-to have a probability distribution mostly contained around their
-nominal value, in an interval of about the size of their standard
-deviation.  This should cover most practical cases.  A good choice of
-nominal value for a number with uncertainty is thus the median of its
-probability distribution, the location of highest probability, or the
-average value.
-
-- When manipulating ensembles of numbers, some of which contain
-uncertainties, it can be useful to access the nominal value and
-uncertainty of all numbers in a uniform manner:
-
-  x = ufloat("3+/-0.1")
-  print nominal_value(x)  # Prints 3
-  print std_dev(x)  # Prints 0.1
-  print nominal_value(3)  # Prints 3: nominal_value works on floats
-  print std_dev(3)  # Prints 0: std_dev works on floats
-
-- Probability distributions (random variables and calculation results)
-are printed as:
-
-  nominal value +/- standard deviation
-
-but this does not imply any property on the nominal value (beyond the
-fact that the nominal value is normally inside the region of high
-probability density), or that the probability distribution of the
-result is symmetrical (this is rarely strictly the case).
-
-- Linear approximations of functions (around the nominal values) are
-used for the calculation of the standard deviation of mathematical
-expressions with this package.
-
-The calculated standard deviations and nominal values are thus
-meaningful approximations as long as the functions involved have
-precise linear expansions in the region where the probability
-distribution of their variables is the largest.  It is therefore
-important that uncertainties be small.  Mathematically, this means
-that the linear term of functions around the nominal values of their
-variables should be much larger than the remaining higher-order terms
-over the region of significant probability.
-
-For instance, sin(0+/-0.01) yields a meaningful standard deviation
-since it is quite linear over 0+/-0.01.  However, cos(0+/-0.01) yields
-an approximate standard deviation of 0 (because the cosine is not well
-approximated by a line around 0), which might not be precise enough
-for all applications.
-
-- Comparison operations (>, ==, etc.) on numbers with uncertainties
-have a pragmatic semantics, in this package: numbers with
-uncertainties can be used wherever Python numbers are used, most of
-the time with a result identical to the one that would be obtained
-with their nominal value only.  However, since the objects defined in
-this module represent probability distributions and not pure numbers,
-comparison operators are interpreted in a specific way.
-
-The result of a comparison operation ("==", ">", etc.) is defined so as
-to be essentially consistent with the requirement that uncertainties
-be small: the value of a comparison operation is True only if the
-operation yields True for all infinitesimal variations of its random
-variables, except, possibly, for an infinitely small number of cases.
-
-Example:
-
-  "x = 3.14; y = 3.14" is such that x == y
-
-but
-
-  x = ufloat((3.14, 0.01))
-  y = ufloat((3.14, 0.01))
-
-is not such that x == y, since x and y are independent random
-variables that almost never give the same value.  However, x == x
-still holds.
-
-The boolean value (bool(x), "if x...") of a number with uncertainty x
-is the result of x != 0.
-
-- The uncertainties package is for Python 2.5 and above.
-
-- This package contains tests.  They can be run either manually or
-automatically with the nose unit testing framework (nosetests).
-
-(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>.
-Please send feature requests, bug reports, or feedback to this address.
-
-Please support future development by donating $5 or more through PayPal!
-
-This software is released under a dual license.  (1) The BSD license.
-(2) Any other license, as long as it is obtained from the original
-author.'''
-
-# The idea behind this module is to replace the result of mathematical
-# operations by a local approximation of the defining function.  For
-# example, sin(0.2+/-0.01) becomes the affine function
-# (AffineScalarFunc object) whose nominal value is sin(0.2) and
-# whose variations are given by sin(0.2+delta) = 0.98...*delta.
-# Uncertainties can then be calculated by using this local linear
-# approximation of the original function.
-
-from __future__ import division  # Many analytical derivatives depend on this
-
-import re
-import math
-from math import sqrt, log  # Optimization: no attribute look-up
-import copy
-import warnings
-
-# Numerical version:
-__version_info__ = (1, 9)
-__version__ = '.'.join(map(str, __version_info__))
-
-__author__ = 'Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>'
-
-# Attributes that are always exported (some other attributes are
-# exported only if the NumPy module is available...):
-__all__ = [
-
-    # All sub-modules and packages are not imported by default,
-    # in particular because NumPy might be unavailable.
-    'ufloat',  # Main function: returns a number with uncertainty
-
-    # Uniform access to nominal values and standard deviations:
-    'nominal_value',
-    'std_dev',
-
-    # Utility functions (more are exported if NumPy is present):
-    'covariance_matrix',
-
-    # Class for testing whether an object is a number with
-    # uncertainty.  Not usually created by users (except through the
-    # Variable subclass), but possibly manipulated by external code
-    # ['derivatives()' method, etc.].
-    'UFloat',
-
-    # Wrapper for allowing non-pure-Python function to handle
-    # quantities with uncertainties:
-    'wrap',
-
-    # The documentation for wrap() indicates that numerical
-    # derivatives are calculated through partial_derivative().  The
-    # user might also want to change the size of the numerical
-    # differentiation step.
-    'partial_derivative'
-    ]
-
-###############################################################################
-
-def set_doc(doc_string):
-    """
-    Decorator function that sets the docstring to the given text.
-
-    It is useful for functions whose docstring is calculated
-    (including string substitutions).
-    """
-    def set_doc_string(func):
-        func.__doc__ = doc_string
-        return func
-    return set_doc_string
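-
-# For example (an illustrative use of set_doc, not from the original
-# module):
-#
-#     @set_doc("Squares x.")
-#     def square(x):
-#         return x*x
-#
-#     square.__doc__   # -> "Squares x."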
-
-# Some types known to not depend on Variable objects are put in
-# CONSTANT_TYPES.  The most common types can be put in front, as this
-# may slightly improve the execution speed.
-CONSTANT_TYPES = (float, int, complex) # , long)
-
-###############################################################################
-# Utility for issuing deprecation warnings
-
-def deprecation(message):
-    '''
-    Warns the user with the given message, by issuing a
-    DeprecationWarning.
-    '''
-    warnings.warn(message, DeprecationWarning, stacklevel=2)
-
-
-###############################################################################
-
-## Definitions that depend on the availability of NumPy:
-
-
-try:
-    import numpy
-except ImportError:
-    pass
-else:
-
-    # NumPy numbers do not depend on Variable objects:
-    CONSTANT_TYPES += (numpy.number,)
-
-    # Entering variables as a block of correlated values.  Only available
-    # if NumPy is installed.
-
-    #! It would be possible to dispense with NumPy, but a routine should be
-    # written for obtaining the eigenvectors of a symmetric matrix.  See
-    # for instance Numerical Recipes: (1) reduction to tri-diagonal
-    # [Givens or Householder]; (2) QR / QL decomposition.
-
-    def correlated_values(nom_values, covariance_mat, tags=None):
-        """
-        Returns numbers with uncertainties (AffineScalarFunc objects)
-        that correctly reproduce the given covariance matrix, and have
-        the given (float) values as their nominal value.
-
-        The correlated_values_norm() function returns the same result,
-        but takes a correlation matrix instead of a covariance matrix.
-
-        The list of values and the covariance matrix must have the
-        same length, and the matrix must be a square (symmetric) one.
-
-        The numbers with uncertainties returned depend on newly
-        created, independent variables (Variable objects).
-
-        If 'tags' is not None, it must list the tag of each new
-        independent variable.
-
-        nom_values -- sequence with the nominal (real) values of the
-        numbers with uncertainties to be returned.
-
-        covariance_mat -- full covariance matrix of the returned
-        numbers with uncertainties (not the statistical correlation
-        matrix, i.e., not the normalized covariance matrix). For
-        example, the first element of this matrix is the variance of
-        the first returned number with uncertainty.
-        """
-
-        # If no tags were given, we prepare tags for the newly created
-        # variables:
-        if tags is None:
-            tags = (None,) * len(nom_values)
-
-        # The covariance matrix is diagonalized in order to define
-        # the independent variables that model the given values:
-
-        (variances, transform) = numpy.linalg.eigh(covariance_mat)
-
-        # Numerical errors might make some variances negative: we set
-        # them to zero:
-        variances[variances < 0] = 0.
-
-        # Creation of new, independent variables:
-
-        # We use the fact that the eigenvectors in 'transform' are
-        # special: 'transform' is unitary: its inverse is its transpose:
-
-        variables = tuple(
-            # The variables represent "pure" uncertainties:
-            Variable(0, sqrt(variance), tag)
-            for (variance, tag) in zip(variances, tags))
-
-        # Representation of the initial correlated values:
-        values_funcs = tuple(
-            AffineScalarFunc(value, dict(zip(variables, coords)))
-            for (coords, value) in zip(transform, nom_values))
-
-        return values_funcs
-
-    __all__.append('correlated_values')
-
-    def correlated_values_norm(values_with_std_dev, correlation_mat,
-                               tags=None):
-        '''
-        Returns correlated values like correlated_values(), but takes
-        instead as input:
-
-        - nominal (float) values along with their standard deviation, and
-
-        - a correlation matrix (i.e. a normalized covariance matrix
-          normalized with individual standard deviations).
-
-        values_with_std_dev -- sequence of (nominal value, standard
-        deviation) pairs. The returned, correlated values have these
-        nominal values and standard deviations.
-
-        correlation_mat -- correlation matrix (i.e. the normalized
-        covariance matrix, a matrix with ones on its diagonal).
-        '''
-
-        (nominal_values, std_devs) = numpy.transpose(values_with_std_dev)
-
-        return correlated_values(
-            nominal_values,
-            correlation_mat*std_devs*std_devs[numpy.newaxis].T,
-            tags)
-
-    __all__.append('correlated_values_norm')
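-
-    # A hedged sketch with illustrative values (not from the original
-    # module): correlated_values_norm() takes (value, std_dev) pairs plus
-    # a correlation matrix with ones on its diagonal:
-    #
-    #     corr = numpy.array([[1.0, 0.9], [0.9, 1.0]])
-    #     (a, b) = correlated_values_norm([(1.0, 0.1), (2.0, 0.2)], corr)
-    #     covariance_matrix([a, b])[0][1]  # ~0.9 * 0.1 * 0.2 = 0.018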
-
-###############################################################################
-
-# Mathematical operations with local approximations (affine scalar
-# functions)
-
-class NotUpcast(Exception):
-    'Raised when an object cannot be converted to a number with uncertainty'
-
-def to_affine_scalar(x):
-    """
-    Transforms x into a constant affine scalar function
-    (AffineScalarFunc), unless it is already an AffineScalarFunc (in
-    which case x is returned unchanged).
-
-    Raises an exception unless 'x' belongs to some specific classes of
-    objects that are known not to depend on AffineScalarFunc objects
-    (which then cannot be considered as constants).
-    """
-
-    if isinstance(x, AffineScalarFunc):
-        return x
-
-    #! In Python 2.6+, numbers.Number could be used instead, here:
-    if isinstance(x, CONSTANT_TYPES):
-        # No variable => no derivative to define:
-        return AffineScalarFunc(x, {})
-
-    # Case of lists, etc.
-    raise NotUpcast("%s cannot be converted to a number with"
-                    " uncertainty" % type(x))
-
-def partial_derivative(f, param_num):
-    """
-    Returns a function that numerically calculates the partial
-    derivative of function f with respect to its argument number
-    param_num.
-
-    The step used for the numerical approximation is chosen internally,
-    relative to the magnitude of the argument being varied.
-    """
-
-    def partial_derivative_of_f(*args, **kws):
-        """
-        Partial derivative, calculated with the (-epsilon, +epsilon)
-        method, which is more precise than the (0, +epsilon) method.
-        """
-        # f_nominal_value = f(*args)
-        param_kw = None
-        if '__param__kw__' in kws:
-            param_kw = kws.pop('__param__kw__')
-        shifted_args = list(args)  # Copy, and conversion to a mutable
-        shifted_kws  = {}
-        for k, v in kws.items():
-            shifted_kws[k] = v
-        step = 1.e-8
-        if param_kw in shifted_kws:
-            step = step*abs(shifted_kws[param_kw])
-        elif param_num < len(shifted_args):
-            # The step is relative to the parameter being varied, so that
-            # shifting it does not suffer from finite precision:
-            step = step*abs(shifted_args[param_num])
-
-        if param_kw in shifted_kws:
-            shifted_kws[param_kw] += step
-        elif param_num < len(shifted_args):
-            shifted_args[param_num] += step
-
-        shifted_f_plus = f(*shifted_args, **shifted_kws)
-
-        if param_kw in shifted_kws:
-            shifted_kws[param_kw] -= 2*step
-        elif param_num < len(shifted_args):
-            shifted_args[param_num] -= 2*step
-        shifted_f_minus = f(*shifted_args, **shifted_kws)
-
-        return (shifted_f_plus - shifted_f_minus)/2/step
-
-    return partial_derivative_of_f
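-
-# A hedged illustration (not from the original module): the function
-# returned by partial_derivative() evaluates the central difference
-# [f(x+h) - f(x-h)] / (2h), with the step h chosen relative to |x|:
-#
-#     d_sin = partial_derivative(math.sin, 0)
-#     d_sin(1.0)  # ~0.5403, close to math.cos(1.0)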
-
-class NumericalDerivatives(object):
-    """
-    Convenient access to the partial derivatives of a function,
-    calculated numerically.
-    """
-    # This is not a list because the number of arguments of the
-    # function is not known in advance, in general.
-
-    def __init__(self, function):
-        """
-        'function' is the function whose derivatives can be computed.
-        """
-        self._function = function
-
-    def __getitem__(self, n):
-        """
-        Returns the n-th numerical derivative of the function.
-        """
-        return partial_derivative(self._function, n)
-
-def wrap(f, derivatives_iter=None):
-    """
-    Wraps a function f into a function that also accepts numbers with
-    uncertainties (UFloat objects) and returns a number with
-    uncertainties.  Doing so may be necessary when function f cannot
-    be expressed analytically (with uncertainties-compatible operators
-    and functions like +, *, umath.sin(), etc.).
-
-    f must return a scalar (not a list, etc.).
-
-    In the wrapped function, the standard Python scalar arguments of f
-    (float, int, etc.) can be replaced by numbers with
-    uncertainties. The result will contain the appropriate
-    uncertainty.
-
-    If no argument to the wrapped function has an uncertainty, f
-    simply returns its usual, scalar result.
-
-    If supplied, derivatives_iter can be an iterable that generally
-    contains functions; each successive function is the partial
-    derivative of f with respect to the corresponding variable (one
-    function for each argument of f, which takes as many arguments as
-    f).  If instead of a function, an element of derivatives_iter
-    contains None, then it is automatically replaced by the relevant
-    numerical derivative; this can be used for non-scalar arguments of
-    f (like string arguments).
-
-    If derivatives_iter is None, or if derivatives_iter contains a
-    fixed (and finite) number of elements, then any missing derivative
-    is calculated numerically.
-
-    An infinite number of derivatives can be specified by having
-    derivatives_iter be an infinite iterator; this can for instance
-    be used for specifying the derivatives of functions with an
-    undefined number of arguments (like sum(), whose partial
-    derivatives all return 1).
-
-    Example (for illustration purposes only, as
-    uncertainties.umath.sin() runs faster than the examples that
-    follow): wrap(math.sin) is a sine function that can be applied to
-    numbers with uncertainties.  Its derivative will be calculated
-    numerically.  wrap(math.sin, [None]) would have produced the same
-    result.  wrap(math.sin, [math.cos]) is the same function, but with
-    an analytically defined derivative.
-    """
-
-    if derivatives_iter is None:
-        derivatives_iter = NumericalDerivatives(f)
-    else:
-        # Derivatives that are not defined are calculated numerically,
-        # if there is a finite number of them (the function lambda
-        # *args: fsum(args) has an undefined number of arguments, as
-        # it just performs a sum):
-        try:  # Is the number of derivatives fixed?
-            len(derivatives_iter)
-        except TypeError:
-            pass
-        else:
-            derivatives_iter = [
-                partial_derivative(f, k) if derivative is None
-                else derivative
-                for (k, derivative) in enumerate(derivatives_iter)]
-
-    #! Setting the doc string after "def f_with...()" does not
-    # seem to work.  We define it explicitly:
-    @set_doc("""\
-    Version of %s(...) that returns an affine approximation
-    (AffineScalarFunc object), if its result depends on variables
-    (Variable objects).  Otherwise, returns a simple constant (when
-    applied to constant arguments).
-
-    Warning: arguments of the function that are not AffineScalarFunc
-    objects must not depend on uncertainties.Variable objects in any
-    way.  Otherwise, the dependence of the result in
-    uncertainties.Variable objects will be incorrect.
-
-    Original documentation:
-    %s""" % (f.__name__, f.__doc__))
-    def f_with_affine_output(*args, **kwargs):
-        # Can this function perform the calculation of an
-        # AffineScalarFunc (or maybe float) result?
-        try:
-            old_funcs = map(to_affine_scalar, args)
-            aff_funcs = [to_affine_scalar(a) for a in args]
-            aff_kws = kwargs
-            aff_varkws = []
-            for key, val in kwargs.items():
-                if isinstance(val, Variable):
-                    aff_kws[key] = to_affine_scalar(val)
-                    aff_varkws.append(key)
-
-        except NotUpcast:
-
-            # This function does not know how to itself perform
-            # calculations with non-float-like arguments (as they
-            # might for instance be objects whose value really changes
-            # if some Variable objects had different values):
-
-            # Is it clear that we can't delegate the calculation?
-
-            if any(isinstance(arg, AffineScalarFunc) for arg in args):
-                # This situation arises for instance when calculating
-                # AffineScalarFunc(...)*numpy.array(...).  In this
-                # case, we must let NumPy handle the multiplication
-                # (which is then performed element by element):
-                return NotImplemented
-            else:
-                # If none of the arguments is an AffineScalarFunc, we
-                # can delegate the calculation to the original
-                # function.  This can be useful when it is called with
-                # only one argument (as in
-                # numpy.log10(numpy.ndarray(...)):
-                return f(*args, **kwargs)
-
-        ########################################
-        # Nominal value of the constructed AffineScalarFunc:
-        args_values = [e.nominal_value for e in aff_funcs]
-        kw_values = {}
-        for key, val in aff_kws.items():
-            kw_values[key] = val
-            if key in aff_varkws:
-                kw_values[key] = val.nominal_value
-        f_nominal_value = f(*args_values, **kw_values)
-
-        ########################################
-
-        # List of involved variables (Variable objects):
-        variables = set()
-        for expr in aff_funcs:
-            variables |= set(expr.derivatives)
-        for vname in aff_varkws:
-            variables |= set(aff_kws[vname].derivatives)
-        ## It is sometimes useful to only return a regular constant:
-
-        # (1) Optimization / convenience behavior: when 'f' is called
-        # on purely constant values (e.g., sin(2)), there is no need
-        # for returning a more complex AffineScalarFunc object.
-
-        # (2) Functions that do not return a "float-like" value might
-        # not have a relevant representation as an AffineScalarFunc.
-        # This includes boolean functions, since their derivatives are
-        # either 0 or are undefined: they are better represented as
-        # Python constants than as constant AffineScalarFunc functions.
-
-        if not variables or isinstance(f_nominal_value, bool):
-            return f_nominal_value
-
-        # The result of 'f' does depend on 'variables'...
-
-        ########################################
-
-        # Calculation of the derivatives with respect to the arguments
-        # of f (aff_funcs):
-
-        # The chain rule is applied.  This is because, in the case of
-        # numerical derivatives, it allows for a better-controlled
-        # numerical stability than numerically calculating the partial
-        # derivatives through '[f(x + dx, y + dy, ...) -
-        # f(x,y,...)]/da' where dx, dy,... are calculated by varying
-        # 'a'.  In fact, it is numerically better to control how big
-        # (dx, dy,...) are: 'f' is a simple mathematical function and
-        # it is possible to know how precise the df/dx are (which is
-        # not possible with the numerical df/da calculation above).
-
-        # We use numerical derivatives, if we don't already have a
-        # list of derivatives:
-
-        #! Note that this test could be avoided by requiring the
-        # caller to always provide derivatives.  When changing the
-        # functions of the math module, this would force this module
-        # to know about all the math functions.  Another possibility
-        # would be to force derivatives_iter to contain, say, the
-        # first 3 derivatives of f.  But either of these two ideas
-        # could break some day (if new functions are added to the math
-        # module, or if some function has more than 3 arguments).
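-        # Illustrative sketch (not from the original code): if f(u) is
-        # called with u = g(x), an AffineScalarFunc with du/dx = 3,
-        # then df/du is evaluated below, and the chain-rule loops that
-        # follow combine it into df/dx = (df/du)*3.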
-
-        derivatives_wrt_args = []
-        for (arg, derivative) in zip(aff_funcs, derivatives_iter):
-            # The nominal values (kw_values) are passed, so that the
-            # derivative functions only receive float-like keyword
-            # arguments, like f itself:
-            derivatives_wrt_args.append(derivative(*args_values, **kw_values)
-                                        if arg.derivatives
-                                        else 0)
-
-
-        for (vname, derivative) in zip(aff_varkws, derivatives_iter):
-            derivatives_wrt_args.append(derivative(__param__kw__=vname,
-                                                   **kw_values)
-                                        if aff_kws[vname].derivatives
-                                        else 0)
-
-        ########################################
-        # Calculation of the derivative of f with respect to all the
-        # variables (Variable) involved.
-
-        # Initial value (is updated below):
-        derivatives_wrt_vars = dict((var, 0.) for var in variables)
-
-        # The chain rule is used (we already have
-        # derivatives_wrt_args):
-
-        for (func, f_derivative) in zip(aff_funcs, derivatives_wrt_args):
-            for (var, func_derivative) in func.derivatives.items():
-                derivatives_wrt_vars[var] += f_derivative * func_derivative
-
-        for (vname, f_derivative) in zip(aff_varkws, derivatives_wrt_args):
-            func = aff_kws[vname]
-            for (var, func_derivative) in func.derivatives.items():
-                derivatives_wrt_vars[var] += f_derivative * func_derivative
-
-        # The function now returns an AffineScalarFunc object:
-        return AffineScalarFunc(f_nominal_value, derivatives_wrt_vars)
-
-    # It is easier to work with f_with_affine_output, which represents
-    # a wrapped version of 'f', when it bears the same name as 'f':
-    f_with_affine_output.__name__ = f.__name__
-
-    return f_with_affine_output
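-
-# Example of the wrapped functions produced above (an illustrative
-# sketch, not part of the original module):
-#
-#     import math
-#     wrapped_log = wrap(math.log)  # Derivative obtained numerically
-#     x = ufloat((2.0, 0.01))
-#     print wrapped_log(x)          # ~0.6931+/-0.005, since d(log)/dx = 1/2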
-
-def _force_aff_func_args(func):
-    """
-    Takes an operator op(x, y) and wraps it.
-
-    The constructed operator returns func(x, to_affine_scalar(y)) if y
-    can be upcast with to_affine_scalar(); otherwise, it returns
-    NotImplemented.
-
-    Thus, func() is only called on two AffineScalarFunc objects, if
-    its first argument is an AffineScalarFunc.
-    """
-
-    def op_on_upcast_args(x, y):
-        """
-        Returns %s(self, to_affine_scalar(y)) if y can be upcast
-        through to_affine_scalar.  Otherwise returns NotImplemented.
-        """ % func.__name__
-
-        try:
-            y_with_uncert = to_affine_scalar(y)
-        except NotUpcast:
-            # This module does not know how to handle the comparison:
-            # (example: y is a NumPy array, in which case the NumPy
-            # array will decide that func() should be applied
-            # element-wise between x and all the elements of y):
-            return NotImplemented
-        else:
-            return func(x, y_with_uncert)
-
-    return op_on_upcast_args
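-
-# Illustrative sketch (not part of the original module): once wrapped,
-# an operator transparently steps aside for objects it cannot upcast:
-#
-#     eq = _force_aff_func_args(_eq_on_aff_funcs)
-#     eq(x, y)                 # Calls _eq_on_aff_funcs(x, to_affine_scalar(y))
-#     eq(x, numpy.array([1]))  # NotImplemented: NumPy then handles '=='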
-
-########################################
-
-# Definition of boolean operators, that assume that self and
-# y_with_uncert are AffineScalarFunc.
-
-# The fact that uncertainties must be small is used here: the
-# comparison functions are supposed to be constant for most values of
-# the random variables.
-
-# Even though uncertainties are supposed to be small, comparisons
-# between 3+/-0.1 and 3.0 are handled (even though x == 3.0 is not a
-# constant function in the 3+/-0.1 interval).  The comparison between
-# x and x is handled too, when x has an uncertainty.  In fact, as
-# explained in the main documentation, it is possible to give a useful
-# meaning to the comparison operators, in these cases.
-
-def _eq_on_aff_funcs(self, y_with_uncert):
-    """
-    __eq__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-    difference = self - y_with_uncert
-    # Only an exact zero difference means that self and y are
-    # equal numerically:
-    return not (difference._nominal_value or difference.std_dev())
-
-def _ne_on_aff_funcs(self, y_with_uncert):
-    """
-    __ne__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-
-    return not _eq_on_aff_funcs(self, y_with_uncert)
-
-def _gt_on_aff_funcs(self, y_with_uncert):
-    """
-    __gt__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-    return self._nominal_value > y_with_uncert._nominal_value
-
-def _ge_on_aff_funcs(self, y_with_uncert):
-    """
-    __ge__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-
-    return (_gt_on_aff_funcs(self, y_with_uncert)
-            or _eq_on_aff_funcs(self, y_with_uncert))
-
-def _lt_on_aff_funcs(self, y_with_uncert):
-    """
-    __lt__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-    return self._nominal_value < y_with_uncert._nominal_value
-
-def _le_on_aff_funcs(self, y_with_uncert):
-    """
-    __le__ operator, assuming that both self and y_with_uncert are
-    AffineScalarFunc objects.
-    """
-
-    return (_lt_on_aff_funcs(self, y_with_uncert)
-            or _eq_on_aff_funcs(self, y_with_uncert))
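-
-# Illustrative sketch of the resulting semantics (not part of the
-# original module):
-#
-#     x = ufloat((3.14, 0.01))
-#     y = ufloat((3.14, 0.01))
-#     x == x   # True: x - x is exactly 0, with zero uncertainty
-#     x == y   # False: the difference of independent variables fluctuates
-#     x < 4    # True: based on the nominal values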
-
-########################################
-
-class AffineScalarFunc(object):
-    """
-    Affine functions that support basic mathematical operations
-    (addition, etc.).  Such functions can for instance be used for
-    representing the local (linear) behavior of any function.
-
-    This class is mostly meant to be used internally.
-
-    This class can also be used to represent constants.
-
-    The variables of affine scalar functions are Variable objects.
-
-    AffineScalarFunc objects include facilities for calculating the
-    'error' on the function, from the uncertainties on its variables.
-
-    Main attributes and methods:
-
-    - nominal_value, std_dev(): value at the origin / nominal value,
-      and standard deviation.
-
-    - error_components(): error_components()[x] is the error due to
-      Variable x.
-
-    - derivatives: derivatives[x] is the (value of the) derivative
-      with respect to Variable x.  This attribute is a dictionary
-      whose keys are the Variable objects on which the function
-      depends.
-
-      All the Variable objects on which the function depends are in
-      'derivatives'.
-
-    - std_score(x): position of number x with respect to the
-      nominal value, in units of the standard deviation.
-    """
-
-    # To save memory in large arrays:
-    __slots__ = ('_nominal_value', 'derivatives')
-
-    #! The code could be modified in order to accommodate non-float
-    # nominal values.  This could for instance be done through
-    # the operator module: instead of delegating operations to
-    # float.__*__ operations, they could be delegated to
-    # operator.__*__ functions (while taking care of properly handling
-    # reverse operations: __radd__, etc.).
-
-    def __init__(self, nominal_value, derivatives):
-        """
-        nominal_value -- value of the function at the origin.
-        nominal_value must not depend in any way on the Variable
-        objects in 'derivatives' (the value at the origin of the
-        function being defined is a constant).
-
-        derivatives -- maps each Variable object on which the function
-        being defined depends to the value of the derivative with
-        respect to that variable, taken at the nominal value of all
-        variables.
-
-        Warning: the above constraint is not checked, and the user is
-        responsible for complying with it.
-        """
-
-        # Defines the value at the origin:
-
-        # Only float-like values are handled.  One reason is that it
-        # does not make sense for a scalar affine function not to
-        # yield float values.  Another reason is that it would not
-        # make sense to have a complex nominal value, here (it would
-        # not be handled correctly at all): converting to float should
-        # be possible.
-        self._nominal_value = float(nominal_value)
-        self.derivatives = derivatives
-
-    # The following prevents the 'nominal_value' attribute from being
-    # modified by the user:
-    @property
-    def nominal_value(self):
-        "Nominal value of the random number."
-        return self._nominal_value
-
-    ############################################################
-
-
-    ### Operators: operators applied to AffineScalarFunc and/or
-    ### float-like objects only are supported.  This is why methods
-    ### from float are used for implementing these operators.
-
-    # Operators with no reflection:
-
-    ########################################
-
-    # __nonzero__() is supposed to return a boolean value (it is used
-    # by bool()).  It is for instance used for converting the result
-    # of comparison operators to a boolean, in sorted().  If we want
-    # to be able to sort AffineScalarFunc objects, __nonzero__ cannot
-    # return an AffineScalarFunc object.  Since boolean results (such
-    # as the result of bool()) don't have a very meaningful
-    # uncertainty unless it is zero, this behavior is fine.
-
-    def __nonzero__(self):
-        """
-        Equivalent to self != 0.
-        """
-        #! This might not be relevant for AffineScalarFunc objects
-        # that contain values in a linear space which does not convert
-        # the float 0 into the null vector (see the __eq__ function:
-        # __nonzero__ works fine if subtracting the 0 float from a
-        # vector of the linear space works as if 0 were the null
-        # vector of that space):
-        return self != 0.  # Uses the AffineScalarFunc.__ne__ function
-
-    ########################################
-
-    ## Logical operators: warning: the resulting value cannot always
-    ## be differentiated.
-
-    # The boolean operations are not differentiable everywhere, but
-    # almost...
-
-    # (1) I can rely on the assumption that the user only has "small"
-    # errors on variables, as this is used in the calculation of the
-    # standard deviation (which performs linear approximations):
-
-    # (2) However, this assumption is not relevant for some
-    # operations, and does not have to hold, in some cases.  This
-    # comes from the fact that logical operations (e.g. __eq__(x,y))
-    # are not differentiable for many usual cases.  For instance, it
-    # is desirable to have x == x for x = n+/-e, whatever the size of e.
-    # Furthermore, n+/-e != n+/-e', if e != e', whatever the size of e or
-    # e'.
-
-    # (3) The result of logical operators does not have to be a
-    # function with derivatives, as these derivatives are either 0 or
-    # don't exist (i.e., the user should probably not rely on
-    # derivatives in their code).
-
-    # __eq__ is used in "if data in [None, ()]", for instance.  It is
-    # therefore important to be able to handle this case too, which is
-    # taken care of when _force_aff_func_args(_eq_on_aff_funcs)
-    # returns NotImplemented.
-    __eq__ = _force_aff_func_args(_eq_on_aff_funcs)
-
-    __ne__ = _force_aff_func_args(_ne_on_aff_funcs)
-    __gt__ = _force_aff_func_args(_gt_on_aff_funcs)
-
-    # __ge__ is not the opposite of __lt__ because these operators do
-    # not always yield a boolean (for instance, 0 <= numpy.arange(10)
-    # yields an array).
-    __ge__ = _force_aff_func_args(_ge_on_aff_funcs)
-
-    __lt__ = _force_aff_func_args(_lt_on_aff_funcs)
-    __le__ = _force_aff_func_args(_le_on_aff_funcs)
-
-    ########################################
-
-    # Uncertainties handling:
-
-    def error_components(self):
-        """
-        Individual components of the standard deviation of the affine
-        function (in absolute value), returned as a dictionary with
-        Variable objects as keys.
-
-        This method assumes that the derivatives contained in the
-        object take scalar values (and are not a tuple, like what
-        math.frexp() returns, for instance).
-        """
-
-        # Calculation of the variance:
-        error_components = {}
-        for (variable, derivative) in self.derivatives.items():
-            # Individual standard error due to variable:
-            error_components[variable] = abs(derivative*variable._std_dev)
-
-        return error_components
-
-    def std_dev(self):
-        """
-        Standard deviation of the affine function.
-
-        This method assumes that the function returns scalar results.
-
-        This returned standard deviation depends on the current
-        standard deviations [std_dev()] of the variables (Variable
-        objects) involved.
-        """
-        #! It would be possible to not allow the user to update the
-        # std dev of Variable objects, in which case AffineScalarFunc
-        # objects could have a pre-calculated or, better, cached
-        # std_dev value (in fact, many intermediate AffineScalarFunc do
-        # not need to have their std_dev calculated: only the final
-        # AffineScalarFunc returned to the user does).
-        return sqrt(sum(
-            delta**2 for delta in self.error_components().values()))
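-
-    # Illustrative sketch (not part of the original code): the standard
-    # deviation is the quadratic sum of the error components.  With
-    # u = ufloat((1, 0.1)), v = ufloat((10, 0.2)) and s = u + v:
-    #
-    #     s.error_components()  # {u: 0.1, v: 0.2}
-    #     s.std_dev()           # sqrt(0.1**2 + 0.2**2) = 0.2236...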
-
-    def _general_representation(self, to_string):
-        """
-        Uses the to_string() conversion function on both the nominal
-        value and the standard deviation, and returns a string that
-        describes them.
-
-        to_string() is typically repr() or str().
-        """
-
-        (nominal_value, std_dev) = (self._nominal_value, self.std_dev())
-
-        # String representation:
-
-        # Not putting spaces around "+/-" helps with arrays of
-        # Variable, as each value with an uncertainty then forms an
-        # unbroken block of characters (otherwise, the standard
-        # deviation can be mistaken for another element of the array).
-
-        return ("%s+/-%s" % (to_string(nominal_value), to_string(std_dev))
-                if std_dev
-                else to_string(nominal_value))
-
-    def __repr__(self):
-        return self._general_representation(repr)
-
-    def __str__(self):
-        return self._general_representation(str)
-
-    def std_score(self, value):
-        """
-        Returns 'value' - nominal value, in units of the standard
-        deviation.
-
-        Raises a ValueError exception if the standard deviation is zero.
-        """
-        try:
-            # The ._nominal_value is a float: there is no integer division,
-            # here:
-            return (value - self._nominal_value) / self.std_dev()
-        except ZeroDivisionError:
-            raise ValueError("The standard deviation is zero:"
-                             " undefined result.")
-
-    def __deepcopy__(self, memo):
-        """
-        Hook for the standard copy module.
-
-        The returned AffineScalarFunc is a completely fresh copy,
-        which is fully independent of any variable defined so far.
-        New variables are specially created for the returned
-        AffineScalarFunc object.
-        """
-        return AffineScalarFunc(
-            self._nominal_value,
-            dict((copy.deepcopy(var), deriv)
-                 for (var, deriv) in self.derivatives.items()))
-
-    def __getstate__(self):
-        """
-        Hook for the pickle module.
-        """
-        obj_slot_values = dict((k, getattr(self, k)) for k in
-                               # self.__slots__ would not work when
-                               # self is an instance of a subclass:
-                               AffineScalarFunc.__slots__)
-        return obj_slot_values
-
-    def __setstate__(self, data_dict):
-        """
-        Hook for the pickle module.
-        """
-        for (name, value) in data_dict.items():
-            setattr(self, name, value)
-
-# Nicer name, for users: isinstance(ufloat(...), UFloat) is True:
-UFloat = AffineScalarFunc
-
-def get_ops_with_reflection():
-
-    """
-    Returns operators with a reflection, along with their derivatives
-    (for float operands).
-    """
-
-    # Operators with a reflection:
-
-    # We do not include divmod().  This operator could be included, by
-    # allowing its result (a tuple) to be differentiated, in
-    # derivative_value().  However, a similar result can be achieved
-    # by the user by calculating separately the division and the
-    # result.
-
-    # {operator(x, y): (derivative wrt x, derivative wrt y)}:
-
-    # Note that unknown partial derivatives can be numerically
-    # calculated by expressing them as something like
-    # "partial_derivative(float.__...__, 1)(x, y)":
-
-    # String expressions are used, so that reversed operators are easy
-    # to code, and execute relatively efficiently:
-
-    derivatives_list = {
-        'add': ("1.", "1."),
-        # 'div' is the '/' operator when __future__.division is not in
-        # effect.  Since '/' is applied to
-        # AffineScalarFunc._nominal_value numbers, it is applied on
-        # floats, and is therefore the "usual" mathematical division.
-        'div': ("1/y", "-x/y**2"),
-        'floordiv': ("0.", "0."),  # Non exact: there is a discontinuities
-        # The derivative wrt the 2nd arguments is something like (..., x//y),
-        # but it is calculated numerically, for convenience:
-        'mod': ("1.", "partial_derivative(float.__mod__, 1)(x, y)"),
-        'mul': ("y", "x"),
-        'pow': ("y*x**(y-1)", "log(x)*x**y"),
-        'sub': ("1.", "-1."),
-        'truediv': ("1/y", "-x/y**2")
-        }
-
-    # Conversion to Python functions:
-    ops_with_reflection = {}
-    for (op, derivatives) in derivatives_list.items():
-        ops_with_reflection[op] = [
-            eval("lambda x, y: %s" % expr) for expr in derivatives ]
-
-        ops_with_reflection["r"+op] = [
-            eval("lambda y, x: %s" % expr) for expr in reversed(derivatives)]
-
-    return ops_with_reflection
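-
-# Illustrative sketch (not part of the original module): each entry
-# maps an operator to its pair of partial derivative functions:
-#
-#     ops = get_ops_with_reflection()
-#     ops['pow'][0](2., 3.)  # d(x**y)/dx = y*x**(y-1) = 3*4 = 12.0
-#     ops['pow'][1](2., 3.)  # d(x**y)/dy = log(x)*x**y = log(2)*8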
-
-# Operators that have a reflection, along with their derivatives:
-_ops_with_reflection = get_ops_with_reflection()
-
-# Some effectively modified operators (for the automated tests):
-_modified_operators = []
-_modified_ops_with_reflection = []
-
-def add_operators_to_AffineScalarFunc():
-    """
-    Adds many operators (__add__, etc.) to the AffineScalarFunc class.
-    """
-
-    ########################################
-
-    #! Derivatives are set to return floats.  For one thing,
-    # uncertainties generally involve floats, as they are based on
-    # small variations of the parameters.  It is also better to
-    # protect the user from unexpected integer results that behave
-    # badly with division.
-
-    ## Operators that return a numerical value:
-
-    # Single-argument operators that should be adapted from floats to
-    # AffineScalarFunc objects, associated to their derivative:
-    simple_numerical_operators_derivatives = {
-        'abs': lambda x: 1. if x>=0 else -1.,
-        'neg': lambda x: -1.,
-        'pos': lambda x: 1.,
-        'trunc': lambda x: 0.
-        }
-
-    for (op, derivative) in (
-          simple_numerical_operators_derivatives.items()):
-
-        attribute_name = "__%s__" % op
-        # float objects don't exactly have the same attributes between
-        # different versions of Python (for instance, __trunc__ was
-        # introduced with Python 2.6):
-        try:
-            setattr(AffineScalarFunc, attribute_name,
-                    wrap(getattr(float, attribute_name),
-                                 [derivative]))
-        except AttributeError:
-            pass
-        else:
-            _modified_operators.append(op)
-
-    ########################################
-
-    # Reversed versions (useful for float*AffineScalarFunc, for instance):
-    for (op, derivatives) in _ops_with_reflection.items():
-        attribute_name = '__%s__' % op
-        # float objects don't exactly have the same attributes between
-        # different versions of Python (for instance, __div__ and
-        # __rdiv__ were removed, in Python 3):
-        try:
-            setattr(AffineScalarFunc, attribute_name,
-                    wrap(getattr(float, attribute_name), derivatives))
-        except AttributeError:
-            pass
-        else:
-            _modified_ops_with_reflection.append(op)
-
-    ########################################
-    # Conversions to pure numbers are meaningless.  Note that the
-    # behavior of float(1j) is similar.
-    for coercion_type in ('complex', 'int', 'long', 'float'):
-        # The current type is bound as a default argument: with a
-        # plain closure, every raise_error() would report the type
-        # from the last loop iteration ('float'):
-        def raise_error(self, coercion_type=coercion_type):
-            raise TypeError("can't convert an affine function (%s)"
-                            ' to %s; use x.nominal_value'
-                            # In case AffineScalarFunc is sub-classed:
-                            % (self.__class__, coercion_type))
-
-        setattr(AffineScalarFunc, '__%s__' % coercion_type, raise_error)
-
-add_operators_to_AffineScalarFunc()  # Actual addition of class attributes
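-
-# Illustrative sketch (not part of the original module): once the
-# operators are added, mixed float/uncertainty arithmetic works through
-# the usual Python protocol:
-#
-#     x = ufloat((2.0, 0.1))
-#     x**2    # 4.0+/-0.4, since d(x**2)/dx = 4
-#     3. * x  # float.__mul__ declines, x.__rmul__ answers: 6.0+/-0.3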
-
-class Variable(AffineScalarFunc):
-    """
-    Representation of a float-like scalar random variable, along with
-    its uncertainty.
-
-    Objects are meant to represent variables that are independent from
-    each other (correlations are handled through the AffineScalarFunc
-    class).
-    """
-
-    # To save memory in large arrays:
-    __slots__ = ('_std_dev', 'tag')
-
-    def __init__(self, value, std_dev, tag=None):
-        """
-        The nominal value and the standard deviation of the variable
-        are set.  These values must be scalars.
-
-        'tag' is a tag that the user can associate to the variable.  This
-        is useful for tracing variables.
-
-        The meaning of the nominal value is described in the main
-        module documentation.
-        """
-
-        #! The value, std_dev, and tag are assumed by __copy__() not to
-        # be copied.  Either this should be guaranteed here, or __copy__
-        # should be updated.
-
-        # Only float-like values are handled.  One reason is that the
-        # division operator on integers would not produce a
-        # differentiable function: for instance, Variable(3, 0.1)/2
-        # has a nominal value of 3/2 = 1, but a "shifted" value
-        # of 3.1/2 = 1.55.
-        value = float(value)
-
-        # If the variable changes by dx, then the value of the affine
-        # function that gives its value changes by 1*dx:
-
-        # ! Memory cycles are created.  However, they are garbage
-        # collected, if possible.  Using a weakref.WeakKeyDictionary
-        # takes much more memory.  Thus, this implementation chooses
-        # more cycles and a smaller memory footprint instead of no
-        # cycles and a larger memory footprint.
-
-        # ! Using AffineScalarFunc instead of super() results only in
-        # a 3 % speed loss (Python 2.6, Mac OS X):
-        super(Variable, self).__init__(value, {self: 1.})
-
-        # We force the error to be float-like.  Since it is considered
-        # as a Gaussian standard deviation, it is semantically
-        # positive (even though there would be no problem defining it
-        # as a sigma, where sigma can be negative and still define a
-        # Gaussian):
-
-        assert std_dev >= 0, "the error must be a positive number"
-        # Since AffineScalarFunc.std_dev is a property, we cannot do
-        # "self.std_dev = ...":
-        self._std_dev = std_dev
-
-        self.tag = tag
-
-    # Standard deviations can be modified (this is a feature).
-    # AffineScalarFunc objects that depend on the Variable have their
-    # std_dev() automatically modified (recalculated with the new
-    # std_dev of their Variables):
-    def set_std_dev(self, value):
-        """
-        Updates the standard deviation of the variable to a new value.
-        """
-
-        # A zero variance is accepted.  Thus, it is possible to
-        # conveniently use infinitely precise variables, for instance
-        # to study special cases.
-
-        self._std_dev = value
-
-    # The following method is overridden so that we can represent the tag:
-    def _general_representation(self, to_string):
-        """
-        Uses the to_string() conversion function on both the nominal
-        value and standard deviation and returns a string that
-        describes the number.
-
-        to_string() is typically repr() or str().
-        """
-        num_repr  = super(Variable, self)._general_representation(to_string)
-
-        # Optional tag: only full representations (to_string == repr)
-        # contain the tag, as the tag is required in order to recreate
-        # the variable.  Outputting the tag for regular string ("print
-        # x") would be too heavy and produce an unusual representation
-        # of a number with uncertainty.
-        return (num_repr if ((self.tag is None) or (to_string != repr))
-                else "< %s = %s >" % (self.tag, num_repr))
-
-    def __hash__(self):
-        # All Variable objects are by definition independent
-        # variables, so they never compare equal; their id() is
-        # therefore allowed to differ
-        # (http://docs.python.org/reference/datamodel.html#object.__hash__):
-        return id(self)
-
-    def __copy__(self):
-        """
-        Hook for the standard copy module.
-        """
-
-        # This copy implicitly takes care of the reference of the
-        # variable to itself (in self.derivatives): the new Variable
-        # object points to itself, not to the original Variable.
-
-        # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html
-
-        #! The following assumes that the arguments to Variable are
-        # *not* copied upon construction, since __copy__ is not supposed
-        # to copy "inside" information:
-        return Variable(self.nominal_value, self.std_dev(), self.tag)
-
-    def __deepcopy__(self, memo):
-        """
-        Hook for the standard copy module.
-
-        A new variable is created.
-        """
-
-        # This deep copy implicitly takes care of the reference of the
-        # variable to itself (in self.derivatives): the new Variable
-        # object points to itself, not to the original Variable.
-
-        # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html
-
-        return self.__copy__()
-
-    def __getstate__(self):
-        """
-        Hook for the standard pickle module.
-        """
-        obj_slot_values = dict((k, getattr(self, k)) for k in self.__slots__)
-        obj_slot_values.update(AffineScalarFunc.__getstate__(self))
-        # Conversion to a usual dictionary:
-        return obj_slot_values
-
-    def __setstate__(self, data_dict):
-        """
-        Hook for the standard pickle module.
-        """
-        for (name, value) in data_dict.items():
-            setattr(self, name, value)
-
-###############################################################################
-
-# Utilities
-
-def nominal_value(x):
-    """
-    Returns the nominal value of x if it is a quantity with
-    uncertainty (i.e., an AffineScalarFunc object); otherwise, returns
-    x unchanged.
-
-    This utility function is useful for transforming a series of
-    numbers, when only some of them generally carry an uncertainty.
-    """
-
-    return x.nominal_value if isinstance(x, AffineScalarFunc) else x
-
-def std_dev(x):
-    """
-    Returns the standard deviation of x if it is a quantity with
-    uncertainty (i.e., an AffineScalarFunc object); otherwise, returns
-    the float 0.
-
-    This utility function is useful for transforming a series of
-    numbers, when only some of them generally carry an uncertainty.
-    """
-
-    return x.std_dev() if isinstance(x, AffineScalarFunc) else 0.
-
-def covariance_matrix(nums_with_uncert):
-    """
-    Returns a matrix that contains the covariances between the given
-    sequence of numbers with uncertainties (AffineScalarFunc objects).
-    The resulting matrix implicitly depends on their ordering in
-    'nums_with_uncert'.
-
-    The covariances are floats (never int objects).
-
-    The returned covariance matrix is the exact linear approximation
-    result, if the nominal values of the numbers with uncertainties
-    and of their variables are their mean.  Otherwise, the returned
-    covariance matrix should be close to its linear approximation
-    value.
-
-    The returned matrix is a list of lists.
-    """
-    # See PSI.411 in EOL's notes.
-
-    covariance_matrix = []
-    for (i1, expr1) in enumerate(nums_with_uncert):
-        derivatives1 = expr1.derivatives  # Optimization
-        vars1 = set(derivatives1)
-        coefs_expr1 = []
-        for (i2, expr2) in enumerate(nums_with_uncert[:i1+1]):
-            derivatives2 = expr2.derivatives  # Optimization
-            coef = 0.
-            for var in vars1.intersection(derivatives2):
-                # var is a variable common to both numbers with
-                # uncertainties:
-                coef += (derivatives1[var]*derivatives2[var]*var._std_dev**2)
-            coefs_expr1.append(coef)
-        covariance_matrix.append(coefs_expr1)
-
-    # We symmetrize the matrix:
-    for (i, covariance_coefs) in enumerate(covariance_matrix):
-        covariance_coefs.extend(covariance_matrix[j][i]
-                                for j in range(i+1, len(covariance_matrix)))
-
-    return covariance_matrix
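-
-# Illustrative sketch (not part of the original module):
-#
-#     u = ufloat((1, 0.1))
-#     v = ufloat((10, 0.2))
-#     s = u + v
-#     covariance_matrix([u, v, s])
-#     # [[0.01, 0.0, 0.01], [0.0, 0.04, 0.04], [0.01, 0.04, 0.05]]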
-
-try:
-    import numpy
-except ImportError:
-    pass
-else:
-    def correlation_matrix(nums_with_uncert):
-        '''
-        Returns the correlation matrix of the given sequence of
-        numbers with uncertainties, as a NumPy array of floats.
-        '''
-
-        cov_mat = numpy.array(covariance_matrix(nums_with_uncert))
-
-        std_devs = numpy.sqrt(cov_mat.diagonal())
-
-        return cov_mat/std_devs/std_devs[numpy.newaxis].T
-
-    __all__.append('correlation_matrix')
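-
-    # Illustrative sketch (not part of the original module): with u, v
-    # and s = u + v as in the covariance_matrix() example above,
-    # correlation_matrix([u, v, s])[0, 2] is
-    # cov(u, s)/(sigma_u*sigma_s) = 0.01/(0.1*sqrt(0.05)) = 0.447...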
-
-###############################################################################
-# Parsing of values with uncertainties:
-
-POSITIVE_DECIMAL_UNSIGNED = r'(\d+)(\.\d*)?'
-
-# Regexp for a number with uncertainty (e.g., "-1.234(2)e-6"), where the
-# uncertainty is optional (in which case the uncertainty is implicit):
-NUMBER_WITH_UNCERT_RE_STR = '''
-    ([+-])?  # Sign
-    %s  # Main number
-    (?:\(%s\))?  # Optional uncertainty
-    ([eE][+-]?\d+)?  # Optional exponent
-    ''' % (POSITIVE_DECIMAL_UNSIGNED, POSITIVE_DECIMAL_UNSIGNED)
-
-NUMBER_WITH_UNCERT_RE = re.compile(
-    "^%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE)
-
-def parse_error_in_parentheses(representation):
-    """
-    Returns (value, error) from a string representing a number with
-    uncertainty like 12.34(5), 12.34(142), 12.5(3.4) or 12.3(4.2)e3.
-    If no parenthesis is given, an uncertainty of one on the last
-    digit is assumed.
-
-    Raises ValueError if the string cannot be parsed.
-    """
-
-    match = NUMBER_WITH_UNCERT_RE.search(representation)
-
-    if match:
-        # The 'main' part is the nominal value, with 'int'eger part, and
-        # 'dec'imal part.  The 'uncert'ainty is similarly broken into its
-        # integer and decimal parts.
-        (sign, main_int, main_dec, uncert_int, uncert_dec,
-         exponent) = match.groups()
-    else:
-        raise ValueError("Unparsable number representation: '%s'."
-                         " Was expecting a string of the form 1.23(4)"
-                         " or 1.234" % representation)
-
-    # The value of the number is its nominal value:
-    value = float(''.join((sign or '',
-                           main_int,
-                           main_dec or '.0',
-                           exponent or '')))
-
-    if uncert_int is None:
-        # No uncertainty was found: an uncertainty of 1 on the last
-        # digit is assumed:
-        uncert_int = '1'
-
-    # Do we have a fully explicit uncertainty?
-    if uncert_dec is not None:
-        uncert = float("%s%s" % (uncert_int, uncert_dec or ''))
-    else:
-        # uncert_int represents an uncertainty on the last digits:
-
-        # The number of digits after the period defines the power of
-        # 10 that must be applied to the provided uncertainty:
-        num_digits_after_period = (0 if main_dec is None
-                                   else len(main_dec)-1)
-        uncert = int(uncert_int)/10**num_digits_after_period
-
-    # We apply the exponent to the uncertainty as well:
-    uncert *= float("1%s" % (exponent or ''))
-
-    return (value, uncert)
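-
-# Illustrative sketch (not part of the original module):
-#
-#     parse_error_in_parentheses("12.34(5)")    # (12.34, 0.05)
-#     parse_error_in_parentheses("12.34(142)")  # (12.34, 1.42)
-#     parse_error_in_parentheses("12.5(3.4)")   # (12.5, 3.4)
-#     parse_error_in_parentheses("1.23")        # (1.23, 0.01), implicit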
-
-
-# The following function is not exposed because it can in effect be
-# obtained by doing x = ufloat(representation) and
-# x.nominal_value and x.std_dev():
-def str_to_number_with_uncert(representation):
-    """
-    Given a string that represents a number with uncertainty, returns the
-    nominal value and the uncertainty.
-
-    The string can be of the form:
-    - 124.5+/-0.15
-    - 124.50(15)
-    - 124.50(123)
-    - 124.5
-
-    When no numerical error is given, an uncertainty of 1 on the last
-    digit is implied.
-
-    Raises ValueError if the string cannot be parsed.
-    """
-
-    try:
-        # Simple form 1234.45+/-1.2:
-        (value, uncert) = representation.split('+/-')
-    except ValueError:
-        # Form with parentheses or no uncertainty:
-        parsed_value = parse_error_in_parentheses(representation)
-    else:
-        try:
-            parsed_value = (float(value), float(uncert))
-        except ValueError:
-            raise ValueError('Cannot parse %s: was expecting a number'
-                             ' like 1.23+/-0.1' % representation)
-
-    return parsed_value
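-
-# Illustrative sketch (not part of the original module):
-#
-#     str_to_number_with_uncert("124.5+/-0.15")  # (124.5, 0.15)
-#     str_to_number_with_uncert("124.50(15)")    # (124.5, 0.15)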
-
-def ufloat(representation, tag=None):
-    """
-    Returns a random variable (Variable object).
-
-    Converts the representation of a number into a number with
-    uncertainty (a random variable, defined by a nominal value and
-    a standard deviation).
-
-    The representation can be a (value, standard deviation) sequence,
-    or a string.
-
-    Strings of the form '12.345+/-0.015', '12.345(15)', or '12.3' are
-    recognized (see full list below).  In the last case, an
-    uncertainty of +/-1 is assigned to the last digit.
-
-    'tag' is an optional string tag for the variable.  Variables
-    don't have to have distinct tags.  Tags are useful for tracing
-    what values (and errors) enter in a given result (through the
-    error_components() method).
-
-    Examples of valid string representations:
-
-        -1.23(3.4)
-        -1.34(5)
-        1(6)
-        3(4.2)
-        -9(2)
-        1234567(1.2)
-        12.345(15)
-        -12.3456(78)e-6
-        12.3(0.4)e-5
-        0.29
-        31.
-        -31.
-        31
-        -3.1e10
-        169.0(7)
-        169.1(15)
-    """
-
-    # This function is somewhat optimized so as to help with the
-    # creation of lots of Variable objects (through unumpy.uarray, for
-    # instance).
-
-    # representations is "normalized" so as to be a valid sequence of
-    # 2 arguments for Variable().
-
-    #! Accepting strings and any kind of sequence slows down the code
-    # by about 5 %.  On the other hand, massive initializations of
-    # numbers with uncertainties are likely to be performed with
-    # unumpy.uarray, which does not support parsing from strings and
-    # thus does not have any overhead.
-
-    #! Different, in Python 3:
-    if isinstance(representation, basestring):
-        representation = str_to_number_with_uncert(representation)
-
-    #! The tag is forced to be a string, so that the user does not
-    # create a Variable(2.5, 0.5) in order to represent 2.5 +/- 0.5.
-    # Forcing 'tag' to be a string prevents numerical uncertainties
-    # from being considered as tags, here:
-    if tag is not None:
-        #! 'unicode' is removed in Python3:
-        assert isinstance(tag, (str, unicode)), "The tag can only be a string."
-
-    #! The special ** syntax is for Python 2.5 and before (Python 2.6+
-    # understands tag=tag):
-    return Variable(*representation, **{'tag': tag})
-
+#!! Whenever the documentation below is updated, setup.py should be
+# checked for consistency.
+
+'''
+Calculations with full error propagation for quantities with uncertainties.
+Derivatives can also be calculated.
+
+Web user guide: http://packages.python.org/uncertainties/.
+
+Example of possible calculation: (0.2 +/- 0.01)**2 = 0.04 +/- 0.004.
+
+Correlations between expressions are correctly taken into account (for
+instance, with x = 0.2+/-0.01, 2*x-x-x is exactly zero, as is y-x-x
+with y = 2*x).
+
+Examples:
+
+  import uncertainties
+  from uncertainties import ufloat
+  from uncertainties.umath import *  # sin(), etc.
+
+  # Mathematical operations:
+  x = ufloat((0.20, 0.01))  # x = 0.20+/-0.01
+  x = ufloat("0.20+/-0.01")  # Other representation
+  x = ufloat("0.20(1)")  # Other representation
+  x = ufloat("0.20")  # Implicit uncertainty of +/-1 on the last digit
+  print x**2  # Square: prints "0.04+/-0.004"
+  print sin(x**2)  # Prints "0.0399...+/-0.00399..."
+
+  print x.std_score(0.17)  # Prints "-3.0": deviation of -3 sigmas
+
+  # Access to the nominal value, and to the uncertainty:
+  square = x**2  # Square
+  print square  # Prints "0.04+/-0.004"
+  print square.nominal_value  # Prints "0.04"
+  print square.std_dev()  # Prints "0.004..."
+
+  print square.derivatives[x]  # Partial derivative: 0.4 (= 2*0.20)
+
+  # Correlations:
+  u = ufloat((1, 0.05), "u variable")  # Tag
+  v = ufloat((10, 0.1), "v variable")
+  sum_value = u+v
+
+  u.set_std_dev(0.1)  # Standard deviations can be updated on the fly
+  print sum_value - u - v  # Prints "0.0" (exact result)
+
+  # List of all sources of error:
+  print sum_value  # Prints "11+/-0.1414..."
+  for (var, error) in sum_value.error_components().items():
+      print "%s: %f" % (var.tag, error)  # Individual error components
+
+  # Covariance matrices:
+  cov_matrix = uncertainties.covariance_matrix([u, v, sum_value])
+  print cov_matrix  # 3x3 matrix
+
+  # Correlated variables can be constructed from a covariance matrix, if
+  # NumPy is available:
+  (u2, v2, sum2) = uncertainties.correlated_values([1, 10, 11],
+                                                   cov_matrix)
+  print u2  # Value and uncertainty of u: correctly recovered (1+/-0.1)
+  print uncertainties.covariance_matrix([u2, v2, sum2])  # == cov_matrix
+
+- The main function provided by this module is ufloat, which creates
+numbers with uncertainties (Variable objects).  Variable objects can
+be used as if they were regular Python numbers.  The main attributes
+and methods of Variable objects are defined in the documentation of
+the Variable class.
+
+- Valid operations on numbers with uncertainties include basic
+mathematical functions (addition, etc.).
+
+Most operations from the standard math module (sin, etc.) can be applied
+on numbers with uncertainties by using their generalization from the
+uncertainties.umath module:
+
+  from uncertainties.umath import sin
+  print sin(ufloat("1+/-0.01"))  # 0.841...+/-0.005...
+  print sin(1)  # umath.sin() also works on floats, exactly like math.sin()
+
+Logical operations (>, ==, etc.) are also supported.
+
+Basic operations on NumPy arrays or matrices of numbers with
+uncertainties can be performed:
+
+  2*numpy.array([ufloat((1, 0.01)), ufloat((2, 0.1))])
+
+More complex operations on NumPy arrays can be performed through the
+dedicated uncertainties.unumpy sub-module (see its documentation).
+
+Calculations that are performed through non-Python code (Fortran, C,
+etc.) can handle numbers with uncertainties instead of floats through
+the provided wrap() wrapper:
+
+  import uncertainties
+
+  # wrapped_f is a version of f that can take arguments with
+  # uncertainties, even if f only takes floats:
+  wrapped_f = uncertainties.wrap(f)
+
+If some derivatives of the wrapped function f are known (analytically,
+or numerically), they can be given to wrap()--see the documentation
+for wrap().
+
+- Utility functions are also provided: the covariance matrix between
+random variables can be calculated with covariance_matrix(), or used
+as input for the definition of correlated quantities (correlated_values()
+function--defined only if the NumPy module is available).
+
+- Mathematical expressions involving numbers with uncertainties
+generally return AffineScalarFunc objects, which also print as a value
+with uncertainty.  Their most useful attributes and methods are
+described in the documentation for AffineScalarFunc.  Note that
+Variable objects are also AffineScalarFunc objects.  UFloat is an
+alias for AffineScalarFunc, provided as a convenience: testing whether
+a value carries an uncertainty handled by this module should be done
+with isinstance(my_value, UFloat).
+
+- Mathematically, numbers with uncertainties are, in this package,
+probability distributions.  These probabilities are reduced to two
+numbers: a nominal value and an uncertainty.  Thus, both variables
+(Variable objects) and the result of mathematical operations
+(AffineScalarFunc objects) contain these two values (respectively in
+their nominal_value attribute and through their std_dev() method).
+
+The uncertainty of a number with uncertainty is simply defined in
+this package as the standard deviation of the underlying probability
+distribution.
+
+The numbers with uncertainties manipulated by this package are assumed
+to have a probability distribution mostly contained around their
+nominal value, in an interval of about the size of their standard
+deviation.  This should cover most practical cases.  A good choice of
+nominal value for a number with uncertainty is thus the median of its
+probability distribution, the location of highest probability, or the
+average value.
+
+- When manipulating ensembles of numbers, some of which contain
+uncertainties, it can be useful to access the nominal value and
+uncertainty of all numbers in a uniform manner:
+
+  x = ufloat("3+/-0.1")
+  print nominal_value(x)  # Prints 3
+  print std_dev(x)  # Prints 0.1
+  print nominal_value(3)  # Prints 3: nominal_value works on floats
+  print std_dev(3)  # Prints 0: std_dev works on floats
+
+- Probability distributions (random variables and calculation results)
+are printed as:
+
+  nominal value +/- standard deviation
+
+but this does not imply any property on the nominal value (beyond the
+fact that the nominal value is normally inside the region of high
+probability density), or that the probability distribution of the
+result is symmetrical (this is rarely strictly the case).
+
+- Linear approximations of functions (around the nominal values) are
+used for the calculation of the standard deviation of mathematical
+expressions with this package.
+
+The calculated standard deviations and nominal values are thus
+meaningful approximations as long as the functions involved have
+precise linear expansions in the region where the probability
+distribution of their variables is the largest.  It is therefore
+important that uncertainties be small.  Mathematically, this means
+that the linear term of functions around the nominal values of their
+variables should be much larger than the remaining higher-order terms
+over the region of significant probability.
+
+For instance, sin(0+/-0.01) yields a meaningful standard deviation
+since it is quite linear over 0+/-0.01.  However, cos(0+/-0.01) yields
+an approximate standard deviation of 0 (because the cosine is not well
+approximated by a line around 0), which might not be precise enough
+for all applications.
+
+- Comparison operations (>, ==, etc.) on numbers with uncertainties
+have a pragmatic semantics, in this package: numbers with
+uncertainties can be used wherever Python numbers are used, most of
+the time with a result identical to the one that would be obtained
+with their nominal value only.  However, since the objects defined in
+this module represent probability distributions and not pure numbers,
+comparison operators are interpreted in a specific way.
+
+The result of a comparison operation ("==", ">", etc.) is defined so as
+to be essentially consistent with the requirement that uncertainties
+be small: the value of a comparison operation is True only if the
+operation yields True for all infinitesimal variations of its random
+variables, except, possibly, for an infinitely small number of cases.
+
+Example:
+
+  "x = 3.14; y = 3.14" is such that x == y
+
+but
+
+  x = ufloat((3.14, 0.01))
+  y = ufloat((3.14, 0.01))
+
+is not such that x == y, since x and y are independent random
+variables that almost never give the same value.  However, x == x
+still holds.
+
+The boolean value (bool(x), "if x...") of a number with uncertainty x
+is the result of x != 0.
+
+- The uncertainties package is for Python 2.5 and above.
+
+- This package contains tests.  They can be run either manually or
+automatically with the nose unit testing framework (nosetests).
+
+(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>.
+Please send feature requests, bug reports, or feedback to this address.
+
+Please support future development by donating $5 or more through PayPal!
+
+This software is released under a dual license.  (1) The BSD license.
+(2) Any other license, as long as it is obtained from the original
+author.'''
+
+# The idea behind this module is to replace the result of mathematical
+# operations by a local approximation of the defining function.  For
+# example, sin(0.2+/-0.01) becomes the affine function
+# (AffineScalarFunc object) whose nominal value is sin(0.2) and
+# whose variations are given by sin(0.2+delta) = 0.98...*delta.
+# Uncertainties can then be calculated by using this local linear
+# approximation of the original function.
+
+from __future__ import division  # Many analytical derivatives depend on this
+
+import re
+import math
+from math import sqrt, log  # Optimization: no attribute look-up
+import copy
+import warnings
+
+# Numerical version:
+__version_info__ = (1, 9)
+__version__ = '.'.join(map(str, __version_info__))
+
+__author__ = 'Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>'
+
+# Attributes that are always exported (some other attributes are
+# exported only if the NumPy module is available...):
+__all__ = [
+
+    # All sub-modules and packages are not imported by default,
+    # in particular because NumPy might be unavailable.
+    'ufloat',  # Main function: returns a number with uncertainty
+
+    # Uniform access to nominal values and standard deviations:
+    'nominal_value',
+    'std_dev',
+
+    # Utility functions (more are exported if NumPy is present):
+    'covariance_matrix',
+
+    # Class for testing whether an object is a number with
+    # uncertainty.  Not usually created by users (except through the
+    # Variable subclass), but possibly manipulated by external code
+    # ['derivatives()' method, etc.].
+    'UFloat',
+
+    # Wrapper for allowing non-pure-Python function to handle
+    # quantities with uncertainties:
+    'wrap',
+
+    # The documentation for wrap() indicates that numerical
+    # derivatives are calculated through partial_derivative().  The
+    # user might also want to change the size of the numerical
+    # differentiation step.
+    'partial_derivative'
+    ]
+
+###############################################################################
+
+def set_doc(doc_string):
+    """
+    Decorator function that sets the docstring to the given text.
+
+    It is useful for functions whose docstring is calculated
+    (including string substitutions).
+    """
+    def set_doc_string(func):
+        func.__doc__ = doc_string
+        return func
+    return set_doc_string
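+
+# Illustrative sketch (not part of the original module): set_doc() is
+# convenient when the docstring is built dynamically:
+#
+#     @set_doc("Wrapped version of %s()." % "sin")
+#     def wrapped_sin(x):
+#         ...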
+
+# Some types known to not depend on Variable objects are put in
+# CONSTANT_TYPES.  The most common types can be put in front, as this
+# may slightly improve the execution speed.
+CONSTANT_TYPES = (float, int, complex)  # ('long' would be added in Python 2)
+
+###############################################################################
+# Utility for issuing deprecation warnings
+
+def deprecation(message):
+    '''
+    Warns the user with the given message, by issuing a
+    DeprecationWarning.
+    '''
+    warnings.warn(message, DeprecationWarning, stacklevel=2)
+
+
+###############################################################################
+
+## Definitions that depend on the availability of NumPy:
+
+
+try:
+    import numpy
+except ImportError:
+    pass
+else:
+
+    # NumPy numbers do not depend on Variable objects:
+    CONSTANT_TYPES += (numpy.number,)
+
+    # Entering variables as a block of correlated values.  Only available
+    # if NumPy is installed.
+
+    #! It would be possible to dispense with NumPy, but a routine should be
+    # written for obtaining the eigenvectors of a symmetric matrix.  See
+    # for instance Numerical Recipes: (1) reduction to tri-diagonal
+    # [Givens or Householder]; (2) QR / QL decomposition.
+
+    def correlated_values(nom_values, covariance_mat, tags=None):
+        """
+        Returns numbers with uncertainties (AffineScalarFunc objects)
+        that correctly reproduce the given covariance matrix, and have
+        the given (float) values as their nominal value.
+
+        The correlated_values_norm() function returns the same result,
+        but takes a correlation matrix instead of a covariance matrix.
+
+        The list of values and the covariance matrix must have the
+        same length, and the matrix must be a square (symmetric) one.
+
+        The numbers with uncertainties returned depend on newly
+        created, independent variables (Variable objects).
+
+        If 'tags' is not None, it must list the tag of each new
+        independent variable.
+
+        nom_values -- sequence with the nominal (real) values of the
+        numbers with uncertainties to be returned.
+
+        covariance_mat -- full covariance matrix of the returned
+        numbers with uncertainties (not the statistical correlation
+        matrix, i.e., not the normalized covariance matrix). For
+        example, the first element of this matrix is the variance of
+        the first returned number with uncertainty.
+        """
+
+        # If no tags were given, we prepare tags for the newly created
+        # variables:
+        if tags is None:
+            tags = (None,) * len(nom_values)
+
+        # The covariance matrix is diagonalized in order to define
+        # the independent variables that model the given values:
+
+        (variances, transform) = numpy.linalg.eigh(covariance_mat)
+
+        # Numerical errors might make some variances negative: we set
+        # them to zero:
+        variances[variances < 0] = 0.
+
+        # Creation of new, independent variables:
+
+        # We use the fact that the eigenvectors in 'transform' are
+        # special: 'transform' is orthogonal (its inverse is its transpose):
+
+        variables = tuple(
+            # The variables represent "pure" uncertainties:
+            Variable(0, sqrt(variance), tag)
+            for (variance, tag) in zip(variances, tags))
+
+        # Representation of the initial correlated values:
+        values_funcs = tuple(
+            AffineScalarFunc(value, dict(zip(variables, coords)))
+            for (coords, value) in zip(transform, nom_values))
+
+        return values_funcs
+
+    __all__.append('correlated_values')
+
+    def correlated_values_norm(values_with_std_dev, correlation_mat,
+                               tags=None):
+        '''
+        Returns correlated values like correlated_values(), but takes
+        instead as input:
+
+        - nominal (float) values along with their standard deviation, and
+
+        - a correlation matrix (i.e. a normalized covariance matrix
+          normalized with individual standard deviations).
+
+        values_with_std_dev -- sequence of (nominal value, standard
+        deviation) pairs. The returned, correlated values have these
+        nominal values and standard deviations.
+
+        correlation_mat -- correlation matrix (i.e. the normalized
+        covariance matrix, a matrix with ones on its diagonal).
+        '''
+
+        (nominal_values, std_devs) = numpy.transpose(values_with_std_dev)
+
+        return correlated_values(
+            nominal_values,
+            correlation_mat*std_devs*std_devs[numpy.newaxis].T,
+            tags)
+
+    __all__.append('correlated_values_norm')
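+
+    # Illustrative sketch (not part of the original module):
+    #
+    #     (u, v) = correlated_values_norm(
+    #         [(1, 0.1), (10, 0.2)],       # (nominal value, std dev) pairs
+    #         numpy.array([[1., 0.5],      # Correlation matrix
+    #                      [0.5, 1.]]))
+    #     # Implied covariance matrix: [[0.01, 0.01], [0.01, 0.04]]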
+
+###############################################################################
+
+# Mathematical operations with local approximations (affine scalar
+# functions)
+
+class NotUpcast(Exception):
+    'Raised when an object cannot be converted to a number with uncertainty'
+
+def to_affine_scalar(x):
+    """
+    Transforms x into a constant affine scalar function
+    (AffineScalarFunc), unless it is already an AffineScalarFunc (in
+    which case x is returned unchanged).
+
+    Raises an exception unless 'x' belongs to some specific classes of
+    objects that are known not to depend on AffineScalarFunc objects
+    (which then cannot be considered as constants).
+    """
+
+    if isinstance(x, AffineScalarFunc):
+        return x
+
+    #! In Python 2.6+, numbers.Number could be used instead, here:
+    if isinstance(x, CONSTANT_TYPES):
+        # No variable => no derivative to define:
+        return AffineScalarFunc(x, {})
+
+    # Case of lists, etc.
+    raise NotUpcast("%s cannot be converted to a number with"
+                    " uncertainty" % type(x))
+
+def partial_derivative(f, param_num):
+    """
+    Returns a function that numerically calculates the partial
+    derivative of function f with respect to its argument number
+    param_num.
+
+    The step parameter represents the shift of the parameter used in
+    the numerical approximation.
+    """
+
+    def partial_derivative_of_f(*args, **kws):
+        """
+        Partial derivative, calculated with the (-epsilon, +epsilon)
+        method, which is more precise than the (0, +epsilon) method.
+        """
+        param_kw = None
+        if '__param__kw__' in kws:
+            param_kw = kws.pop('__param__kw__')
+        shifted_args = list(args)  # Copy, and conversion to a mutable
+        shifted_kws = dict(kws)  # Copy, so that the caller's kws is unchanged
+        step = 1.e-8
+        if param_kw in shifted_kws:
+            step = step*abs(shifted_kws[param_kw])
+        elif param_num < len(shifted_args):
+            # The step is relative to the parameter being varied, so that
+            # shifting it does not suffer from finite precision:
+            step = step*abs(shifted_args[param_num])
+
+        if param_kw in shifted_kws:
+            shifted_kws[param_kw] += step
+        elif param_num < len(shifted_args):
+            shifted_args[param_num] += step
+
+        shifted_f_plus = f(*shifted_args, **shifted_kws)
+
+        if param_kw in shifted_kws:
+            shifted_kws[param_kw] -= 2*step
+        elif param_num < len(shifted_args):
+            shifted_args[param_num] -= 2*step
+        shifted_f_minus = f(*shifted_args, **shifted_kws)
+
+        return (shifted_f_plus - shifted_f_minus)/2/step
+
+    return partial_derivative_of_f
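+
+# A minimal sketch (comments only), assuming a simple two-argument
+# function: the numerical partial derivative of x*y with respect to
+# its first argument, evaluated at (3, 4), is close to the exact
+# value, 4:
+# >>> d_dx = partial_derivative(lambda x, y: x*y, 0)
+# >>> d_dx(3.0, 4.0)  # ~4.0, up to the finite-difference step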
+
+class NumericalDerivatives(object):
+    """
+    Convenient access to the partial derivatives of a function,
+    calculated numerically.
+    """
+    # This is not a list because the number of arguments of the
+    # function is not known in advance, in general.
+
+    def __init__(self, function):
+        """
+        'function' is the function whose derivatives can be computed.
+        """
+        self._function = function
+
+    def __getitem__(self, n):
+        """
+        Returns the n-th numerical derivative of the function.
+        """
+        return partial_derivative(self._function, n)
+
+def wrap(f, derivatives_iter=None):
+    """
+    Wraps a function f into a function that also accepts numbers with
+    uncertainties (UFloat objects) and returns a number with
+    uncertainties.  Doing so may be necessary when function f cannot
+    be expressed analytically (with uncertainties-compatible operators
+    and functions like +, *, umath.sin(), etc.).
+
+    f must return a scalar (not a list, etc.).
+
+    In the wrapped function, the standard Python scalar arguments of f
+    (float, int, etc.) can be replaced by numbers with
+    uncertainties. The result will contain the appropriate
+    uncertainty.
+
+    If no argument to the wrapped function has an uncertainty, f
+    simply returns its usual, scalar result.
+
+    If supplied, derivatives_iter can be an iterable that generally
+    contains functions; each successive function is the partial
+    derivative of f with respect to the corresponding variable (one
+    function for each argument of f, which takes as many arguments as
+    f).  If instead of a function, an element of derivatives_iter
+    contains None, then it is automatically replaced by the relevant
+    numerical derivative; this can be used for non-scalar arguments of
+    f (like string arguments).
+
+    If derivatives_iter is None, or if derivatives_iter contains a
+    fixed (and finite) number of elements, then any missing derivative
+    is calculated numerically.
+
+    An infinite number of derivatives can be specified by having
+    derivatives_iter be an infinite iterator; this can for instance
+    be used for specifying the derivatives of functions with an
+    undefined number of arguments (like sum(), whose partial
+    derivatives all return 1).
+
+    Example (for illustration purposes only, as
+    uncertainties.umath.sin() runs faster than the examples that
+    follow): wrap(math.sin) is a sine function that can be applied to
+    numbers with uncertainties.  Its derivative will be calculated
+    numerically.  wrap(math.sin, [None]) would have produced the same
+    result.  wrap(math.sin, [math.cos]) is the same function, but with
+    an analytically defined derivative.
+    """
+
+    if derivatives_iter is None:
+        derivatives_iter = NumericalDerivatives(f)
+    else:
+        # Derivatives that are not defined are calculated numerically,
+        # if there is a finite number of them (the function lambda
+        # *args: fsum(args) has a non-defined number of arguments, as
+        # it just performs a sum):
+        try:  # Is the number of derivatives fixed?
+            len(derivatives_iter)
+        except TypeError:
+            pass
+        else:
+            derivatives_iter = [
+                partial_derivative(f, k) if derivative is None
+                else derivative
+                for (k, derivative) in enumerate(derivatives_iter)]
+
+    #! Setting the doc string after "def f_with...()" does not
+    # seem to work.  We define it explicitly:
+    @set_doc("""\
+    Version of %s(...) that returns an affine approximation
+    (AffineScalarFunc object), if its result depends on variables
+    (Variable objects).  Otherwise, returns a simple constant (when
+    applied to constant arguments).
+
+    Warning: arguments of the function that are not AffineScalarFunc
+    objects must not depend on uncertainties.Variable objects in any
+    way.  Otherwise, the dependence of the result on
+    uncertainties.Variable objects will be incorrect.
+
+    Original documentation:
+    %s""" % (f.__name__, f.__doc__))
+    def f_with_affine_output(*args, **kwargs):
+        # Can this function perform the calculation of an
+        # AffineScalarFunc (or maybe float) result?
+        try:
+            # Upcast of the positional arguments (raises NotUpcast
+            # for unsupported argument types):
+            aff_funcs = [to_affine_scalar(a) for a in args]
+            aff_kws = kwargs
+            aff_varkws = []
+            for key, val in kwargs.items():
+                if isinstance(val, Variable):
+                    aff_kws[key] = to_affine_scalar(val)
+                    aff_varkws.append(key)
+
+        except NotUpcast:
+
+            # This function does not know how to itself perform
+            # calculations with non-float-like arguments (as they
+            # might for instance be objects whose value really changes
+            # if some Variable objects had different values):
+
+            # Is it clear that we can't delegate the calculation?
+
+            if any(isinstance(arg, AffineScalarFunc) for arg in args):
+                # This situation arises for instance when calculating
+                # AffineScalarFunc(...)*numpy.array(...).  In this
+                # case, we must let NumPy handle the multiplication
+                # (which is then performed element by element):
+                return NotImplemented
+            else:
+                # If none of the arguments is an AffineScalarFunc, we
+                # can delegate the calculation to the original
+                # function.  This can be useful when it is called with
+                # only one argument (as in
+                # numpy.log10(numpy.ndarray(...)):
+                return f(*args, **kwargs)
+
+        ########################################
+        # Nominal value of the constructed AffineScalarFunc:
+        args_values = [e.nominal_value for e in aff_funcs]
+        kw_values = {}
+        for key, val in aff_kws.items():
+            kw_values[key] = val
+            if key in aff_varkws:
+                kw_values[key] = val.nominal_value
+        f_nominal_value = f(*args_values, **kw_values)
+
+        ########################################
+
+        # List of involved variables (Variable objects):
+        variables = set()
+        for expr in aff_funcs:
+            variables |= set(expr.derivatives)
+        for vname in aff_varkws:
+            variables |= set(aff_kws[vname].derivatives)
+        ## It is sometimes useful to only return a regular constant:
+
+        # (1) Optimization / convenience behavior: when 'f' is called
+        # on purely constant values (e.g., sin(2)), there is no need
+        # for returning a more complex AffineScalarFunc object.
+
+        # (2) Functions that do not return a "float-like" value might
+        # not have a relevant representation as an AffineScalarFunc.
+        # This includes boolean functions, since their derivatives are
+        # either 0 or are undefined: they are better represented as
+        # Python constants than as constant AffineScalarFunc functions.
+
+        if not variables or isinstance(f_nominal_value, bool):
+            return f_nominal_value
+
+        # The result of 'f' does depend on 'variables'...
+
+        ########################################
+
+        # Calculation of the derivatives with respect to the arguments
+        # of f (aff_funcs):
+
+        # The chain rule is applied.  This is because, in the case of
+        # numerical derivatives, it allows for a better-controlled
+        # numerical stability than numerically calculating the partial
+        # derivatives through '[f(x + dx, y + dy, ...) -
+        # f(x,y,...)]/da' where dx, dy,... are calculated by varying
+        # 'a'.  In fact, it is numerically better to control how big
+        # (dx, dy,...) are: 'f' is a simple mathematical function and
+        # it is possible to know how precise the df/dx are (which is
+        # not possible with the numerical df/da calculation above).
+
+        # We use numerical derivatives, if we don't already have a
+        # list of derivatives:
+
+        #! Note that this test could be avoided by requiring the
+        # caller to always provide derivatives.  When changing the
+        # functions of the math module, this would force this module
+        # to know about all the math functions.  Another possibility
+        # would be to force derivatives_iter to contain, say, the
+        # first 3 derivatives of f.  But any of these two ideas has a
+        # chance to break, one day... (if new functions are added to
+        # the math module, or if some function has more than 3
+        # arguments).
+
+        derivatives_wrt_args = []
+        for (arg, derivative) in zip(aff_funcs, derivatives_iter):
+            derivatives_wrt_args.append(derivative(*args_values, **aff_kws)
+                                        if arg.derivatives
+                                        else 0)
+
+        # Derivatives with respect to the keyword arguments that are
+        # Variable objects (kw_values, built above, maps each keyword
+        # argument to its nominal value):
+        for (vname, derivative) in zip(aff_varkws, derivatives_iter):
+            derivatives_wrt_args.append(derivative(__param__kw__=vname,
+                                                   **kw_values)
+                                        if aff_kws[vname].derivatives
+                                        else 0)
+
+        ########################################
+        # Calculation of the derivative of f with respect to all the
+        # variables (Variable) involved.
+
+        # Initial value (is updated below):
+        derivatives_wrt_vars = dict((var, 0.) for var in variables)
+
+        # The chain rule is used (we already have
+        # derivatives_wrt_args):
+
+        for (func, f_derivative) in zip(aff_funcs, derivatives_wrt_args):
+            for (var, func_derivative) in func.derivatives.items():
+                derivatives_wrt_vars[var] += f_derivative * func_derivative
+
+        for (vname, f_derivative) in zip(aff_varkws, derivatives_wrt_args):
+            func = aff_kws[vname]
+            for (var, func_derivative) in func.derivatives.items():
+                derivatives_wrt_vars[var] += f_derivative * func_derivative
+
+        # The function now returns an AffineScalarFunc object:
+        return AffineScalarFunc(f_nominal_value, derivatives_wrt_vars)
+
+    # It is easier to work with f_with_affine_output, which represents
+    # a wrapped version of 'f', when it bears the same name as 'f':
+    f_with_affine_output.__name__ = f.__name__
+
+    return f_with_affine_output
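+
+# A minimal sketch (comments only) of wrap(), using the Variable
+# class defined later in this module:
+# >>> import math
+# >>> usin = wrap(math.sin, [math.cos])  # Analytical derivative
+# >>> usin(Variable(3.0, 0.1))  # ~sin(3) +/- |cos(3)|*0.1
+# 0.141120008...+/-0.0989992...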
+
+def _force_aff_func_args(func):
+    """
+    Takes an operator op(x, y) and wraps it.
+
+    The constructed operator returns func(x, to_affine_scalar(y)) if y
+    can be upcast with to_affine_scalar(); otherwise, it returns
+    NotImplemented.
+
+    Thus, func() is only called on two AffineScalarFunc objects, if
+    its first argument is an AffineScalarFunc.
+    """
+
+    def op_on_upcast_args(x, y):
+        """
+        Returns %s(self, to_affine_scalar(y)) if y can be upcast
+        through to_affine_scalar.  Otherwise returns NotImplemented.
+        """ % func.__name__
+
+        try:
+            y_with_uncert = to_affine_scalar(y)
+        except NotUpcast:
+            # This module does not know how to handle the comparison:
+            # (example: y is a NumPy array, in which case the NumPy
+            # array will decide that func() should be applied
+            # element-wise between x and all the elements of y):
+            return NotImplemented
+        else:
+            return func(x, y_with_uncert)
+
+    return op_on_upcast_args
+
+########################################
+
+# Definition of boolean operators, that assume that self and
+# y_with_uncert are AffineScalarFunc.
+
+# The fact that uncertainties must be small is used here: the
+# comparison functions are supposed to be constant for most values of
+# the random variables.
+
+# Even though uncertainties are supposed to be small, comparisons
+# between 3+/-0.1 and 3.0 are handled (even though x == 3.0 is not a
+# constant function in the 3+/-0.1 interval).  The comparison between
+# x and x is handled too, when x has an uncertainty.  In fact, as
+# explained in the main documentation, it is possible to give a useful
+# meaning to the comparison operators, in these cases.
+
+def _eq_on_aff_funcs(self, y_with_uncert):
+    """
+    __eq__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+    difference = self - y_with_uncert
+    # Only an exact zero difference means that self and y are
+    # equal numerically:
+    return not(difference._nominal_value or difference.std_dev())
+
+def _ne_on_aff_funcs(self, y_with_uncert):
+    """
+    __ne__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+
+    return not _eq_on_aff_funcs(self, y_with_uncert)
+
+def _gt_on_aff_funcs(self, y_with_uncert):
+    """
+    __gt__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+    return self._nominal_value > y_with_uncert._nominal_value
+
+def _ge_on_aff_funcs(self, y_with_uncert):
+    """
+    __ge__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+
+    return (_gt_on_aff_funcs(self, y_with_uncert)
+            or _eq_on_aff_funcs(self, y_with_uncert))
+
+def _lt_on_aff_funcs(self, y_with_uncert):
+    """
+    __lt__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+    return self._nominal_value < y_with_uncert._nominal_value
+
+def _le_on_aff_funcs(self, y_with_uncert):
+    """
+    __le__ operator, assuming that both self and y_with_uncert are
+    AffineScalarFunc objects.
+    """
+
+    return (_lt_on_aff_funcs(self, y_with_uncert)
+            or _eq_on_aff_funcs(self, y_with_uncert))
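+
+# A minimal sketch (comments only) of these comparison semantics: a
+# variable equals itself, but two independent variables with the same
+# nominal value and uncertainty do not compare equal:
+# >>> x = Variable(3.0, 0.1)
+# >>> x == x  # True: the difference is exactly 0+/-0
+# >>> x == Variable(3.0, 0.1)  # False: the difference has a spread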
+
+########################################
+
+class AffineScalarFunc(object):
+    """
+    Affine functions that support basic mathematical operations
+    (addition, etc.).  Such functions can for instance be used for
+    representing the local (linear) behavior of any function.
+
+    This class is mostly meant to be used internally.
+
+    This class can also be used to represent constants.
+
+    The variables of affine scalar functions are Variable objects.
+
+    AffineScalarFunc objects include facilities for calculating the
+    'error' on the function, from the uncertainties on its variables.
+
+    Main attributes and methods:
+
+    - nominal_value, std_dev(): value at the origin / nominal value,
+      and standard deviation.
+
+    - error_components(): error_components()[x] is the error due to
+      Variable x.
+
+    - derivatives: derivatives[x] is the (value of the) derivative
+      with respect to Variable x.  This attribute is a dictionary
+      whose keys are the Variable objects on which the function
+      depends.
+
+      All the Variable objects on which the function depends are in
+      'derivatives'.
+
+    - std_score(x): position of number x with respect to the
+      nominal value, in units of the standard deviation.
+    """
+
+    # To save memory in large arrays:
+    __slots__ = ('_nominal_value', 'derivatives')
+
+    #! The code could be modified in order to accommodate non-float
+    # nominal values.  This could for instance be done through
+    # the operator module: instead of delegating operations to
+    # float.__*__ operations, they could be delegated to
+    # operator.__*__ functions (while taking care of properly handling
+    # reverse operations: __radd__, etc.).
+
+    def __init__(self, nominal_value, derivatives):
+        """
+        nominal_value -- value of the function at the origin.
+        nominal_value must not depend in any way on the Variable
+        objects in 'derivatives' (the value at the origin of the
+        function being defined is a constant).
+
+        derivatives -- maps each Variable object on which the function
+        being defined depends to the value of the derivative with
+        respect to that variable, taken at the nominal value of all
+        variables.
+
+        Warning: the above constraint is not checked, and the user is
+        responsible for complying with it.
+        """
+
+        # Defines the value at the origin:
+
+        # Only float-like values are handled.  One reason is that it
+        # does not make sense for an affine scalar function not to
+        # yield float values.  Another reason is that it would not
+        # make sense to have a complex nominal value, here (it would
+        # not be handled correctly at all): converting to float should
+        # be possible.
+        self._nominal_value = float(nominal_value)
+        self.derivatives = derivatives
+
+    # The following prevents the 'nominal_value' attribute from being
+    # modified by the user:
+    @property
+    def nominal_value(self):
+        "Nominal value of the random number."
+        return self._nominal_value
+
+    ############################################################
+
+
+    ### Operators: operators applied to AffineScalarFunc and/or
+    ### float-like objects only are supported.  This is why methods
+    ### from float are used for implementing these operators.
+
+    # Operators with no reflection:
+
+    ########################################
+
+    # __nonzero__() is supposed to return a boolean value (it is used
+    # by bool()).  It is for instance used for converting the result
+    # of comparison operators to a boolean, in sorted().  If we want
+    # to be able to sort AffineScalarFunc objects, __nonzero__ cannot
+    # return an AffineScalarFunc object.  Since boolean results (such
+    # as the result of bool()) don't have a very meaningful
+    # uncertainty unless it is zero, this behavior is fine.
+
+    def __nonzero__(self):
+        """
+        Equivalent to self != 0.
+        """
+        #! This might not be relevant for AffineScalarFunc objects
+        # that contain values in a linear space which does not convert
+        # the float 0 into the null vector (see the __eq__ function:
+        # __nonzero__ works fine if subtracting the 0 float from a
+        # vector of the linear space works as if 0 were the null
+        # vector of that space):
+        return self != 0.  # Uses the AffineScalarFunc.__ne__ function
+
+    ########################################
+
+    ## Logical operators: warning: the resulting value cannot always
+    ## be differentiated.
+
+    # The boolean operations are not differentiable everywhere, but
+    # almost...
+
+    # (1) I can rely on the assumption that the user only has "small"
+    # errors on variables, as this is used in the calculation of the
+    # standard deviation (which performs linear approximations):
+
+    # (2) However, this assumption is not relevant for some
+    # operations, and does not have to hold, in some cases.  This
+    # comes from the fact that logical operations (e.g. __eq__(x,y))
+    # are not differentiable for many usual cases.  For instance, it
+    # is desirable to have x == x for x = n+/-e, whatever the size of e.
+    # Furthermore, n+/-e != n+/-e', if e != e', whatever the size of e or
+    # e'.
+
+    # (3) The result of logical operators does not have to be a
+    # function with derivatives, as these derivatives are either 0 or
+    # don't exist (i.e., the user should probably not rely on
+    # derivatives for their code).
+
+    # __eq__ is used in "if data in [None, ()]", for instance.  It is
+    # therefore important to be able to handle this case too, which is
+    # taken care of when _force_aff_func_args(_eq_on_aff_funcs)
+    # returns NotImplemented.
+    __eq__ = _force_aff_func_args(_eq_on_aff_funcs)
+
+    __ne__ = _force_aff_func_args(_ne_on_aff_funcs)
+    __gt__ = _force_aff_func_args(_gt_on_aff_funcs)
+
+    # __ge__ is not the opposite of __lt__ because these operators do
+    # not always yield a boolean (for instance, 0 <= numpy.arange(10)
+    # yields an array).
+    __ge__ = _force_aff_func_args(_ge_on_aff_funcs)
+
+    __lt__ = _force_aff_func_args(_lt_on_aff_funcs)
+    __le__ = _force_aff_func_args(_le_on_aff_funcs)
+
+    ########################################
+
+    # Uncertainties handling:
+
+    def error_components(self):
+        """
+        Individual components of the standard deviation of the affine
+        function (in absolute value), returned as a dictionary with
+        Variable objects as keys.
+
+        This method assumes that the derivatives contained in the
+        object take scalar values (and are not a tuple, like what
+        math.frexp() returns, for instance).
+        """
+
+        # Calculation of the variance:
+        error_components = {}
+        for (variable, derivative) in self.derivatives.items():
+            # Individual standard error due to variable:
+            error_components[variable] = abs(derivative*variable._std_dev)
+
+        return error_components
+
+    def std_dev(self):
+        """
+        Standard deviation of the affine function.
+
+        This method assumes that the function returns scalar results.
+
+        This returned standard deviation depends on the current
+        standard deviations [std_dev()] of the variables (Variable
+        objects) involved.
+        """
+        #! It would be possible to not allow the user to update the
+        # std dev of Variable objects, in which case AffineScalarFunc
+        # objects could have a pre-calculated or, better, cached
+        # std_dev value (in fact, many intermediate AffineScalarFunc do
+        # not need to have their std_dev calculated: only the final
+        # AffineScalarFunc returned to the user does).
+        return sqrt(sum(
+            delta**2 for delta in self.error_components().values()))
+
+    def _general_representation(self, to_string):
+        """
+        Uses the to_string() conversion function on both the nominal
+        value and the standard deviation, and returns a string that
+        describes them.
+
+        to_string() is typically repr() or str().
+        """
+
+        (nominal_value, std_dev) = (self._nominal_value, self.std_dev())
+
+        # String representation:
+
+        # Not putting spaces around "+/-" helps with arrays of
+        # Variable, as each value with an uncertainty is a
+        # block of signs (otherwise, the standard deviation can be
+        # mistaken for another element of the array).
+
+        return ("%s+/-%s" % (to_string(nominal_value), to_string(std_dev))
+                if std_dev
+                else to_string(nominal_value))
+
+    def __repr__(self):
+        return self._general_representation(repr)
+
+    def __str__(self):
+        return self._general_representation(str)
+
+    def std_score(self, value):
+        """
+        Returns 'value' - nominal value, in units of the standard
+        deviation.
+
+        Raises a ValueError exception if the standard deviation is zero.
+        """
+        try:
+            # The ._nominal_value is a float: there is no integer division,
+            # here:
+            return (value - self._nominal_value) / self.std_dev()
+        except ZeroDivisionError:
+            raise ValueError("The standard deviation is zero:"
+                             " undefined result.")
+
+    def __deepcopy__(self, memo):
+        """
+        Hook for the standard copy module.
+
+        The returned AffineScalarFunc is a completely fresh copy,
+        which is fully independent of any variable defined so far.
+        New variables are specially created for the returned
+        AffineScalarFunc object.
+        """
+        return AffineScalarFunc(
+            self._nominal_value,
+            dict((copy.deepcopy(var), deriv)
+                 for (var, deriv) in self.derivatives.items()))
+
+    def __getstate__(self):
+        """
+        Hook for the pickle module.
+        """
+        obj_slot_values = dict((k, getattr(self, k)) for k in
+                               # self.__slots__ would not work when
+                               # self is an instance of a subclass:
+                               AffineScalarFunc.__slots__)
+        return obj_slot_values
+
+    def __setstate__(self, data_dict):
+        """
+        Hook for the pickle module.
+        """
+        for (name, value) in data_dict.items():
+            setattr(self, name, value)
+
+# Nicer name, for users: isinstance(ufloat(...), UFloat) is True:
+UFloat = AffineScalarFunc
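+
+# A minimal sketch (comments only) of error propagation and of the
+# main AffineScalarFunc facilities (the arithmetic operators are
+# installed below by add_operators_to_AffineScalarFunc()):
+# >>> x = Variable(1.0, 0.1, tag='x')
+# >>> y = Variable(2.0, 0.2, tag='y')
+# >>> z = 2*x + y
+# >>> z.std_dev()  # sqrt((2*0.1)**2 + 0.2**2) ~ 0.283
+# >>> z.error_components()  # {x: 0.2, y: 0.2}, keyed by Variable
+# >>> z.std_score(4.5)  # (4.5 - 4.0)/0.283 ~ 1.77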
+
+def get_ops_with_reflection():
+
+    """
+    Returns operators with a reflection, along with their derivatives
+    (for float operands).
+    """
+
+    # Operators with a reflection:
+
+    # We do not include divmod().  This operator could be included, by
+    # allowing its result (a tuple) to be differentiated, in
+    # derivative_value().  However, a similar result can be achieved
+    # by the user by calculating separately the floor division and
+    # the remainder.
+
+    # {operator(x, y): (derivative wrt x, derivative wrt y)}:
+
+    # Note that unknown partial derivatives can be numerically
+    # calculated by expressing them as something like
+    # "partial_derivative(float.__...__, 1)(x, y)":
+
+    # String expressions are used, so that reversed operators are easy
+    # to code, and execute relatively efficiently:
+
+    derivatives_list = {
+        'add': ("1.", "1."),
+        # 'div' is the '/' operator when __future__.division is not in
+        # effect.  Since '/' is applied to
+        # AffineScalarFunc._nominal_value numbers, it is applied on
+        # floats, and is therefore the "usual" mathematical division.
+        'div': ("1/y", "-x/y**2"),
+        'floordiv': ("0.", "0."),  # Non exact: there is a discontinuities
+        # The derivative wrt the 2nd arguments is something like (..., x//y),
+        # but it is calculated numerically, for convenience:
+        'mod': ("1.", "partial_derivative(float.__mod__, 1)(x, y)"),
+        'mul': ("y", "x"),
+        'pow': ("y*x**(y-1)", "log(x)*x**y"),
+        'sub': ("1.", "-1."),
+        'truediv': ("1/y", "-x/y**2")
+        }
+
+    # Conversion to Python functions:
+    ops_with_reflection = {}
+    for (op, derivatives) in derivatives_list.items():
+        ops_with_reflection[op] = [
+            eval("lambda x, y: %s" % expr) for expr in derivatives ]
+
+        ops_with_reflection["r"+op] = [
+            eval("lambda y, x: %s" % expr) for expr in reversed(derivatives)]
+
+    return ops_with_reflection
+
+# Operators that have a reflection, along with their derivatives:
+_ops_with_reflection = get_ops_with_reflection()
+
+# Some effectively modified operators (for the automated tests):
+_modified_operators = []
+_modified_ops_with_reflection = []
+
+def add_operators_to_AffineScalarFunc():
+    """
+    Adds many operators (__add__, etc.) to the AffineScalarFunc class.
+    """
+
+    ########################################
+
+    #! Derivatives are set to return floats.  For one thing,
+    # uncertainties generally involve floats, as they are based on
+    # small variations of the parameters.  It is also better to
+    # protect the user from unexpected integer results that behave
+    # badly with division.
+
+    ## Operators that return a numerical value:
+
+    # Single-argument operators that should be adapted from floats to
+    # AffineScalarFunc objects, associated to their derivative:
+    simple_numerical_operators_derivatives = {
+        'abs': lambda x: 1. if x>=0 else -1.,
+        'neg': lambda x: -1.,
+        'pos': lambda x: 1.,
+        'trunc': lambda x: 0.
+        }
+
+    for (op, derivative) in (
+          simple_numerical_operators_derivatives.items()):
+
+        attribute_name = "__%s__" % op
+        # float objects don't exactly have the same attributes between
+        # different versions of Python (for instance, __trunc__ was
+        # introduced with Python 2.6):
+        try:
+            setattr(AffineScalarFunc, attribute_name,
+                    wrap(getattr(float, attribute_name),
+                                 [derivative]))
+        except AttributeError:
+            pass
+        else:
+            _modified_operators.append(op)
+
+    ########################################
+
+    # Reversed versions (useful for float*AffineScalarFunc, for instance):
+    for (op, derivatives) in _ops_with_reflection.items():
+        attribute_name = '__%s__' % op
+        # float objects don't exactly have the same attributes between
+        # different versions of Python (for instance, __div__ and
+        # __rdiv__ were removed, in Python 3):
+        try:
+            setattr(AffineScalarFunc, attribute_name,
+                    wrap(getattr(float, attribute_name), derivatives))
+        except AttributeError:
+            pass
+        else:
+            _modified_ops_with_reflection.append(op)
+
+    ########################################
+    # Conversions to pure numbers are meaningless.  Note that the
+    # behavior of float(1j) is similar.
+    for coercion_type in ('complex', 'int', 'long', 'float'):
+        # The default argument binds coercion_type at definition time
+        # (a plain closure would only see the last loop value):
+        def raise_error(self, coercion_type=coercion_type):
+            raise TypeError("can't convert an affine function (%s)"
+                            ' to %s; use x.nominal_value'
+                            # In case AffineScalarFunc is sub-classed:
+                            % (self.__class__, coercion_type))
+
+        setattr(AffineScalarFunc, '__%s__' % coercion_type, raise_error)
+
+add_operators_to_AffineScalarFunc()  # Actual addition of class attributes
+
+class Variable(AffineScalarFunc):
+    """
+    Representation of a float-like scalar random variable, along with
+    its uncertainty.
+
+    Objects are meant to represent variables that are independent from
+    each other (correlations are handled through the AffineScalarFunc
+    class).
+    """
+
+    # To save memory in large arrays:
+    __slots__ = ('_std_dev', 'tag')
+
+    def __init__(self, value, std_dev, tag=None):
+        """
+        The nominal value and the standard deviation of the variable
+        are set.  These values must be scalars.
+
+        'tag' is a tag that the user can associate to the variable.  This
+        is useful for tracing variables.
+
+        The meaning of the nominal value is described in the main
+        module documentation.
+        """
+
+        #! The value, std_dev, and tag are assumed by __copy__() not to
+        # be copied.  Either this should be guaranteed here, or __copy__
+        # should be updated.
+
+        # Only float-like values are handled.  One reason is that the
+        # division operator on integers would not produce a
+        # differentiable function: for instance, Variable(3, 0.1)/2
+        # has a nominal value of 3/2 = 1, but a "shifted" value
+        # of 3.1/2 = 1.55.
+        value = float(value)
+
+        # If the variable changes by dx, then the value of the affine
+        # function that gives its value changes by 1*dx:
+
+        # ! Memory cycles are created.  However, they are garbage
+        # collected, if possible.  Using a weakref.WeakKeyDictionary
+        # takes much more memory.  Thus, this implementation chooses
+        # more cycles and a smaller memory footprint instead of no
+        # cycles and a larger memory footprint.
+
+        # ! Using AffineScalarFunc instead of super() results only in
+        # a 3 % speed loss (Python 2.6, Mac OS X):
+        super(Variable, self).__init__(value, {self: 1.})
+
+        # We force the error to be float-like.  Since it is considered
+        # as a Gaussian standard deviation, it is semantically
+        # positive (even though there would be no problem defining it
+        # as a sigma, where sigma can be negative and still define a
+        # Gaussian):
+
+        assert std_dev >= 0, "the error must be a positive number"
+        # Since AffineScalarFunc.std_dev is a property, we cannot do
+        # "self.std_dev = ...":
+        self._std_dev = std_dev
+
+        self.tag = tag
+
+    # Standard deviations can be modified (this is a feature).
+    # AffineScalarFunc objects that depend on the Variable have their
+    # std_dev() automatically modified (recalculated with the new
+    # std_dev of their Variables):
+    def set_std_dev(self, value):
+        """
+        Updates the standard deviation of the variable to a new value.
+        """
+
+        # A zero variance is accepted.  Thus, it is possible to
+        # conveniently use infinitely precise variables, for instance
+        # to study special cases.
+
+        self._std_dev = value
+
+    # The following method is overridden so that we can represent the tag:
+    def _general_representation(self, to_string):
+        """
+        Uses the to_string() conversion function on both the nominal
+        value and standard deviation and returns a string that
+        describes the number.
+
+        to_string() is typically repr() or str().
+        """
+        num_repr = super(Variable, self)._general_representation(to_string)
+
+        # Optional tag: only full representations (to_string == repr)
+        # contain the tag, as the tag is required in order to recreate
+        # the variable.  Outputting the tag for regular string ("print
+        # x") would be too heavy and produce an unusual representation
+        # of a number with uncertainty.
+        return (num_repr if ((self.tag is None) or (to_string != repr))
+                else "< %s = %s >" % (self.tag, num_repr))
+
+    def __hash__(self):
+        # All Variable objects are by definition independent
+        # variables, so they never compare equal; their id() values
+        # are therefore allowed to differ
+        # (http://docs.python.org/reference/datamodel.html#object.__hash__):
+        return id(self)
+
+    def __copy__(self):
+        """
+        Hook for the standard copy module.
+        """
+
+        # This copy implicitly takes care of the reference of the
+        # variable to itself (in self.derivatives): the new Variable
+        # object points to itself, not to the original Variable.
+
+        # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html
+
+        #! The following assumes that the arguments to Variable are
+        # *not* copied upon construction, since __copy__ is not supposed
+        # to copy "inside" information:
+        return Variable(self.nominal_value, self.std_dev(), self.tag)
+
+    def __deepcopy__(self, memo):
+        """
+        Hook for the standard copy module.
+
+        A new variable is created.
+        """
+
+        # This deep copy implicitly takes care of the reference of the
+        # variable to itself (in self.derivatives): the new Variable
+        # object points to itself, not to the original Variable.
+
+        # Reference: http://www.doughellmann.com/PyMOTW/copy/index.html
+
+        return self.__copy__()
+
+    def __getstate__(self):
+        """
+        Hook for the standard pickle module.
+        """
+        obj_slot_values = dict((k, getattr(self, k)) for k in self.__slots__)
+        obj_slot_values.update(AffineScalarFunc.__getstate__(self))
+        # Conversion to a usual dictionary:
+        return obj_slot_values
+
+    def __setstate__(self, data_dict):
+        """
+        Hook for the standard pickle module.
+        """
+        for (name, value) in data_dict.items():
+            setattr(self, name, value)
+
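+# A minimal sketch (comments only): updating a Variable's standard
+# deviation through set_std_dev() is immediately reflected in
+# dependent quantities, since std_dev() is recomputed on each call:
+# >>> x = Variable(1.0, 0.1)
+# >>> y = 3*x
+# >>> y.std_dev()  # ~0.3
+# >>> x.set_std_dev(0.2)
+# >>> y.std_dev()  # ~0.6
+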
+###############################################################################
+
+# Utilities
+
+def nominal_value(x):
+    """
+    Returns the nominal value of x if it is a quantity with
+    uncertainty (i.e., an AffineScalarFunc object); otherwise, returns
+    x unchanged.
+
+    This utility function is useful for transforming a series of
+    numbers, when only some of them generally carry an uncertainty.
+    """
+
+    return x.nominal_value if isinstance(x, AffineScalarFunc) else x
+
+def std_dev(x):
+    """
+    Returns the standard deviation of x if it is a quantity with
+    uncertainty (i.e., an AffineScalarFunc object); otherwise, returns
+    the float 0.
+
+    This utility function is useful for transforming a series of
+    numbers, when only some of them generally carry an uncertainty.
+    """
+
+    return x.std_dev() if isinstance(x, AffineScalarFunc) else 0.
+
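+# A minimal sketch (comments only): these helpers make mixed
+# sequences of plain floats and numbers with uncertainties uniform:
+# >>> [nominal_value(v) for v in [Variable(1.0, 0.1), 2.0]]  # [1.0, 2.0]
+# >>> [std_dev(v) for v in [Variable(1.0, 0.1), 2.0]]  # [0.1, 0.0]
+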
+def covariance_matrix(nums_with_uncert):
+    """
+    Returns a matrix that contains the covariances between the given
+    sequence of numbers with uncertainties (AffineScalarFunc objects).
+    The resulting matrix implicitly depends on their ordering in
+    'nums_with_uncert'.
+
+    The covariances are floats (never int objects).
+
+    The returned covariance matrix is the exact linear approximation
+    result, if the nominal values of the numbers with uncertainties
+    and of their variables are their mean.  Otherwise, the returned
+    covariance matrix should be close to its linear approximation
+    value.
+
+    The returned matrix is a list of lists.
+    """
+    # See PSI.411 in EOL's notes.
+
+    covariance_matrix = []
+    for (i1, expr1) in enumerate(nums_with_uncert):
+        derivatives1 = expr1.derivatives  # Optimization
+        vars1 = set(derivatives1)
+        coefs_expr1 = []
+        for (i2, expr2) in enumerate(nums_with_uncert[:i1+1]):
+            derivatives2 = expr2.derivatives  # Optimization
+            coef = 0.
+            for var in vars1.intersection(derivatives2):
+                # var is a variable common to both numbers with
+                # uncertainties:
+                coef += (derivatives1[var]*derivatives2[var]*var._std_dev**2)
+            coefs_expr1.append(coef)
+        covariance_matrix.append(coefs_expr1)
+
+    # We symmetrize the matrix:
+    for (i, covariance_coefs) in enumerate(covariance_matrix):
+        covariance_coefs.extend(covariance_matrix[j][i]
+                                for j in range(i+1, len(covariance_matrix)))
+
+    return covariance_matrix
+
+try:
+    import numpy
+except ImportError:
+    pass
+else:
+    def correlation_matrix(nums_with_uncert):
+        '''
+        Returns the correlation matrix of the given sequence of
+        numbers with uncertainties, as a NumPy array of floats.
+        '''
+
+        cov_mat = numpy.array(covariance_matrix(nums_with_uncert))
+
+        std_devs = numpy.sqrt(cov_mat.diagonal())
+
+        return cov_mat/std_devs/std_devs[numpy.newaxis].T
+
+    __all__.append('correlation_matrix')
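+
+    # A minimal sketch (comments only), for a variable and a quantity
+    # fully correlated with it:
+    # >>> x = Variable(1.0, 0.1)
+    # >>> y = 2*x
+    # >>> covariance_matrix([x, y])  # ~[[0.01, 0.02], [0.02, 0.04]]
+    # >>> correlation_matrix([x, y])  # All ones: full correlation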
+
+###############################################################################
+# Parsing of values with uncertainties:
+
+POSITIVE_DECIMAL_UNSIGNED = r'(\d+)(\.\d*)?'
+
+# Regexp for a number with uncertainty (e.g., "-1.234(2)e-6"), where the
+# uncertainty is optional (in which case the uncertainty is implicit):
+NUMBER_WITH_UNCERT_RE_STR = '''
+    ([+-])?  # Sign
+    %s  # Main number
+    (?:\(%s\))?  # Optional uncertainty
+    ([eE][+-]?\d+)?  # Optional exponent
+    ''' % (POSITIVE_DECIMAL_UNSIGNED, POSITIVE_DECIMAL_UNSIGNED)
+
+NUMBER_WITH_UNCERT_RE = re.compile(
+    "^%s$" % NUMBER_WITH_UNCERT_RE_STR, re.VERBOSE)
+
+def parse_error_in_parentheses(representation):
+    """
+    Returns (value, error) from a string representing a number with
+    uncertainty like 12.34(5), 12.34(142), 12.5(3.4) or 12.3(4.2)e3.
+    If no parentheses are given, an uncertainty of one on the last
+    digit is assumed.
+
+    Raises ValueError if the string cannot be parsed.
+    """
+
+    match = NUMBER_WITH_UNCERT_RE.search(representation)
+
+    if match:
+        # The 'main' part is the nominal value, with 'int'eger part, and
+        # 'dec'imal part.  The 'uncert'ainty is similarly broken into its
+        # integer and decimal parts.
+        (sign, main_int, main_dec, uncert_int, uncert_dec,
+         exponent) = match.groups()
+    else:
+        raise ValueError("Unparsable number representation: '%s'."
+                         " Was expecting a string of the form 1.23(4)"
+                         " or 1.234" % representation)
+
+    # The value of the number is its nominal value:
+    value = float(''.join((sign or '',
+                           main_int,
+                           main_dec or '.0',
+                           exponent or '')))
+
+    if uncert_int is None:
+        # No uncertainty was found: an uncertainty of 1 on the last
+        # digit is assumed:
+        uncert_int = '1'
+
+    # Do we have a fully explicit uncertainty?
+    if uncert_dec is not None:
+        uncert = float("%s%s" % (uncert_int, uncert_dec or ''))
+    else:
+        # uncert_int represents an uncertainty on the last digits:
+
+        # The number of digits after the period defines the power of
+        # 10 that must be applied to the provided uncertainty:
+        num_digits_after_period = (0 if main_dec is None
+                                   else len(main_dec)-1)
+        uncert = int(uncert_int)/10**num_digits_after_period
+
+    # We apply the exponent to the uncertainty as well:
+    uncert *= float("1%s" % (exponent or ''))
+
+    return (value, uncert)
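+
+# A minimal sketch (comments only): the parenthesized uncertainty
+# applies to the last digits of the nominal value, and the exponent
+# applies to both:
+# >>> parse_error_in_parentheses('12.34(5)')  # (12.34, 0.05)
+# >>> parse_error_in_parentheses('-12.3456(78)e-6')  # (-1.23456e-05, 7.8e-09)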
+
+
+# The following function is not exposed because its result can in
+# effect be obtained by doing x = ufloat(representation) and then
+# using x.nominal_value and x.std_dev():
+def str_to_number_with_uncert(representation):
+    """
+    Given a string that represents a number with uncertainty, returns the
+    nominal value and the uncertainty.
+
+    The string can be of the form:
+    - 124.5+/-0.15
+    - 124.50(15)
+    - 124.50(123)
+    - 124.5
+
+    When no numerical error is given, an uncertainty of 1 on the last
+    digit is implied.
+
+    Raises ValueError if the string cannot be parsed.
+    """
+
+    try:
+        # Simple form 1234.45+/-1.2:
+        (value, uncert) = representation.split('+/-')
+    except ValueError:
+        # Form with parentheses or no uncertainty:
+        parsed_value = parse_error_in_parentheses(representation)
+    else:
+        try:
+            parsed_value = (float(value), float(uncert))
+        except ValueError:
+            raise ValueError('Cannot parse %s: was expecting a number'
+                             ' like 1.23+/-0.1' % representation)
+
+    return parsed_value
+
+def ufloat(representation, tag=None):
+    """
+    Returns a random variable (Variable object).
+
+    Converts the representation of a number into a number with
+    uncertainty (a random variable, defined by a nominal value and
+    a standard deviation).
+
+    The representation can be a (value, standard deviation) sequence,
+    or a string.
+
+    Strings of the form '12.345+/-0.015', '12.345(15)', or '12.3' are
+    recognized (see full list below).  In the last case, an
+    uncertainty of +/-1 is assigned to the last digit.
+
+    'tag' is an optional string tag for the variable.  Variables
+    don't have to have distinct tags.  Tags are useful for tracing
+    what values (and errors) enter in a given result (through the
+    error_components() method).
+
+    Examples of valid string representations:
+
+        -1.23(3.4)
+        -1.34(5)
+        1(6)
+        3(4.2)
+        -9(2)
+        1234567(1.2)
+        12.345(15)
+        -12.3456(78)e-6
+        12.3(0.4)e-5
+        0.29
+        31.
+        -31.
+        31
+        -3.1e10
+        169.0(7)
+        169.1(15)
+    """
+
+    # This function is somewhat optimized so as to help with the
+    # creation of lots of Variable objects (through unumpy.uarray, for
+    # instance).
+
+    # representations is "normalized" so as to be a valid sequence of
+    # 2 arguments for Variable().
+
+    #! Accepting strings and any kind of sequence slows down the code
+    # by about 5 %.  On the other hand, massive initializations of
+    # numbers with uncertainties are likely to be performed with
+    # unumpy.uarray, which does not support parsing from strings and
+    # thus does not have any overhead.
+
+    #! Different, in Python 3:
+    if isinstance(representation, basestring):
+        representation = str_to_number_with_uncert(representation)
+
+    #! The tag is forced to be a string, so that the user does not
+    # create a Variable(2.5, 0.5) in order to represent 2.5 +/- 0.5.
+    # Forcing 'tag' to be a string prevents numerical uncertainties
+    # from being considered as tags, here:
+    if tag is not None:
+        #! 'unicode' is removed in Python3:
+        assert isinstance(tag, (str, unicode)), "The tag can only be a string."
+
+    #! The special ** syntax is for Python 2.5 and before (Python 2.6+
+    # understands tag=tag):
+    return Variable(*representation, **{'tag': tag})
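+
+# A minimal sketch (comments only) of the two accepted forms:
+# >>> x = ufloat((3.0, 0.1))        # (value, std_dev) sequence
+# >>> y = ufloat('3.0+/-0.1', 'y')  # String form, with a tag
+# >>> x.nominal_value, x.std_dev()  # (3.0, 0.1)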
+
diff --git a/lmfit/uncertainties/umath.py b/lmfit/uncertainties/umath.py
index 5487b63..e0608c8 100644
--- a/lmfit/uncertainties/umath.py
+++ b/lmfit/uncertainties/umath.py
@@ -1,350 +1,350 @@
-'''
-Mathematical operations that generalize many operations from the
-standard math module so that they also work on numbers with
-uncertainties.
-
-Examples:
-
-  from umath import sin
-
-  # Manipulation of numbers with uncertainties:
-  x = uncertainties.ufloat((3, 0.1))
-  print sin(x)  # prints 0.141120008...+/-0.098999...
-
-  # The umath functions also work on regular Python floats:
-  print sin(3)  # prints 0.141120008...  This is a Python float.
-
-Importing all the functions from this module into the global namespace
-is possible.  This is encouraged when using a Python shell as a
-calculator.  Example:
-
-  import uncertainties
-  from uncertainties.umath import *  # Imports tan(), etc.
-
-  x = uncertainties.ufloat((3, 0.1))
-  print tan(x)  # tan() is the uncertainties.umath.tan function
-
-The numbers with uncertainties handled by this module are objects from
-the uncertainties module, from either the Variable or the
-AffineScalarFunc class.
-
-(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>.
-Please send feature requests, bug reports, or feedback to this address.
-
-This software is released under a dual license.  (1) The BSD license.
-(2) Any other license, as long as it is obtained from the original
-author.'''
-
-from __future__ import division  # Many analytical derivatives depend on this
-
-# Standard modules
-import math
-import sys
-import itertools
-import functools
-
-# Local modules
-from __init__ import wrap, set_doc, __author__, to_affine_scalar, AffineScalarFunc
-
-###############################################################################
-
-# We wrap the functions from the math module so that they keep track of
-# uncertainties by returning an AffineScalarFunc object.
-
-# Some functions from the math module cannot be adapted in a standard
-# way so as to work with AffineScalarFunc objects (either as their result
-# or as their arguments):
-
-# (1) Some functions return a result of a type whose value and
-# variations (uncertainties) cannot be represented by AffineScalarFunc
-# (e.g., math.frexp, which returns a tuple).  The exception raised
-# when not wrapping them with wrap() is more obvious than the
-# one obtained when wrapping them (in fact, the wrapped function
-# attempts operations that are not supported, such as calculating a
-# subtraction on a result of type tuple).
-
-# (2) Some functions don't take continuous scalar arguments (which can
-# be varied during differentiation): math.fsum, math.factorial...
-# Such functions can either be:
-
-# - wrapped in a special way.
-
-# - excluded from standard wrapping by adding their name to
-# no_std_wrapping
-
-# Math functions that have a standard interface: they take
-# one or more float arguments, and return a scalar:
-many_scalars_to_scalar_funcs = []
-
-# Some functions require a specific treatment and must therefore be
-# excluded from standard wrapping.  Functions
-# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']
-
-# Functions with numerical derivatives:
-num_deriv_funcs = ['fmod', 'gamma', 'isinf', 'isnan',
-                   'lgamma', 'trunc']
-
-# Functions that do not belong in many_scalars_to_scalar_funcs, but
-# that have a version that handles uncertainties:
-non_std_wrapped_funcs = []
-
-# Function that copies the relevant attributes from generalized
-# functions from the math module:
-wraps = functools.partial(functools.update_wrapper,
-                          assigned=('__doc__', '__name__'))
-
-########################################
-# Wrapping of math functions:
-
-# Fixed formulas for the derivatives of some functions from the math
-# module (some functions might not be present in all versions of
-# Python).  Singular points are not taken into account.  The user
-# should never give "large" uncertainties: problems could only appear
-# if this assumption does not hold.
-
-# Functions not mentioned in _fixed_derivatives have their derivatives
-# calculated numerically.
-
-# Functions that have singularities (possibly at infinity) benefit
-# from analytical calculations (instead of the default numerical
-# calculation) because their derivatives generally change very fast.
-# Even slowly varying functions (e.g., abs()) yield more precise
-# results when differentiated analytically, because of the loss of
-# precision in numerical calculations.
-
-#def log_1arg_der(x):
-#    """
-#    Derivative of log(x) (1-argument form).
-#    """
-#    return 1/x
-
-
-def log_der0(*args):
-    """
-    Derivative of math.log() with respect to its first argument.
-
-    Works whether 1 or 2 arguments are given.
-    """
-    if len(args) == 1:
-        return 1/args[0]
-    else:
-        return 1/args[0]/math.log(args[1])  # 2-argument form
-
-    # The following version goes about as fast:
-
-    ## A 'try' is used for the most common case because it is fast when no
-    ## exception is raised:
-    #try:
-    #    return log_1arg_der(*args)  # Argument number check
-    #except TypeError:
-    #    return 1/args[0]/math.log(args[1])  # 2-argument form
-
-_erf_coef = 2/math.sqrt(math.pi)  # Optimization for erf()
-
-fixed_derivatives = {
-    # In alphabetical order, here:
-    'acos': [lambda x: -1/math.sqrt(1-x**2)],
-    'acosh': [lambda x: 1/math.sqrt(x**2-1)],
-    'asin': [lambda x: 1/math.sqrt(1-x**2)],
-    'asinh': [lambda x: 1/math.sqrt(1+x**2)],
-    'atan': [lambda x: 1/(1+x**2)],
-    'atan2': [lambda y, x: x/(x**2+y**2),  # Correct for x == 0
-              lambda y, x: -y/(x**2+y**2)],  # Correct for x == 0
-    'atanh': [lambda x: 1/(1-x**2)],
-    'ceil': [lambda x: 0],
-    'copysign': [lambda x, y: (1 if x >= 0 else -1) * math.copysign(1, y),
-                 lambda x, y: 0],
-    'cos': [lambda x: -math.sin(x)],
-    'cosh': [math.sinh],
-    'degrees': [lambda x: math.degrees(1)],
-    'erf': [lambda x: math.exp(-x**2)*_erf_coef],
-    'erfc': [lambda x: -math.exp(-x**2)*_erf_coef],
-    'exp': [math.exp],
-    'expm1': [math.exp],
-    'fabs': [lambda x: 1 if x >= 0 else -1],
-    'floor': [lambda x: 0],
-    'hypot': [lambda x, y: x/math.hypot(x, y),
-              lambda x, y: y/math.hypot(x, y)],
-    'log': [log_der0,
-            lambda x, y: -math.log(x, y)/y/math.log(y)],
-    'log10': [lambda x: 1/x/math.log(10)],
-    'log1p': [lambda x: 1/(1+x)],
-    'pow': [lambda x, y: y*math.pow(x, y-1),
-            lambda x, y: math.log(x) * math.pow(x, y)],
-    'radians': [lambda x: math.radians(1)],
-    'sin': [math.cos],
-    'sinh': [math.cosh],
-    'sqrt': [lambda x: 0.5/math.sqrt(x)],
-    'tan': [lambda x: 1+math.tan(x)**2],
-    'tanh': [lambda x: 1-math.tanh(x)**2]
-    }
-
-# Many built-in functions in the math module are wrapped with a
-# version which is uncertainty aware:
-
-this_module = sys.modules[__name__]
-
-# for (name, attr) in vars(math).items():
-for name in dir(math):
-
-    if name in fixed_derivatives:  # Priority to functions in fixed_derivatives
-        derivatives = fixed_derivatives[name]
-    elif name in num_deriv_funcs:
-        # Functions whose derivatives are calculated numerically by
-        # this module fall here (isinf, fmod,...):
-        derivatives = None  # Means: numerical calculation required
-    else:
-        continue  # 'name' not wrapped by this module (__doc__, e, etc.)
-
-    func = getattr(math, name)
-
-    setattr(this_module, name,
-            wraps(wrap(func, derivatives), func))
-
-    many_scalars_to_scalar_funcs.append(name)
-
-###############################################################################
-
-########################################
-# Special cases: some of the functions from no_std_wrapping:
-
-##########
-# The math.factorial function is not converted to an uncertainty-aware
-# function, because it does not handle non-integer arguments: it does
-# not make sense to give it an argument with a numerical error
-# (whereas this would be relevant for the gamma function).
-
-##########
-
-# fsum takes a single argument, which cannot be differentiated.
-# However, each of the arguments inside this single list can
-# be a variable.  We handle this in a specific way:
-
-if sys.version_info[:2] >= (2, 6):
-
-    # For drop-in compatibility with the math module:
-    factorial = math.factorial
-    non_std_wrapped_funcs.append('factorial')
-
-
-    # We wrap math.fsum
-    original_func = math.fsum  # For optimization purposes
-
-    # The function below exists so that temporary variables do not
-    # pollute the module namespace:
-    def wrapped_fsum():
-        """
-        Returns an uncertainty-aware version of math.fsum, which must
-        be contained in _original_func.
-        """
-
-        # The fsum function is flattened, in order to use the
-        # wrap() wrapper:
-
-        flat_fsum = lambda *args: original_func(args)
-
-        flat_fsum_wrap = wrap(
-            flat_fsum, itertools.repeat(lambda *args: 1))
-
-        return wraps(lambda arg_list: flat_fsum_wrap(*arg_list),
-                     original_func)
-
-    fsum = wrapped_fsum()
-    non_std_wrapped_funcs.append('fsum')
-
-
-@set_doc(math.modf.__doc__)
-def modf(x):
-    """
-    Version of modf that works for numbers with uncertainty, and also
-    for regular numbers.
-    """
-
-    # The code below is inspired by wrap().  It is
-    # simpler because only 1 argument is given, and there is no
-    # delegation to other functions involved (as for __mul__, etc.).
-
-    aff_func = to_affine_scalar(x)
-
-    (frac_part, int_part) = math.modf(aff_func.nominal_value)
-
-    if aff_func.derivatives:
-        # The derivative of the fractional part is simply 1: the
-        # derivatives of modf(x)[0] are the derivatives of x:
-        return (AffineScalarFunc(frac_part, aff_func.derivatives), int_part)
-    else:
-        # This function was not called with an AffineScalarFunc
-        # argument: there is no need to return numbers with uncertainties:
-        return (frac_part, int_part)
-
-many_scalars_to_scalar_funcs.append('modf')
-
-
-@set_doc(math.ldexp.__doc__)
-def ldexp(x, y):
-    # The code below is inspired by wrap().  It is
-    # simpler because only 1 argument is given, and there is no
-    # delegation to other functions involved (as for __mul__, etc.).
-
-    # Another approach would be to add an additional argument to
-    # wrap() so that some arguments are automatically
-    # considered as constants.
-
-    aff_func = to_affine_scalar(x)  # y must be an integer, for math.ldexp
-
-    if aff_func.derivatives:
-        factor = 2**y
-        return AffineScalarFunc(
-            math.ldexp(aff_func.nominal_value, y),
-            # Chain rule:
-            dict((var, factor*deriv)
-                 for (var, deriv) in aff_func.derivatives.iteritems()))
-    else:
-        # This function was not called with an AffineScalarFunc
-        # argument: there is no need to return numbers with uncertainties:
-
-        # aff_func.nominal_value is not passed instead of x, because
-        # we do not have to care about the type of the return value of
-        # math.ldexp, this way (aff_func.nominal_value might be the
-        # value of x coerced to a difference type [int->float, for
-        # instance]):
-        return math.ldexp(x, y)
-many_scalars_to_scalar_funcs.append('ldexp')
-
-
-@set_doc(math.frexp.__doc__)
-def frexp(x):
-    """
-    Version of frexp that works for numbers with uncertainty, and also
-    for regular numbers.
-    """
-
-    # The code below is inspired by wrap().  It is
-    # simpler because only 1 argument is given, and there is no
-    # delegation to other functions involved (as for __mul__, etc.).
-
-    aff_func = to_affine_scalar(x)
-
-    if aff_func.derivatives:
-        result = math.frexp(aff_func.nominal_value)
-        # With frexp(x) = (m, e), dm/dx = 1/(2**e):
-        factor = 1/(2**result[1])
-        return (
-            AffineScalarFunc(
-                result[0],
-                # Chain rule:
-                dict((var, factor*deriv)
-                     for (var, deriv) in aff_func.derivatives.iteritems())),
-            # The exponent is an integer and is supposed to be
-            # continuous (small errors):
-            result[1])
-    else:
-        # This function was not called with an AffineScalarFunc
-        # argument: there is no need to return numbers with uncertainties:
-        return math.frexp(x)
-non_std_wrapped_funcs.append('frexp')
-
-###############################################################################
-# Exported functions:
-
-__all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs
+'''
+Mathematical operations that generalize many operations from the
+standard math module so that they also work on numbers with
+uncertainties.
+
+Examples:
+
+  from umath import sin
+
+  # Manipulation of numbers with uncertainties:
+  x = uncertainties.ufloat((3, 0.1))
+  print sin(x)  # prints 0.141120008...+/-0.098999...
+
+  # The umath functions also work on regular Python floats:
+  print sin(3)  # prints 0.141120008...  This is a Python float.
+
+Importing all the functions from this module into the global namespace
+is possible.  This is encouraged when using a Python shell as a
+calculator.  Example:
+
+  import uncertainties
+  from uncertainties.umath import *  # Imports tan(), etc.
+
+  x = uncertainties.ufloat((3, 0.1))
+  print tan(x)  # tan() is the uncertainties.umath.tan function
+
+The numbers with uncertainties handled by this module are objects from
+the uncertainties module, from either the Variable or the
+AffineScalarFunc class.
+
+(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot at normalesup.org>.
+Please send feature requests, bug reports, or feedback to this address.
+
+This software is released under a dual license.  (1) The BSD license.
+(2) Any other license, as long as it is obtained from the original
+author.'''
+
+from __future__ import division  # Many analytical derivatives depend on this
+
+# Standard modules
+import math
+import sys
+import itertools
+import functools
+
+# Local modules
+from __init__ import wrap, set_doc, __author__, to_affine_scalar, AffineScalarFunc
+
+###############################################################################
+
+# We wrap the functions from the math module so that they keep track of
+# uncertainties by returning an AffineScalarFunc object.
+
+# Some functions from the math module cannot be adapted in a standard
+# way so as to work with AffineScalarFunc objects (either as their result
+# or as their arguments):
+
+# (1) Some functions return a result of a type whose value and
+# variations (uncertainties) cannot be represented by AffineScalarFunc
+# (e.g., math.frexp, which returns a tuple).  The exception raised
+# when not wrapping them with wrap() is more obvious than the
+# one obtained when wrapping them (in fact, the wrapped function
+# attempts operations that are not supported, such as calculating a
+# subtraction on a result of type tuple).
+
+# (2) Some functions don't take continuous scalar arguments (which can
+# be varied during differentiation): math.fsum, math.factorial...
+# Such functions can either be:
+
+# - wrapped in a special way.
+
+# - excluded from standard wrapping by adding their name to
+# no_std_wrapping
+
+# Math functions that have a standard interface: they take
+# one or more float arguments, and return a scalar:
+many_scalars_to_scalar_funcs = []
+
+# Some functions require a specific treatment and must therefore be
+# excluded from standard wrapping; they are handled individually below:
+# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']
+
+# Functions with numerical derivatives:
+num_deriv_funcs = ['fmod', 'gamma', 'isinf', 'isnan',
+                   'lgamma', 'trunc']
+
+# Functions that do not belong in many_scalars_to_scalar_funcs, but
+# that have a version that handles uncertainties:
+non_std_wrapped_funcs = []
+
+# Helper that copies the relevant attributes from an original math
+# function onto its generalized (uncertainty-aware) version:
+wraps = functools.partial(functools.update_wrapper,
+                          assigned=('__doc__', '__name__'))
+
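+# Note (illustrative, not from the original source): wraps(wrapper,
+# wrapped) returns `wrapper` with the metadata of `wrapped` copied over:
+#
+#   >>> gen_sin = wraps(lambda x: math.sin(x), math.sin)
+#   >>> gen_sin.__name__
+#   'sin'
+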
+########################################
+# Wrapping of math functions:
+
+# Fixed formulas for the derivatives of some functions from the math
+# module (some functions might not be present in all versions of
+# Python).  Singular points are not taken into account.  These formulas
+# assume small uncertainties: problems can only appear when the user
+# supplies "large" uncertainties, for which this assumption fails.
+
+# Functions not mentioned in _fixed_derivatives have their derivatives
+# calculated numerically.
+
+# Functions that have singularities (possibly at infinity) benefit
+# from analytical calculations (instead of the default numerical
+# calculation) because their derivatives generally change very fast.
+# Even slowly varying functions (e.g., abs()) yield more precise
+# results when differentiated analytically, because of the loss of
+# precision in numerical calculations.
+
+#def log_1arg_der(x):
+#    """
+#    Derivative of log(x) (1-argument form).
+#    """
+#    return 1/x
+
+
+def log_der0(*args):
+    """
+    Derivative of math.log() with respect to its first argument.
+
+    Works whether 1 or 2 arguments are given.
+    """
+    if len(args) == 1:
+        return 1/args[0]
+    else:
+        return 1/args[0]/math.log(args[1])  # 2-argument form
+
+    # The following alternative is about as fast:
+
+    ## A 'try' is used for the most common case because it is fast when no
+    ## exception is raised:
+    #try:
+    #    return log_1arg_der(*args)  # Argument number check
+    #except TypeError:
+    #    return 1/args[0]/math.log(args[1])  # 2-argument form
+
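+# Illustrative check of the two calling forms (not in the original
+# source):
+#
+#   >>> log_der0(2.0)                 # d[log(x)]/dx at x = 2
+#   0.5
+#   >>> round(log_der0(8.0, 2.0), 6)  # d[log(x, 2)]/dx at x = 8, 1/(8*ln 2)
+#   0.180337
+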
+_erf_coef = 2/math.sqrt(math.pi)  # Optimization for erf()
+
+fixed_derivatives = {
+    # In alphabetical order, here:
+    'acos': [lambda x: -1/math.sqrt(1-x**2)],
+    'acosh': [lambda x: 1/math.sqrt(x**2-1)],
+    'asin': [lambda x: 1/math.sqrt(1-x**2)],
+    'asinh': [lambda x: 1/math.sqrt(1+x**2)],
+    'atan': [lambda x: 1/(1+x**2)],
+    'atan2': [lambda y, x: x/(x**2+y**2),  # Correct for x == 0
+              lambda y, x: -y/(x**2+y**2)],  # Correct for x == 0
+    'atanh': [lambda x: 1/(1-x**2)],
+    'ceil': [lambda x: 0],
+    'copysign': [lambda x, y: (1 if x >= 0 else -1) * math.copysign(1, y),
+                 lambda x, y: 0],
+    'cos': [lambda x: -math.sin(x)],
+    'cosh': [math.sinh],
+    'degrees': [lambda x: math.degrees(1)],
+    'erf': [lambda x: math.exp(-x**2)*_erf_coef],
+    'erfc': [lambda x: -math.exp(-x**2)*_erf_coef],
+    'exp': [math.exp],
+    'expm1': [math.exp],
+    'fabs': [lambda x: 1 if x >= 0 else -1],
+    'floor': [lambda x: 0],
+    'hypot': [lambda x, y: x/math.hypot(x, y),
+              lambda x, y: y/math.hypot(x, y)],
+    'log': [log_der0,
+            lambda x, y: -math.log(x, y)/y/math.log(y)],
+    'log10': [lambda x: 1/x/math.log(10)],
+    'log1p': [lambda x: 1/(1+x)],
+    'pow': [lambda x, y: y*math.pow(x, y-1),
+            lambda x, y: math.log(x) * math.pow(x, y)],
+    'radians': [lambda x: math.radians(1)],
+    'sin': [math.cos],
+    'sinh': [math.cosh],
+    'sqrt': [lambda x: 0.5/math.sqrt(x)],
+    'tan': [lambda x: 1+math.tan(x)**2],
+    'tanh': [lambda x: 1-math.tanh(x)**2]
+    }
+
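+# Illustrative sanity check for the table above (not part of the
+# original module): an analytical entry should agree with a central
+# finite difference, assuming a smooth function and a small step:
+#
+#   >>> eps = 1e-6
+#   >>> analytical = fixed_derivatives['sin'][0](0.3)
+#   >>> numerical = (math.sin(0.3 + eps) - math.sin(0.3 - eps))/(2*eps)
+#   >>> abs(analytical - numerical) < 1e-9
+#   True
+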
+# Many built-in functions in the math module are wrapped with a
+# version which is uncertainty aware:
+
+this_module = sys.modules[__name__]
+
+# for (name, attr) in vars(math).items():
+for name in dir(math):
+
+    if name in fixed_derivatives:  # Priority to functions in fixed_derivatives
+        derivatives = fixed_derivatives[name]
+    elif name in num_deriv_funcs:
+        # Functions whose derivatives are calculated numerically by
+        # this module fall here (isinf, fmod,...):
+        derivatives = None  # Means: numerical calculation required
+    else:
+        continue  # 'name' not wrapped by this module (__doc__, e, etc.)
+
+    func = getattr(math, name)
+
+    setattr(this_module, name,
+            wraps(wrap(func, derivatives), func))
+
+    many_scalars_to_scalar_funcs.append(name)
+
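+# After this loop, names like sin and atan2 are attributes of this
+# module and accept numbers with uncertainties.  Illustration (mirrors
+# the module docstring; values truncated):
+#
+#   >>> import uncertainties
+#   >>> x = uncertainties.ufloat((3, 0.1))
+#   >>> print sin(x)        # error is |cos(3)|*0.1
+#   0.141120008...+/-0.098999...
+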
+###############################################################################
+
+########################################
+# Special cases: some of the functions from no_std_wrapping:
+
+##########
+# The math.factorial function is not converted to an uncertainty-aware
+# function, because it does not handle non-integer arguments: it does
+# not make sense to give it an argument with a numerical error
+# (whereas this would be relevant for the gamma function).
+
+##########
+
+# fsum takes a single argument, which cannot be differentiated.
+# However, each of the arguments inside this single list can
+# be a variable.  We handle this in a specific way:
+
+if sys.version_info[:2] >= (2, 6):
+
+    # For drop-in compatibility with the math module:
+    factorial = math.factorial
+    non_std_wrapped_funcs.append('factorial')
+
+
+    # We wrap math.fsum
+    original_func = math.fsum  # For optimization purposes
+
+    # The function below exists so that temporary variables do not
+    # pollute the module namespace:
+    def wrapped_fsum():
+        """
+        Returns an uncertainty-aware version of math.fsum, which must
+        be contained in original_func.
+        """
+
+        # The fsum function is flattened, in order to use the
+        # wrap() wrapper:
+
+        flat_fsum = lambda *args: original_func(args)
+
+        flat_fsum_wrap = wrap(
+            flat_fsum, itertools.repeat(lambda *args: 1))
+
+        return wraps(lambda arg_list: flat_fsum_wrap(*arg_list),
+                     original_func)
+
+    fsum = wrapped_fsum()
+    non_std_wrapped_funcs.append('fsum')
+
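+    # Illustration (not from the original source): each element of the
+    # single list argument may carry an uncertainty, and each occurrence
+    # contributes with derivative 1:
+    #
+    #   >>> x = uncertainties.ufloat((1, 0.1))
+    #   >>> print fsum([x, 2, x])   # derivative w.r.t. x is 1 + 1 = 2
+    #   4.0+/-0.2
+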
+
+@set_doc(math.modf.__doc__)
+def modf(x):
+    """
+    Version of modf that works for numbers with uncertainty, and also
+    for regular numbers.
+    """
+
+    # The code below is inspired by wrap().  It is
+    # simpler because only 1 argument is given, and there is no
+    # delegation to other functions involved (as for __mul__, etc.).
+
+    aff_func = to_affine_scalar(x)
+
+    (frac_part, int_part) = math.modf(aff_func.nominal_value)
+
+    if aff_func.derivatives:
+        # The derivative of the fractional part is simply 1: the
+        # derivatives of modf(x)[0] are the derivatives of x:
+        return (AffineScalarFunc(frac_part, aff_func.derivatives), int_part)
+    else:
+        # This function was not called with an AffineScalarFunc
+        # argument: there is no need to return numbers with uncertainties:
+        return (frac_part, int_part)
+
+many_scalars_to_scalar_funcs.append('modf')
+
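+# Illustration of the uncertainty-aware modf (example values, not from
+# the original source): the integer part stays a plain float, while the
+# fractional part keeps the full uncertainty of x:
+#
+#   >>> x = uncertainties.ufloat((2.25, 0.1))
+#   >>> frac, whole = modf(x)
+#   >>> whole
+#   2.0
+#   >>> print frac
+#   0.25+/-0.1
+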
+
+@set_doc(math.ldexp.__doc__)
+def ldexp(x, y):
+    # The code below is inspired by wrap().  It is
+    # simpler because only 1 argument is given, and there is no
+    # delegation to other functions involved (as for __mul__, etc.).
+
+    # Another approach would be to add an additional argument to
+    # wrap() so that some arguments are automatically
+    # considered as constants.
+
+    aff_func = to_affine_scalar(x)  # y must be an integer, for math.ldexp
+
+    if aff_func.derivatives:
+        factor = 2**y
+        return AffineScalarFunc(
+            math.ldexp(aff_func.nominal_value, y),
+            # Chain rule:
+            dict((var, factor*deriv)
+                 for (var, deriv) in aff_func.derivatives.iteritems()))
+    else:
+        # This function was not called with an AffineScalarFunc
+        # argument: there is no need to return numbers with uncertainties:
+
+        # x is passed instead of aff_func.nominal_value so that the
+        # return type of math.ldexp is not affected
+        # (aff_func.nominal_value might be the value of x coerced to a
+        # different type [int->float, for instance]):
+        return math.ldexp(x, y)
+many_scalars_to_scalar_funcs.append('ldexp')
+
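+# Illustration (example values, not from the original source):
+# ldexp(x, 3) is x*2**3, so the uncertainty scales by the same factor:
+#
+#   >>> x = uncertainties.ufloat((1.5, 0.01))
+#   >>> print ldexp(x, 3)
+#   12.0+/-0.08
+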
+
+@set_doc(math.frexp.__doc__)
+def frexp(x):
+    """
+    Version of frexp that works for numbers with uncertainty, and also
+    for regular numbers.
+    """
+
+    # The code below is inspired by wrap().  It is
+    # simpler because only 1 argument is given, and there is no
+    # delegation to other functions involved (as for __mul__, etc.).
+
+    aff_func = to_affine_scalar(x)
+
+    if aff_func.derivatives:
+        result = math.frexp(aff_func.nominal_value)
+        # With frexp(x) = (m, e), dm/dx = 1/(2**e):
+        factor = 1/(2**result[1])
+        return (
+            AffineScalarFunc(
+                result[0],
+                # Chain rule:
+                dict((var, factor*deriv)
+                     for (var, deriv) in aff_func.derivatives.iteritems())),
+            # The exponent is an integer; the uncertainties are assumed
+            # small enough not to change it:
+            result[1])
+    else:
+        # This function was not called with an AffineScalarFunc
+        # argument: there is no need to return numbers with uncertainties:
+        return math.frexp(x)
+non_std_wrapped_funcs.append('frexp')
+
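+# Illustration (example values, not from the original source): since
+# 12 = 0.75*2**4, an uncertainty of 0.16 on x becomes 0.16/2**4 = 0.01
+# on the mantissa:
+#
+#   >>> x = uncertainties.ufloat((12, 0.16))
+#   >>> mantissa, exponent = frexp(x)
+#   >>> print mantissa, exponent
+#   0.75+/-0.01 4
+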
+###############################################################################
+# Exported functions:
+
+__all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs
diff --git a/publish_docs.sh b/publish_docs.sh
index e3badfe..5ffd2ba 100644
--- a/publish_docs.sh
+++ b/publish_docs.sh
@@ -1,59 +1,59 @@
-installdir='/www/apache/htdocs/software/python/lmfit'
-docbuild='doc/_build'
-
-cd doc 
-echo '# Making docs'
-make all
-cd ../
-
-echo '# Building tarball of docs'
-mkdir _tmpdoc
-cp -pr doc/lmfit.pdf     _tmpdoc/lmfit.pdf
-cp -pr doc/_build/html/*    _tmpdoc/.
-cd _tmpdoc
-tar czf ../../lmfit_docs.tar.gz .
-cd ..
-rm -rf _tmpdoc 
-
-# 
-echo "# Switching to gh-pages branch"
-git checkout gh-pages
-
-if  [ $? -ne 0 ]  ; then 
-  echo ' failed.'
-  exit 
-fi
-
-tar xzf ../lmfit_docs.tar.gz .
-
-echo "# commit changes to gh-pages branch"
-git commit -am "changed docs"
-
-if  [ $? -ne 0 ]  ; then 
-  echo ' failed.'
-  exit 
-fi
-
-echo "# Pushing docs to github"
-git push
-
-
-echo "# switch back to master branch"
-git checkout master
-
-if  [ $? -ne 0 ]  ; then 
-  echo ' failed.'
-  exit 
-fi
-
-# install locally
-echo "# Installing docs to CARS web pages"
-cp ../lmfit_docs.tar.gz $installdir/..
-
-cd $installdir
-if  [ $? -ne 0 ]  ; then 
-  echo ' failed.'
-  exit 
-fi
-
-tar xvzf ../lmfit_docs.tar.gz
+installdir='/www/apache/htdocs/software/python/lmfit'
+docbuild='doc/_build'
+
+cd doc 
+echo '# Making docs'
+make all
+cd ../
+
+echo '# Building tarball of docs'
+mkdir _tmpdoc
+cp -pr doc/lmfit.pdf     _tmpdoc/lmfit.pdf
+cp -pr doc/_build/html/*    _tmpdoc/.
+cd _tmpdoc
+tar czf ../../lmfit_docs.tar.gz .
+cd ..
+rm -rf _tmpdoc 
+
+# 
+echo "# Switching to gh-pages branch"
+git checkout gh-pages
+
+if  [ $? -ne 0 ]  ; then 
+  echo ' failed.'
+  exit 
+fi
+
+tar xzf ../lmfit_docs.tar.gz .
+
+echo "# commit changes to gh-pages branch"
+git commit -am "changed docs"
+
+if  [ $? -ne 0 ]  ; then 
+  echo ' failed.'
+  exit 
+fi
+
+echo "# Pushing docs to github"
+git push
+
+
+echo "# switch back to master branch"
+git checkout master
+
+if  [ $? -ne 0 ]  ; then 
+  echo ' failed.'
+  exit 
+fi
+
+# install locally
+echo "# Installing docs to CARS web pages"
+cp ../lmfit_docs.tar.gz $installdir/..
+
+cd $installdir
+if  [ $? -ne 0 ]  ; then 
+  echo ' failed.'
+  exit 
+fi
+
+tar xvzf ../lmfit_docs.tar.gz
diff --git a/requirements.txt b/requirements.txt
index fecf23c..fe73b4f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
-numpy>=1.5
-scipy>=0.13
+numpy>=1.5
+scipy>=0.13
diff --git a/setup.py b/setup.py
index faa4469..ac98539 100644
--- a/setup.py
+++ b/setup.py
@@ -1,54 +1,54 @@
-#!/usr/bin/env python
-# from distutils.core import setup
-from setuptools import setup
-
-import versioneer
-versioneer.VCS = 'git'
-versioneer.versionfile_source = 'lmfit/_version.py'
-versioneer.versionfile_build = 'lmfit/_version.py'
-versioneer.tag_prefix = ''
-versioneer.parentdir_prefix = 'lmfit-'
-
-
-long_desc = """A library for least-squares minimization and data fitting in
-Python.  Built on top of scipy.optimize, lmfit provides a Parameter object
-which can be set as fixed or free, can have upper and/or lower bounds, or
-can be written in terms of algebraic constraints of other Parameters.  The
-user writes a function to be minimized as a function of these Parameters,
-and the scipy.optimize methods are used to find the optimal values for the
-Parameters.  The Levenberg-Marquardt (leastsq) is the default minimization
-algorithm, and provides estimated standard errors and correlations between
-varied Parameters.  Other minimization methods, including Nelder-Mead's
-downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
-others are also supported.  Bounds and contraints can be placed on
-Parameters for all of these methods.
-
-In addition, methods for explicitly calculating confidence intervals are
-provided for exploring minmization problems where the approximation of
-estimating Parameter uncertainties from the covariance matrix is
-questionable. """
-
-
-setup(name = 'lmfit',
-      version = versioneer.get_version(),
-      cmdclass = versioneer.get_cmdclass(),
-      author = 'LMFit Development Team',
-      author_email = 'matt.newville at gmail.com',
-      url          = 'http://lmfit.github.io/lmfit-py/',
-      download_url = 'http://lmfit.github.io//lmfit-py/',
-      install_requires = ['numpy', 'scipy'],
-      license = 'BSD',
-      description = "Least-Squares Minimization with Bounds and Constraints",
-      long_description = long_desc,
-      platforms = ['Windows', 'Linux', 'Mac OS X'],
-      classifiers=['Intended Audience :: Science/Research',
-                   'Operating System :: OS Independent',
-                   'Programming Language :: Python',
-                   'Topic :: Scientific/Engineering',
-                   ],
-      # test_suite='nose.collector',
-      # test_requires=['Nose'],
-      package_dir = {'lmfit': 'lmfit'},
-      packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
-      )
-
+#!/usr/bin/env python
+# from distutils.core import setup
+from setuptools import setup
+
+import versioneer
+versioneer.VCS = 'git'
+versioneer.versionfile_source = 'lmfit/_version.py'
+versioneer.versionfile_build = 'lmfit/_version.py'
+versioneer.tag_prefix = ''
+versioneer.parentdir_prefix = 'lmfit-'
+
+
+long_desc = """A library for least-squares minimization and data fitting in
+Python.  Built on top of scipy.optimize, lmfit provides a Parameter object
+which can be set as fixed or free, can have upper and/or lower bounds, or
+can be written in terms of algebraic constraints of other Parameters.  The
+user writes a function to be minimized as a function of these Parameters,
+and the scipy.optimize methods are used to find the optimal values for the
+Parameters.  The Levenberg-Marquardt (leastsq) is the default minimization
+algorithm, and provides estimated standard errors and correlations between
+varied Parameters.  Other minimization methods, including Nelder-Mead's
+downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
+others are also supported.  Bounds and constraints can be placed on
+Parameters for all of these methods.
+
+In addition, methods for explicitly calculating confidence intervals are
+provided for exploring minimization problems where the approximation of
+estimating Parameter uncertainties from the covariance matrix is
+questionable. """
+
+
+setup(name = 'lmfit',
+      version = versioneer.get_version(),
+      cmdclass = versioneer.get_cmdclass(),
+      author = 'LMFit Development Team',
+      author_email = 'matt.newville at gmail.com',
+      url          = 'http://lmfit.github.io/lmfit-py/',
+      download_url = 'http://lmfit.github.io/lmfit-py/',
+      install_requires = ['numpy', 'scipy'],
+      license = 'BSD',
+      description = "Least-Squares Minimization with Bounds and Constraints",
+      long_description = long_desc,
+      platforms = ['Windows', 'Linux', 'Mac OS X'],
+      classifiers=['Intended Audience :: Science/Research',
+                   'Operating System :: OS Independent',
+                   'Programming Language :: Python',
+                   'Topic :: Scientific/Engineering',
+                   ],
+      # test_suite='nose.collector',
+      # test_requires=['Nose'],
+      package_dir = {'lmfit': 'lmfit'},
+      packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
+      )
+
diff --git a/tests/NISTModels.py b/tests/NISTModels.py
index 1795653..197856f 100644
--- a/tests/NISTModels.py
+++ b/tests/NISTModels.py
@@ -1,198 +1,198 @@
-import os
-import sys
-from numpy import exp, log, log10, sin, cos, arctan, array
-from lmfit import Parameters
-thisdir, thisfile = os.path.split(__file__)
-NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
-
-def read_params(params):
-    if isinstance(params, Parameters):
-        return [par.value for par in params.values()]
-    else:
-        return params
-
-def Bennet5(b, x, y=0):
-    b = read_params(b)
-    return y - b[0] * (b[1]+x)**(-1/b[2])
-
-def BoxBOD(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*(1-exp(-b[1]*x))
-
-def Chwirut(b, x, y=0):
-    b = read_params(b)
-    return y - exp(-b[0]*x)/(b[1]+b[2]*x)
-
-def DanWood(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*x**b[1]
-
-def ENSO(b, x, y=0):
-    b = read_params(b)
-    pi = 3.141592653589793238462643383279
-
-    return y - b[0] + (b[1]*cos( 2*pi*x/12 ) + b[2]*sin( 2*pi*x/12 ) +
-                       b[4]*cos( 2*pi*x/b[3] ) + b[5]*sin( 2*pi*x/b[3] ) +
-                       b[7]*cos( 2*pi*x/b[6] ) + b[8]*sin( 2*pi*x/b[6] ) )
-
-def Eckerle4(b, x, y=0):
-    b = read_params(b)
-    return y - (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2)
-
-def Gauss(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*exp( -b[1]*x ) + (b[2]*exp( -(x-b[3])**2 / b[4]**2 ) +
-                                      b[5]*exp( -(x-b[6])**2 / b[7]**2 ) )
-
-def Hahn1(b, x, y=0):
-    b = read_params(b)
-    return y - ((b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) /
-                (1+b[4]*x+b[5]*x**2+b[6]*x**3)  )
-
-def Kirby(b, x, y=0):
-    b = read_params(b)
-    return y - (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2)
-
-def Lanczos(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x)
-
-def MGH09(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3])
-
-def MGH10(b, x, y=0):
-    b = read_params(b)
-    return y - b[0] * exp( b[1]/(x+b[2]) )
-
-def MGH17(b, x, y=0):
-    b = read_params(b)
-    return y - b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4])
-
-def Misra1a(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*(1-exp(-b[1]*x))
-
-def Misra1b(b, x, y=0):
-    b = read_params(b)
-    return y - b[0] * (1-(1+b[1]*x/2)**(-2))
-
-def Misra1c(b, x, y=0):
-    b = read_params(b)
-    return y - b[0] * (1-(1+2*b[1]*x)**(-.5))
-
-def Misra1d(b, x, y=0):
-    b = read_params(b)
-    return y - b[0]*b[1]*x*((1+b[1]*x)**(-1))
-
-def Nelson(b, x, y=None):
-    b = read_params(b)
-    x1 = x[:,0]
-    x2 = x[:,1]
-    if y is None:
-        return  - exp(b[0] - b[1]*x1 * exp(-b[2]*x2))
-    return log(y) - (b[0] - b[1]*x1 * exp(-b[2]*x2) )
-
-def Rat42(b, x, y=0):
-    b = read_params(b)
-    return  y - b[0] / (1+exp(b[1]-b[2]*x))
-
-def Rat43(b, x, y=0):
-    b = read_params(b)
-    return  y - b[0] / ((1+exp(b[1]-b[2]*x))**(1/b[3]))
-
-def Roszman1(b, x, y=0):
-    b = read_params(b)
-    pi = 3.141592653589793238462643383279
-    return y - b[0] - b[1]*x - arctan(b[2]/(x-b[3]))/pi
-
-def Thurber(b, x, y=0):
-    b = read_params(b)
-    return y - ( (b[0] + b[1]*x + b[2]*x**2 + b[3]*x**3) /
-                 (1 + b[4]*x + b[5]*x**2 + b[6]*x**3) )
-
-#  Model name        fcn,    #fitting params, dim of x
-Models = {'Bennett5':  (Bennet5,  3, 1),
-          'BoxBOD':    (BoxBOD,   2, 1),
-          'Chwirut1':  (Chwirut,  3, 1),
-          'Chwirut2':  (Chwirut,  3, 1),
-          'DanWood':   (DanWood,  2, 1),
-          'ENSO':      (ENSO,     9, 1),
-          'Eckerle4':  (Eckerle4, 3, 1),
-          'Gauss1':    (Gauss,    8, 1),
-          'Gauss2':    (Gauss,    8, 1),
-          'Gauss3':    (Gauss,    8, 1),
-          'Hahn1':     (Hahn1,    7, 1),
-          'Kirby2':    (Kirby,    5, 1),
-          'Lanczos1':  (Lanczos,  6, 1),
-          'Lanczos2':  (Lanczos,  6, 1),
-          'Lanczos3':  (Lanczos,  6, 1),
-          'MGH09':     (MGH09,    4, 1),
-          'MGH10':     (MGH10,    3, 1),
-          'MGH17':     (MGH17,    5, 1),
-          'Misra1a':   (Misra1a,  2, 1),
-          'Misra1b' :  (Misra1b,  2, 1),
-          'Misra1c' :  (Misra1c,  2, 1),
-          'Misra1d' :  (Misra1d,  2, 1),
-          'Nelson':    (Nelson,   3, 2),
-          'Rat42':     (Rat42,    3, 1),
-          'Rat43':     (Rat43,    4, 1),
-          'Roszman1':  (Roszman1, 4, 1),
-          'Thurber':   (Thurber,  7, 1) }
-
-def ReadNistData(dataset):
-    """NIST STRD data is in a simple, fixed format with
-    line numbers being significant!
-    """
-    finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r')
-    lines = [l[:-1] for l in finp.readlines()]
-    finp.close()
-    ModelLines = lines[30:39]
-    ParamLines = lines[40:58]
-    DataLines = lines[60:]
-
-    words = ModelLines[1].strip().split()
-    nparams = int(words[0])
-
-    start1 = [0]*nparams
-    start2 = [0]*nparams
-    certval = [0]*nparams
-    certerr = [0]*nparams
-    for i, text in enumerate(ParamLines[:nparams]):
-        [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
-        start1[i] = s1
-        start2[i] = s2
-        certval[i] = val
-        certerr[i] = err
-
-    #
-    for t in ParamLines[nparams:]:
-        t =  t.strip()
-        if ':' not in t:
-            continue
-        val = float(t.split(':')[1])
-        if t.startswith('Residual Sum of Squares'):
-            sum_squares = val
-        elif t.startswith('Residual Standard Deviation'):
-            std_dev = val
-        elif t.startswith('Degrees of Freedom'):
-            nfree = int(val)
-        elif t.startswith('Number of Observations'):
-            ndata = int(val)
-
-    y, x = [], []
-    for d in DataLines:
-        vals = [float(i) for i in d.strip().split()]
-        y.append(vals[0])
-        if len(vals) > 2:
-            x.append(vals[1:])
-        else:
-            x.append(vals[1])
-
-    y = array(y)
-    x = array(x)
-    out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
-           'nfree': nfree, 'start1': start1, 'start2': start2,
-           'sum_squares': sum_squares, 'std_dev': std_dev,
-           'cert': certval,  'cert_values': certval,  'cert_stderr': certerr }
-    return out
+import os
+import sys
+from numpy import exp, log, log10, sin, cos, arctan, array
+from lmfit import Parameters
+thisdir, thisfile = os.path.split(__file__)
+NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
+
+def read_params(params):
+    if isinstance(params, Parameters):
+        return [par.value for par in params.values()]
+    else:
+        return params
+
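+# Each model function below returns y - model(b, x): with y supplied it
+# is a least-squares residual, and with the default y=0 it returns the
+# negated model.  Illustration (uses this module's `array` import, the
+# first Misra1a observation, and roughly its certified parameters;
+# values are approximate):
+#
+#   >>> Misra1a([238.94, 5.5016e-4], array([77.6]), y=array([10.07]))
+#   array([ 0.0837...])
+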
+def Bennet5(b, x, y=0):
+    b = read_params(b)
+    return y - b[0] * (b[1]+x)**(-1/b[2])
+
+def BoxBOD(b, x, y=0):
+    b = read_params(b)
+    return y - b[0]*(1-exp(-b[1]*x))
+
+def Chwirut(b, x, y=0):
+    b = read_params(b)
+    return y - exp(-b[0]*x)/(b[1]+b[2]*x)
+
+def DanWood(b, x, y=0):
+    b = read_params(b)
+    return y - b[0]*x**b[1]
+
+def ENSO(b, x, y=0):
+    b = read_params(b)
+    pi = 3.141592653589793238462643383279
+
+    return y - (b[0] + b[1]*cos( 2*pi*x/12 ) + b[2]*sin( 2*pi*x/12 ) +
+                b[4]*cos( 2*pi*x/b[3] ) + b[5]*sin( 2*pi*x/b[3] ) +
+                b[7]*cos( 2*pi*x/b[6] ) + b[8]*sin( 2*pi*x/b[6] ) )
+
+def Eckerle4(b, x, y=0):
+    b = read_params(b)
+    return y - (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2)
+
+def Gauss(b, x, y=0):
+    b = read_params(b)
+    return y - (b[0]*exp( -b[1]*x ) + b[2]*exp( -(x-b[3])**2 / b[4]**2 ) +
+                b[5]*exp( -(x-b[6])**2 / b[7]**2 ))
+
+def Hahn1(b, x, y=0):
+    b = read_params(b)
+    return y - ((b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) /
+                (1+b[4]*x+b[5]*x**2+b[6]*x**3)  )
+
+def Kirby(b, x, y=0):
+    b = read_params(b)
+    return y - (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2)
+
+def Lanczos(b, x, y=0):
+    b = read_params(b)
+    return y - (b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x))
+
+def MGH09(b, x, y=0):
+    b = read_params(b)
+    return y - b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3])
+
+def MGH10(b, x, y=0):
+    b = read_params(b)
+    return y - b[0] * exp( b[1]/(x+b[2]) )
+
+def MGH17(b, x, y=0):
+    b = read_params(b)
+    return y - (b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4]))
+
+def Misra1a(b, x, y=0):
+    b = read_params(b)
+    return y - b[0]*(1-exp(-b[1]*x))
+
+def Misra1b(b, x, y=0):
+    b = read_params(b)
+    return y - b[0] * (1-(1+b[1]*x/2)**(-2))
+
+def Misra1c(b, x, y=0):
+    b = read_params(b)
+    return y - b[0] * (1-(1+2*b[1]*x)**(-.5))
+
+def Misra1d(b, x, y=0):
+    b = read_params(b)
+    return y - b[0]*b[1]*x*((1+b[1]*x)**(-1))
+
+def Nelson(b, x, y=None):
+    b = read_params(b)
+    x1 = x[:,0]
+    x2 = x[:,1]
+    if y is None:
+        return  - exp(b[0] - b[1]*x1 * exp(-b[2]*x2))
+    return log(y) - (b[0] - b[1]*x1 * exp(-b[2]*x2) )
+
+def Rat42(b, x, y=0):
+    b = read_params(b)
+    return  y - b[0] / (1+exp(b[1]-b[2]*x))
+
+def Rat43(b, x, y=0):
+    b = read_params(b)
+    return  y - b[0] / ((1+exp(b[1]-b[2]*x))**(1/b[3]))
+
+def Roszman1(b, x, y=0):
+    b = read_params(b)
+    pi = 3.141592653589793238462643383279
+    return y - b[0] - b[1]*x - arctan(b[2]/(x-b[3]))/pi
+
+def Thurber(b, x, y=0):
+    b = read_params(b)
+    return y - ( (b[0] + b[1]*x + b[2]*x**2 + b[3]*x**3) /
+                 (1 + b[4]*x + b[5]*x**2 + b[6]*x**3) )
+
+#  Model name        fcn,    #fitting params, dim of x
+Models = {'Bennett5':  (Bennet5,  3, 1),
+          'BoxBOD':    (BoxBOD,   2, 1),
+          'Chwirut1':  (Chwirut,  3, 1),
+          'Chwirut2':  (Chwirut,  3, 1),
+          'DanWood':   (DanWood,  2, 1),
+          'ENSO':      (ENSO,     9, 1),
+          'Eckerle4':  (Eckerle4, 3, 1),
+          'Gauss1':    (Gauss,    8, 1),
+          'Gauss2':    (Gauss,    8, 1),
+          'Gauss3':    (Gauss,    8, 1),
+          'Hahn1':     (Hahn1,    7, 1),
+          'Kirby2':    (Kirby,    5, 1),
+          'Lanczos1':  (Lanczos,  6, 1),
+          'Lanczos2':  (Lanczos,  6, 1),
+          'Lanczos3':  (Lanczos,  6, 1),
+          'MGH09':     (MGH09,    4, 1),
+          'MGH10':     (MGH10,    3, 1),
+          'MGH17':     (MGH17,    5, 1),
+          'Misra1a':   (Misra1a,  2, 1),
+          'Misra1b' :  (Misra1b,  2, 1),
+          'Misra1c' :  (Misra1c,  2, 1),
+          'Misra1d' :  (Misra1d,  2, 1),
+          'Nelson':    (Nelson,   3, 2),
+          'Rat42':     (Rat42,    3, 1),
+          'Rat43':     (Rat43,    4, 1),
+          'Roszman1':  (Roszman1, 4, 1),
+          'Thurber':   (Thurber,  7, 1) }
+
+def ReadNistData(dataset):
+    """NIST STRD data is in a simple, fixed format with
+    line numbers being significant!
+    """
+    finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r')
+    lines = [l[:-1] for l in finp.readlines()]
+    finp.close()
+    ModelLines = lines[30:39]
+    ParamLines = lines[40:58]
+    DataLines = lines[60:]
+
+    words = ModelLines[1].strip().split()
+    nparams = int(words[0])
+
+    start1 = [0]*nparams
+    start2 = [0]*nparams
+    certval = [0]*nparams
+    certerr = [0]*nparams
+    for i, text in enumerate(ParamLines[:nparams]):
+        [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
+        start1[i] = s1
+        start2[i] = s2
+        certval[i] = val
+        certerr[i] = err
+
+    #
+    for t in ParamLines[nparams:]:
+        t =  t.strip()
+        if ':' not in t:
+            continue
+        val = float(t.split(':')[1])
+        if t.startswith('Residual Sum of Squares'):
+            sum_squares = val
+        elif t.startswith('Residual Standard Deviation'):
+            std_dev = val
+        elif t.startswith('Degrees of Freedom'):
+            nfree = int(val)
+        elif t.startswith('Number of Observations'):
+            ndata = int(val)
+
+    y, x = [], []
+    for d in DataLines:
+        vals = [float(i) for i in d.strip().split()]
+        y.append(vals[0])
+        if len(vals) > 2:
+            x.append(vals[1:])
+        else:
+            x.append(vals[1])
+
+    y = array(y)
+    x = array(x)
+    out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
+           'nfree': nfree, 'start1': start1, 'start2': start2,
+           'sum_squares': sum_squares, 'std_dev': std_dev,
+           'cert': certval,  'cert_values': certval,  'cert_stderr': certerr }
+    return out
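+
+# Usage sketch (assumes the NIST_STRD data files are present):
+#
+#   >>> d = ReadNistData('Misra1a')
+#   >>> d['nparams'], d['ndata']
+#   (2, 14)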
diff --git a/tests/_test_ci.py b/tests/_test_ci.py
index bbb767e..86afcaf 100644
--- a/tests/_test_ci.py
+++ b/tests/_test_ci.py
@@ -1,58 +1,58 @@
-from __future__ import print_function
-from lmfit import minimize, Parameters, conf_interval, report_ci, report_errors
-import numpy as np
-pi = np.pi
-import nose
-
-def test_ci():
-    np.random.seed(1)
-    p_true = Parameters()
-    p_true.add('amp', value=14.0)
-    p_true.add('period', value=5.33)
-    p_true.add('shift', value=0.123)
-    p_true.add('decay', value=0.010)
-
-    def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
-
-        if abs(shift) > pi / 2:
-            shift = shift - np.sign(shift) * pi
-        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
-        if data is None:
-            return model
-        return model - data
-
-
-    n = 2500
-    xmin = 0.
-    xmax = 250.0
-    noise = np.random.normal(scale=0.7215, size=n)
-    x = np.linspace(xmin, xmax, n)
-    data = residual(p_true, x) + noise
-
-    fit_params = Parameters()
-    fit_params.add('amp', value=13.0)
-    fit_params.add('period', value=4)
-    fit_params.add('shift', value=0.1)
-    fit_params.add('decay', value=0.02)
-
-    out = minimize(residual, fit_params, args=(x,), kws={'data': data})
-
-    fit = residual(fit_params, x)
-
-    print( ' N fev = ', out.nfev)
-    print( out.chisqr, out.redchi, out.nfree)
-
-    report_errors(fit_params)
-    ci, tr = conf_interval(out, sigmas=[0.674], trace=True)
-    report_ci(ci)
-    for p in out.params:
-        diff1 = ci[p][1][1] - ci[p][0][1]
-        diff2 = ci[p][2][1] - ci[p][1][1]
-        stderr = out.params[p].stderr
-        assert(abs(diff1 - stderr) / stderr < 0.05)
-        assert(abs(diff2 - stderr) / stderr < 0.05)
-
+from __future__ import print_function
+from lmfit import minimize, Parameters, conf_interval, report_ci, report_errors
+import numpy as np
+pi = np.pi
+import nose
+
+def test_ci():
+    np.random.seed(1)
+    p_true = Parameters()
+    p_true.add('amp', value=14.0)
+    p_true.add('period', value=5.33)
+    p_true.add('shift', value=0.123)
+    p_true.add('decay', value=0.010)
+
+    def residual(pars, x, data=None):
+        amp = pars['amp'].value
+        per = pars['period'].value
+        shift = pars['shift'].value
+        decay = pars['decay'].value
+
+        if abs(shift) > pi / 2:
+            shift = shift - np.sign(shift) * pi
+        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
+        if data is None:
+            return model
+        return model - data
+
+
+    n = 2500
+    xmin = 0.
+    xmax = 250.0
+    noise = np.random.normal(scale=0.7215, size=n)
+    x = np.linspace(xmin, xmax, n)
+    data = residual(p_true, x) + noise
+
+    fit_params = Parameters()
+    fit_params.add('amp', value=13.0)
+    fit_params.add('period', value=4)
+    fit_params.add('shift', value=0.1)
+    fit_params.add('decay', value=0.02)
+
+    out = minimize(residual, fit_params, args=(x,), kws={'data': data})
+
+    fit = residual(out.params, x)
+
+    print( ' N fev = ', out.nfev)
+    print( out.chisqr, out.redchi, out.nfree)
+
+    report_errors(out.params)
+    ci, tr = conf_interval(out, sigmas=[0.674], trace=True)
+    report_ci(ci)
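+    # The 0.674 level approximates a 1-sigma (~68%) confidence interval,
+    # so each half-width should match the leastsq stderr to within 5%: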
+    for p in out.params:
+        diff1 = ci[p][1][1] - ci[p][0][1]
+        diff2 = ci[p][2][1] - ci[p][1][1]
+        stderr = out.params[p].stderr
+        assert(abs(diff1 - stderr) / stderr < 0.05)
+        assert(abs(diff2 - stderr) / stderr < 0.05)
+
diff --git a/tests/_test_make_paras_and_func.py b/tests/_test_make_paras_and_func.py
index ee0a251..7a7b27f 100644
--- a/tests/_test_make_paras_and_func.py
+++ b/tests/_test_make_paras_and_func.py
@@ -1,31 +1,31 @@
-# -*- coding: utf-8 -*-
-
-import lmfit
-
-
-def test_wrap_function():
-    get_names = lambda p: [p_key for p_key in p ]
-
-    def func(A, b, c, d=5, e=10):
-        return A + b + c + d
-
-    x0 = [1, 2, 3]
-    para, f = lmfit.make_paras_and_func(func, x0)
-    assert(get_names(para) == ['A', 'b', 'c'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
-
-    x0 = [1, 2, 3, 4]
-    para, f = lmfit.make_paras_and_func(func, x0)
-    assert(get_names(para) == ['A', 'b', 'c', 'd'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
-
-    x0 = [1, 2, 3]
-    para, f = lmfit.make_paras_and_func(func, x0, {'e': 3})
-    assert(get_names(para) == ['A', 'b', 'c', 'e'])
-    y1 = f(para)
-    y2 = func(*x0)
-    assert(y1==y2)
+# -*- coding: utf-8 -*-
+
+import lmfit
+
+
+def test_wrap_function():
+    get_names = lambda p: [p_key for p_key in p ]
+
+    def func(A, b, c, d=5, e=10):
+        return A + b + c + d
+
+    x0 = [1, 2, 3]
+    para, f = lmfit.make_paras_and_func(func, x0)
+    assert(get_names(para) == ['A', 'b', 'c'])
+    y1 = f(para)
+    y2 = func(*x0)
+    assert(y1==y2)
+
+    x0 = [1, 2, 3, 4]
+    para, f = lmfit.make_paras_and_func(func, x0)
+    assert(get_names(para) == ['A', 'b', 'c', 'd'])
+    y1 = f(para)
+    y2 = func(*x0)
+    assert(y1==y2)
+
+    x0 = [1, 2, 3]
+    para, f = lmfit.make_paras_and_func(func, x0, {'e': 3})
+    assert(get_names(para) == ['A', 'b', 'c', 'e'])
+    y1 = f(para)
+    y2 = func(*x0)
+    assert(y1==y2)
diff --git a/tests/lmfit_testutils.py b/tests/lmfit_testutils.py
index f399f2c..79e89d4 100644
--- a/tests/lmfit_testutils.py
+++ b/tests/lmfit_testutils.py
@@ -1,18 +1,18 @@
-from lmfit import Parameter
-from numpy.testing import assert_allclose
-
-def assert_paramval(param, val, tol=1.e-3):
-    """assert that a named parameter's value is close to expected value"""
-
-    assert(isinstance(param, Parameter))
-    pval = param.value
-
-    assert_allclose([pval], [val], rtol=tol, atol=tol,
-                    err_msg='',verbose=True)
-
-def assert_paramattr(param, attr, val):
-    """assert that a named parameter's value is a value"""
-    assert(isinstance(param, Parameter))
-    assert(hasattr(param, attr))
-    assert(getattr(param, attr) == val)
-
+from lmfit import Parameter
+from numpy.testing import assert_allclose
+
+def assert_paramval(param, val, tol=1.e-3):
+    """assert that a named parameter's value is close to expected value"""
+
+    assert(isinstance(param, Parameter))
+    pval = param.value
+
+    assert_allclose([pval], [val], rtol=tol, atol=tol,
+                    err_msg='',verbose=True)
+
+def assert_paramattr(param, attr, val):
+    """assert that a named parameter's value is a value"""
+    assert(isinstance(param, Parameter))
+    assert(hasattr(param, attr))
+    assert(getattr(param, attr) == val)
+
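+# Usage sketch (hypothetical parameter, not from the original tests):
+#
+#   >>> p = Parameter(name='m', value=1.025)
+#   >>> assert_paramval(p, 1.0, tol=0.05)
+#   >>> assert_paramattr(p, 'vary', True)
+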
diff --git a/tests/test_1variable.py b/tests/test_1variable.py
index a832f79..3e3d530 100644
--- a/tests/test_1variable.py
+++ b/tests/test_1variable.py
@@ -1,57 +1,57 @@
-# test of fitting one variable
-# From Nick Schurch
-
-import lmfit, numpy
-from numpy.testing import assert_allclose
-
-def linear_chisq(params, x, data, errs=None):
-
-    ''' Calcs chi-squared residuals linear model (weighted by errors if given)
-    '''
-
-    if type(params) is not lmfit.parameter.Parameters:
-        msg = "Params argument is not a lmfit parameter set"
-        raise TypeError(msg)
-
-    if "m" not in params.keys():
-        msg = "No slope parameter (m) defined in the model"
-        raise KeyError(msg)
-
-    if "c" not in params.keys():
-        msg = "No intercept parameter (c) defined in the model"
-        raise KeyError(msg)
-
-    m = params["m"].value
-    c = params["c"].value
-
-    model = m*x+c
-
-    residuals = (data-model)
-    if errs is not None:
-        residuals = residuals/errs
-
-    return(residuals)
-
-def test_1var():
-    rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699,
-             0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736]
-
-    x = numpy.arange(10)+1
-    y = numpy.arange(10)+1+rands
-    y_errs = numpy.sqrt(y)/2
-
-    params = lmfit.Parameters()
-    params.add(name="m", value=1.0, vary=True)
-    params.add(name="c", value=0.0, vary=False)
-
-    out = lmfit.minimize(linear_chisq, params, args=(x, y))
-
-    lmfit.report_fit(out)
-    assert_allclose(params['m'].value, 1.025, rtol=0.02, atol=0.02)
-    assert(len(params)==2)
-    assert(out.nvarys == 1)
-    assert(out.chisqr > 0.01)
-    assert(out.chisqr < 5.00)
-
-if __name__ == '__main__':
-    test_1var()
+# test of fitting one variable
+# From Nick Schurch
+
+import lmfit, numpy
+from numpy.testing import assert_allclose
+
+def linear_chisq(params, x, data, errs=None):
+
+    '''Calculate residuals of a linear model (weighted by errors, if given).
+    '''
+
+    if type(params) is not lmfit.parameter.Parameters:
+        msg = "Params argument is not a lmfit parameter set"
+        raise TypeError(msg)
+
+    if "m" not in params.keys():
+        msg = "No slope parameter (m) defined in the model"
+        raise KeyError(msg)
+
+    if "c" not in params.keys():
+        msg = "No intercept parameter (c) defined in the model"
+        raise KeyError(msg)
+
+    m = params["m"].value
+    c = params["c"].value
+
+    model = m*x+c
+
+    residuals = (data-model)
+    if errs is not None:
+        residuals = residuals/errs
+
+    return(residuals)
+
+def test_1var():
+    rands = [-0.21698284, 0.41900591, 0.02349374, -0.218552, -0.3513699,
+             0.33418304, 0.04226855, 0.213303, 0.45948731, 0.33587736]
+
+    x = numpy.arange(10)+1
+    y = numpy.arange(10)+1+rands
+    y_errs = numpy.sqrt(y)/2
+
+    params = lmfit.Parameters()
+    params.add(name="m", value=1.0, vary=True)
+    params.add(name="c", value=0.0, vary=False)
+
+    out = lmfit.minimize(linear_chisq, params, args=(x, y))
+
+    lmfit.report_fit(out)
+    assert_allclose(out.params['m'].value, 1.025, rtol=0.02, atol=0.02)
+    assert(len(params)==2)
+    assert(out.nvarys == 1)
+    assert(out.chisqr > 0.01)
+    assert(out.chisqr < 5.00)
+
+if __name__ == '__main__':
+    test_1var()
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index 0ce77f6..aec9a8f 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -1,267 +1,267 @@
-from __future__ import print_function
-import sys
-import math
-from optparse import OptionParser
-
-from lmfit import Parameters, minimize
-
-from NISTModels import Models, ReadNistData
-
-HASPYLAB = False
-for arg in sys.argv:
-    if 'nose' in arg:
-        HASPYLAB = False
-
-if HASPYLAB:
-    try:
-        import matplotlib
-        import pylab
-        HASPYLAB = True
-    except ImportError:
-        HASPYLAB = False
-
-def ndig(a, b):
-    "precision for NIST values"
-    return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
-
-ABAR = ' |----------------+----------------+------------------+-------------------|'
-def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
-    buff = [' ======================================',
- ' %s: ' % DataSet,
- ' | Parameter Name |  Value Found   |  Certified Value | # Matching Digits |']
-    buff.append(ABAR)
-
-    val_dig_min = 200
-    err_dig_min = 200
-    fmt = ' | %s | % -.7e | % -.7e   | %2i                |'
-    for i in range(NISTdata['nparams']):
-        parname = 'b%i' % (i+1)
-        par = params[parname]
-        thisval = par.value
-        certval = NISTdata['cert_values'][i]
-        vdig    = ndig(thisval, certval)
-        pname   = (parname + ' value ' + ' '*14)[:14]
-        buff.append(fmt % (pname, thisval, certval, vdig))
-        val_dig_min = min(val_dig_min, vdig)
-
-        thiserr = par.stderr
-        certerr = NISTdata['cert_stderr'][i]
-        if thiserr is not None and myfit.errorbars:
-            edig   = ndig(thiserr, certerr)
-            ename = (parname + ' stderr' + ' '*14)[:14]
-            buff.append(fmt % (ename, thiserr, certerr, edig))
-            err_dig_min = min(err_dig_min, edig)
-
-    buff.append(ABAR)
-    sumsq = NISTdata['sum_squares']
-    try:
-        chi2 = myfit.chisqr
-        buff.append(' | Sum of Squares | %.7e  | %.7e    |  %2i               |'
-                    % (chi2, sumsq, ndig(chi2, sumsq)))
-    except:
-        pass
-    buff.append(ABAR)
-    if not myfit.errorbars:
-        buff.append(' |          * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * *              |')
-        err_dig_min = 0
-    if err_dig_min < 199:
-        buff.append(' Worst agreement: %i digits for value, %i digits for error '
-                    % (val_dig_min, err_dig_min))
-    else:
-        buff.append(' Worst agreement: %i digits' % (val_dig_min))
-    return val_dig_min, '\n'.join(buff)
-
-def NIST_Dataset(DataSet, method='leastsq', start='start2',
-                 plot=True, verbose=False):
-
-    NISTdata = ReadNistData(DataSet)
-    resid, npar, dimx = Models[DataSet]
-    y = NISTdata['y']
-    x = NISTdata['x']
-
-    params = Parameters()
-    for i in range(npar):
-        pname = 'b%i' % (i+1)
-        cval  = NISTdata['cert_values'][i]
-        cerr  = NISTdata['cert_stderr'][i]
-        pval1 = NISTdata[start][i]
-        params.add(pname, value=pval1)
-
-    myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
-    digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata)
-    if verbose:
-        print(buff)
-    if plot and HASPYLAB:
-        fit = -resid(myfit.params, x, )
-        pylab.plot(x, y, 'ro')
-        pylab.plot(x, fit, 'k+-')
-        pylab.show()
-
-    return digs > 1
-
-def build_usage():
-    modelnames = []
-    ms = ''
-    for d in sorted(Models.keys()):
-        ms = ms + ' %s ' % d
-        if len(ms) > 55:
-            modelnames.append(ms)
-            ms = '    '
-    modelnames.append(ms)
-    modelnames = '\n'.join(modelnames)
-
-    usage = """
- === Test Fit to NIST StRD Models ===
-
-usage:
-------
-    python fit_NIST.py [options] Model Start
-
-where Start is one of 'start1','start2' or 'cert', for different
-starting values, and Model is one of
-
-    %s
-
-if Model = 'all', all models and starting values will be run.
-
-options:
---------
-  -m  name of fitting method.  One of:
-          leastsq, nelder, powell, lbfgsb, bfgs,
-          tnc, cobyla, slsqp, cg, newto-cg
-      leastsq (Levenberg-Marquardt) is the default
-""" % modelnames
-    return usage
-
-############################
-def run_interactive():
-    usage = build_usage()
-    parser = OptionParser(usage=usage, prog="fit-NIST.py")
-
-    parser.add_option("-m", "--method", dest="method",
-                      metavar='METH',
-                      default='leastsq',
-                      help="set method name, default = 'leastsq'")
-
-    (opts, args) = parser.parse_args()
-    dset = ''
-    start = 'start2'
-    if len(args) > 0:
-        dset = args[0]
-    if len(args) > 1:
-        start = args[1]
-
-    if dset.lower() == 'all':
-        tpass = 0
-        tfail = 0
-        failures = []
-        dsets = sorted(Models.keys())
-        for dset in dsets:
-            for start in ('start1', 'start2', 'cert'):
-                if NIST_Dataset(dset, method=opts.method, start=start,
-                                plot=False, verbose=True):
-                    tpass += 1
-                else:
-                    tfail += 1
-                    failures.append("   %s (starting at '%s')" % (dset, start))
-        print('--------------------------------------')
-        print(' Fit Method: %s ' %  opts.method)
-        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
-        print(' Tests Failed for:\n %s' % '\n '.join(failures))
-        print('--------------------------------------')
-    elif dset not in Models:
-        print(usage)
-    else:
-        return NIST_Dataset(dset, method=opts.method,
-                            start=start, plot=True, verbose=True)
-
-def RunNIST_Model(model):
-    out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
-    out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
-    print("NIST Test" , model, out1, out2)
-    assert(out1 or out2)
-    return out1 or out2
-
-def test_Bennett5():
-    return RunNIST_Model('Bennett5')
-
-def test_BoxBOD():
-    return RunNIST_Model('BoxBOD')
-
-def test_Chwirut1():
-    return RunNIST_Model('Chwirut1')
-
-def test_Chwirut2():
-    return RunNIST_Model('Chwirut2')
-
-def test_DanWood():
-    return RunNIST_Model('DanWood')
-
-def test_ENSO():
-    return RunNIST_Model('ENSO')
-
-def test_Eckerle4():
-    return RunNIST_Model('Eckerle4')
-
-def test_Gauss1():
-    return RunNIST_Model('Gauss1')
-
-def test_Gauss2():
-    return RunNIST_Model('Gauss2')
-
-def test_Gauss3():
-    return RunNIST_Model('Gauss3')
-
-def test_Hahn1():
-    return RunNIST_Model('Hahn1')
-
-def test_Kirby2():
-    return RunNIST_Model('Kirby2')
-
-def test_Lanczos1():
-    return RunNIST_Model('Lanczos1')
-
-def test_Lanczos2():
-    return RunNIST_Model('Lanczos2')
-
-def test_Lanczos3():
-    return RunNIST_Model('Lanczos3')
-
-def test_MGH09():
-    return RunNIST_Model('MGH09')
-
-def test_MGH10():
-    return RunNIST_Model('MGH10')
-
-def test_MGH17():
-    return RunNIST_Model('MGH17')
-
-def test_Misra1a():
-    return RunNIST_Model('Misra1a')
-
-def test_Misra1b():
-    return RunNIST_Model('Misra1b')
-
-def test_Misra1c():
-    return RunNIST_Model('Misra1c')
-
-def test_Misra1d():
-    return RunNIST_Model('Misra1d')
-
-def test_Nelson():
-    return RunNIST_Model('Nelson')
-
-def test_Rat42():
-    return RunNIST_Model('Rat42')
-
-def test_Rat43():
-    return RunNIST_Model('Rat43')
-
-def test_Roszman1():
-    return RunNIST_Model('Roszman1')
-
-def test_Thurber():
-    return RunNIST_Model('Thurber')
-
-if __name__ == '__main__':
-    run_interactive()
+from __future__ import print_function
+import sys
+import math
+from optparse import OptionParser
+
+from lmfit import Parameters, minimize
+
+from NISTModels import Models, ReadNistData
+
+HASPYLAB = True  # plotting is disabled below when running under nose
+for arg in sys.argv:
+    if 'nose' in arg:
+        HASPYLAB = False
+
+if HASPYLAB:
+    try:
+        import matplotlib
+        import pylab
+        HASPYLAB = True
+    except ImportError:
+        HASPYLAB = False
+
+def ndig(a, b):
+    "precision for NIST values"
+    return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
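+# e.g. ndig(1.0001, 1.0) is 4: the values agree to about 4 digits.
+# Note the comparison uses absolute values, so a sign flip in a fitted
+# parameter does not reduce the digit count.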
+
+ABAR = ' |----------------+----------------+------------------+-------------------|'
+def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
+    buff = [' ======================================',
+ ' %s: ' % DataSet,
+ ' | Parameter Name |  Value Found   |  Certified Value | # Matching Digits |']
+    buff.append(ABAR)
+
+    val_dig_min = 200
+    err_dig_min = 200
+    fmt = ' | %s | % -.7e | % -.7e   | %2i                |'
+    for i in range(NISTdata['nparams']):
+        parname = 'b%i' % (i+1)
+        par = params[parname]
+        thisval = par.value
+        certval = NISTdata['cert_values'][i]
+        vdig    = ndig(thisval, certval)
+        pname   = (parname + ' value ' + ' '*14)[:14]
+        buff.append(fmt % (pname, thisval, certval, vdig))
+        val_dig_min = min(val_dig_min, vdig)
+
+        thiserr = par.stderr
+        certerr = NISTdata['cert_stderr'][i]
+        if thiserr is not None and myfit.errorbars:
+            edig   = ndig(thiserr, certerr)
+            ename = (parname + ' stderr' + ' '*14)[:14]
+            buff.append(fmt % (ename, thiserr, certerr, edig))
+            err_dig_min = min(err_dig_min, edig)
+
+    buff.append(ABAR)
+    sumsq = NISTdata['sum_squares']
+    try:
+        chi2 = myfit.chisqr
+        buff.append(' | Sum of Squares | %.7e  | %.7e    |  %2i               |'
+                    % (chi2, sumsq, ndig(chi2, sumsq)))
+    except:
+        pass
+    buff.append(ABAR)
+    if not myfit.errorbars:
+        buff.append(' |          * * * * COULD NOT ESTIMATE UNCERTAINTIES * * * *              |')
+        err_dig_min = 0
+    if err_dig_min < 199:
+        buff.append(' Worst agreement: %i digits for value, %i digits for error '
+                    % (val_dig_min, err_dig_min))
+    else:
+        buff.append(' Worst agreement: %i digits' % (val_dig_min))
+    return val_dig_min, '\n'.join(buff)
+
+def NIST_Dataset(DataSet, method='leastsq', start='start2',
+                 plot=True, verbose=False):
+
+    NISTdata = ReadNistData(DataSet)
+    resid, npar, dimx = Models[DataSet]
+    y = NISTdata['y']
+    x = NISTdata['x']
+
+    params = Parameters()
+    for i in range(npar):
+        pname = 'b%i' % (i+1)
+        cval  = NISTdata['cert_values'][i]
+        cerr  = NISTdata['cert_stderr'][i]
+        pval1 = NISTdata[start][i]
+        params.add(pname, value=pval1)
+
+    myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
+    digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata)
+    if verbose:
+        print(buff)
+    if plot and HASPYLAB:
+        fit = -resid(myfit.params, x)
+        pylab.plot(x, y, 'ro')
+        pylab.plot(x, fit, 'k+-')
+        pylab.show()
+
+    return digs > 1
+
+def build_usage():
+    modelnames = []
+    ms = ''
+    for d in sorted(Models.keys()):
+        ms = ms + ' %s ' % d
+        if len(ms) > 55:
+            modelnames.append(ms)
+            ms = '    '
+    modelnames.append(ms)
+    modelnames = '\n'.join(modelnames)
+
+    usage = """
+ === Test Fit to NIST StRD Models ===
+
+usage:
+------
+    python fit_NIST.py [options] Model Start
+
+where Start is one of 'start1', 'start2' or 'cert', for different
+starting values, and Model is one of
+
+    %s
+
+if Model = 'all', all models and starting values will be run.
+
+options:
+--------
+  -m  name of fitting method.  One of:
+          leastsq, nelder, powell, lbfgsb, bfgs,
+          tnc, cobyla, slsqp, cg, newton-cg
+      leastsq (Levenberg-Marquardt) is the default
+""" % modelnames
+    return usage
+
+############################
+def run_interactive():
+    usage = build_usage()
+    parser = OptionParser(usage=usage, prog="fit-NIST.py")
+
+    parser.add_option("-m", "--method", dest="method",
+                      metavar='METH',
+                      default='leastsq',
+                      help="set method name, default = 'leastsq'")
+
+    (opts, args) = parser.parse_args()
+    dset = ''
+    start = 'start2'
+    if len(args) > 0:
+        dset = args[0]
+    if len(args) > 1:
+        start = args[1]
+
+    if dset.lower() == 'all':
+        tpass = 0
+        tfail = 0
+        failures = []
+        dsets = sorted(Models.keys())
+        for dset in dsets:
+            for start in ('start1', 'start2', 'cert'):
+                if NIST_Dataset(dset, method=opts.method, start=start,
+                                plot=False, verbose=True):
+                    tpass += 1
+                else:
+                    tfail += 1
+                    failures.append("   %s (starting at '%s')" % (dset, start))
+        print('--------------------------------------')
+        print(' Fit Method: %s ' %  opts.method)
+        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+        print(' Tests Failed for:\n %s' % '\n '.join(failures))
+        print('--------------------------------------')
+    elif dset not in Models:
+        print(usage)
+    else:
+        return NIST_Dataset(dset, method=opts.method,
+                            start=start, plot=True, verbose=True)
+
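+# Test helper: a dataset is considered passing if the fit succeeds from at
+# least one of the two standard NIST starting points.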
+def RunNIST_Model(model):
+    out1 = NIST_Dataset(model, start='start1', plot=False, verbose=False)
+    out2 = NIST_Dataset(model, start='start2', plot=False, verbose=False)
+    print("NIST Test" , model, out1, out2)
+    assert(out1 or out2)
+    return out1 or out2
+
+def test_Bennett5():
+    return RunNIST_Model('Bennett5')
+
+def test_BoxBOD():
+    return RunNIST_Model('BoxBOD')
+
+def test_Chwirut1():
+    return RunNIST_Model('Chwirut1')
+
+def test_Chwirut2():
+    return RunNIST_Model('Chwirut2')
+
+def test_DanWood():
+    return RunNIST_Model('DanWood')
+
+def test_ENSO():
+    return RunNIST_Model('ENSO')
+
+def test_Eckerle4():
+    return RunNIST_Model('Eckerle4')
+
+def test_Gauss1():
+    return RunNIST_Model('Gauss1')
+
+def test_Gauss2():
+    return RunNIST_Model('Gauss2')
+
+def test_Gauss3():
+    return RunNIST_Model('Gauss3')
+
+def test_Hahn1():
+    return RunNIST_Model('Hahn1')
+
+def test_Kirby2():
+    return RunNIST_Model('Kirby2')
+
+def test_Lanczos1():
+    return RunNIST_Model('Lanczos1')
+
+def test_Lanczos2():
+    return RunNIST_Model('Lanczos2')
+
+def test_Lanczos3():
+    return RunNIST_Model('Lanczos3')
+
+def test_MGH09():
+    return RunNIST_Model('MGH09')
+
+def test_MGH10():
+    return RunNIST_Model('MGH10')
+
+def test_MGH17():
+    return RunNIST_Model('MGH17')
+
+def test_Misra1a():
+    return RunNIST_Model('Misra1a')
+
+def test_Misra1b():
+    return RunNIST_Model('Misra1b')
+
+def test_Misra1c():
+    return RunNIST_Model('Misra1c')
+
+def test_Misra1d():
+    return RunNIST_Model('Misra1d')
+
+def test_Nelson():
+    return RunNIST_Model('Nelson')
+
+def test_Rat42():
+    return RunNIST_Model('Rat42')
+
+def test_Rat43():
+    return RunNIST_Model('Rat43')
+
+def test_Roszman1():
+    return RunNIST_Model('Roszman1')
+
+def test_Thurber():
+    return RunNIST_Model('Thurber')
+
+if __name__ == '__main__':
+    run_interactive()
diff --git a/tests/test_algebraic_constraint.py b/tests/test_algebraic_constraint.py
index a256169..1764b7f 100644
--- a/tests/test_algebraic_constraint.py
+++ b/tests/test_algebraic_constraint.py
@@ -1,135 +1,159 @@
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-from lmfit import Parameters, Parameter, Minimizer
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-from lmfit.printfuncs import report_fit
-
-def test_constraints1():
-    def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                      pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
-        if data is None:
-            return model
-        if sigma is None:
-            return (model - data)
-        return (model - data)/sigma
-
-
-    n = 601
-    xmin = 0.
-    xmax = 20.0
-    x = linspace(xmin, xmax, n)
-
-    data = (gaussian(x, 21, 8.1, 1.2) +
-            lorentzian(x, 10, 9.6, 2.4) +
-            random.normal(scale=0.23,  size=n) +
-            x*0.5)
-
-
-    pfit = Parameters()
-    pfit.add(name='amp_g',  value=10)
-    pfit.add(name='cen_g',  value=9)
-    pfit.add(name='wid_g',  value=1)
-    
-    pfit.add(name='amp_tot',  value=20)
-    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
-    pfit.add(name='cen_l',  expr='1.5+cen_g')
-    pfit.add(name='wid_l',  expr='2*wid_g')
-    
-    pfit.add(name='line_slope', value=0.0)
-    pfit.add(name='line_off', value=0.0)
-            
-    sigma = 0.021  # estimate of data error (for all data points)
-
-    myfit = Minimizer(residual, pfit,
-                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                      scale_covar=True)
-
-    myfit.prepare_fit()
-    init = residual(myfit.params, x)
-
-    result = myfit.leastsq()
-
-    print(' Nfev = ', result.nfev)
-    print( result.chisqr, result.redchi, result.nfree)
-
-    report_fit(result.params)
-    pfit= result.params
-    fit = residual(result.params, x)
-    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
-    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
-    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
-
-
-def test_constraints2():
-    """add a user-defined function to symbol table"""
-    def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                      pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
-        if data is None:
-            return model
-        if sigma is None:
-            return (model - data)
-        return (model - data)/sigma
-
-
-    n = 601
-    xmin = 0.
-    xmax = 20.0
-    x = linspace(xmin, xmax, n)
-
-    data = (gaussian(x, 21, 8.1, 1.2) +
-            lorentzian(x, 10, 9.6, 2.4) +
-            random.normal(scale=0.23,  size=n) +
-            x*0.5)
-
-    pfit = Parameters()
-    pfit.add(name='amp_g',  value=10)
-    pfit.add(name='cen_g',  value=9)
-    pfit.add(name='wid_g',  value=1)
-    
-    pfit.add(name='amp_tot',  value=20)
-    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
-    pfit.add(name='cen_l',  expr='1.5+cen_g')
-    pfit.add(name='line_slope', value=0.0)
-    pfit.add(name='line_off', value=0.0)
-
-    sigma = 0.021  # estimate of data error (for all data points)
-
-    myfit = Minimizer(residual, pfit,
-                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                      scale_covar=True)
-
-    def width_func(wpar):
-        """ """
-        return 2*wpar
-
-    myfit.params._asteval.symtable['wfun'] = width_func
-    myfit.params.add(name='wid_l', expr='wfun(wid_g)')
-
-    result = myfit.leastsq()
-
-    print(' Nfev = ', result.nfev)
-    print( result.chisqr, result.redchi, result.nfree)
-
-    report_fit(result.params)
-    pfit= result.params
-    fit = residual(result.params, x)
-    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
-    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
-    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
-
-if __name__ == '__main__':
-    test_constraints1()
-    test_constraints2()
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from lmfit import Parameters, Parameter, Minimizer, Model
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
+from lmfit.printfuncs import report_fit
+
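+# Check that algebraic constraints supplied as expr strings (e.g.
+# 'amp_tot - amp_g', '1.5+cen_g', '2*wid_g') hold exactly in the result.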
+def test_constraints1():
+    def residual(pars, x, sigma=None, data=None):
+        yg = gaussian(x, pars['amp_g'].value,
+                      pars['cen_g'].value, pars['wid_g'].value)
+        yl = lorentzian(x, pars['amp_l'].value,
+                   pars['cen_l'].value, pars['wid_l'].value)
+
+        slope = pars['line_slope'].value
+        offset = pars['line_off'].value
+        model =  yg +  yl + offset + x * slope
+        if data is None:
+            return model
+        if sigma is None:
+            return (model - data)
+        return (model - data)/sigma
+
+
+    n = 601
+    xmin = 0.
+    xmax = 20.0
+    x = linspace(xmin, xmax, n)
+
+    data = (gaussian(x, 21, 8.1, 1.2) +
+            lorentzian(x, 10, 9.6, 2.4) +
+            random.normal(scale=0.23,  size=n) +
+            x*0.5)
+
+
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='wid_l',  expr='2*wid_g')
+
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
+
+    sigma = 0.021  # estimate of data error (for all data points)
+
+    myfit = Minimizer(residual, pfit,
+                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+                      scale_covar=True)
+
+    myfit.prepare_fit()
+    init = residual(myfit.params, x)
+
+    result = myfit.leastsq()
+
+    print(' Nfev = ', result.nfev)
+    print(result.chisqr, result.redchi, result.nfree)
+
+    report_fit(result.params)
+    pfit = result.params
+    fit = residual(result.params, x)
+    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
+    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
+    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
+
+def test_constraints2():
+    """add a user-defined function to symbol table"""
+    def residual(pars, x, sigma=None, data=None):
+        yg = gaussian(x, pars['amp_g'].value,
+                      pars['cen_g'].value, pars['wid_g'].value)
+        yl = lorentzian(x, pars['amp_l'].value,
+                   pars['cen_l'].value, pars['wid_l'].value)
+
+        slope = pars['line_slope'].value
+        offset = pars['line_off'].value
+        model =  yg +  yl + offset + x * slope
+        if data is None:
+            return model
+        if sigma is None:
+            return (model - data)
+        return (model - data)/sigma
+
+
+    n = 601
+    xmin = 0.
+    xmax = 20.0
+    x = linspace(xmin, xmax, n)
+
+    data = (gaussian(x, 21, 8.1, 1.2) +
+            lorentzian(x, 10, 9.6, 2.4) +
+            random.normal(scale=0.23,  size=n) +
+            x*0.5)
+
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
+
+    sigma = 0.021  # estimate of data error (for all data points)
+
+    myfit = Minimizer(residual, pfit,
+                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+                      scale_covar=True)
+
+    def width_func(wpar):
+        """ """
+        return 2*wpar
+
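+    # Registering a plain Python function in the asteval symbol table makes
+    # it callable from constraint expressions such as 'wfun(wid_g)'.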
+    myfit.params._asteval.symtable['wfun'] = width_func
+
+    try:
+        myfit.params.add(name='wid_l', expr='wfun(wid_g)')
+    except Exception:
+        assert(False)
+
+    result = myfit.leastsq()
+
+    print(' Nfev = ', result.nfev)
+    print(result.chisqr, result.redchi, result.nfree)
+    report_fit(result.params)
+    pfit = result.params
+    fit = residual(result.params, x)
+    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
+    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
+    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
+
+
+def test_constraints3():
+    """test a constraint with simple function call"""
+    x = [1723, 1773, 1823, 1523, 1773, 1033.03078,
+         1042.98077, 1047.90937, 1053.95899, 1057.94906,
+         1063.13788, 1075.74218, 1086.03102]
+    y = [0.79934, -0.31876, -0.46852, 0.05, -0.21,
+         11.1708, 10.31844, 9.73069, 9.21319, 9.12457,
+         9.05243, 8.66407, 8.29664]
+
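+    # VFT (Vogel-Fulcher-Tammann) form: ninf + A/(T - T0).  Model() takes
+    # initial parameter values from the function's keyword defaults; the
+    # param hint below adds a derived parameter D = A*log(10)/T0 that is
+    # computed from its constraint expression rather than varied.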
+    def VFT(T, ninf=-3, A=5e3, T0=800):
+        return ninf + A/(T-T0)
+
+    vftModel = Model(VFT)
+    vftModel.set_param_hint('D', vary=False, expr=r'A*log(10)/T0')
+    result = vftModel.fit(y, T=x)
+    assert(result.params['A'].value > 2600.0)
+    assert(result.params['A'].value < 2650.0)
+    assert(result.params['D'].value > 7.0)
+    assert(result.params['D'].value < 7.5)
+
+if __name__ == '__main__':
+    test_constraints1()
+    test_constraints2()
+    test_constraints3()
diff --git a/tests/test_algebraic_constraint2.py b/tests/test_algebraic_constraint2.py
index 9b4cd88..45a9b6a 100644
--- a/tests/test_algebraic_constraint2.py
+++ b/tests/test_algebraic_constraint2.py
@@ -1,103 +1,103 @@
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-from lmfit import Parameters, Parameter, Minimizer
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-from lmfit.printfuncs import report_fit
-import sys
-
-
-# Turn off plotting if run by nosetests.
-WITHPLOT = True
-for arg in sys.argv:
-    if 'nose' in arg:
-        WITHPLOT = False
-
-if WITHPLOT:
-    try:
-        import matplotlib
-        import pylab
-    except ImportError:
-        WITHPLOT = False
-
-
-def test_constraints(with_plot=True):
-    with_plot = with_plot and WITHPLOT
-
-    def residual(pars, x, sigma=None, data=None):
-        yg = gaussian(x, pars['amp_g'].value,
-                   pars['cen_g'].value, pars['wid_g'].value)
-        yl = lorentzian(x, pars['amp_l'].value,
-                   pars['cen_l'].value, pars['wid_l'].value)
-
-        slope = pars['line_slope'].value
-        offset = pars['line_off'].value
-        model =  yg +  yl + offset + x * slope
-        if data is None:
-            return model
-        if sigma is None:
-            return (model - data)
-        return (model - data) / sigma
-
-
-    n = 201
-    xmin = 0.
-    xmax = 20.0
-    x = linspace(xmin, xmax, n)
-
-    data = (gaussian(x, 21, 8.1, 1.2) +
-            lorentzian(x, 10, 9.6, 2.4) +
-            random.normal(scale=0.23,  size=n) +
-            x*0.5)
-
-    if with_plot:
-        pylab.plot(x, data, 'r+')
-
-    pfit = Parameters()
-    pfit.add(name='amp_g',  value=10)
-    pfit.add(name='cen_g',  value=9)
-    pfit.add(name='wid_g',  value=1)
-    
-    pfit.add(name='amp_tot',  value=20)
-    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
-    pfit.add(name='cen_l',  expr='1.5+cen_g')
-    pfit.add(name='wid_l',  expr='2*wid_g')
-    
-    pfit.add(name='line_slope', value=0.0)
-    pfit.add(name='line_off', value=0.0)
-
-    sigma = 0.021  # estimate of data error (for all data points)
-
-    myfit = Minimizer(residual, pfit,
-                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                      scale_covar=True)
-
-    myfit.prepare_fit()
-    init = residual(myfit.params, x)
-
-    result = myfit.leastsq()
-
-    print(' Nfev = ', result.nfev)
-    print( result.chisqr, result.redchi, result.nfree)
-
-    report_fit(result.params, min_correl=0.3)
-
-    fit = residual(result.params, x)
-    if with_plot:
-        pylab.plot(x, fit, 'b-')
-    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
-    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
-    assert(result.params['wid_l'].value == 2 * result.params['wid_g'].value)
-
-    # now, change fit slightly and re-run
-    myfit.params['wid_l'].expr = '1.25*wid_g'
-    result = myfit.leastsq()
-    report_fit(result.params, min_correl=0.4)
-    fit2 = residual(result.params, x)
-    if with_plot:
-        pylab.plot(x, fit2, 'k')
-        pylab.show()
-
-    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
-    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
-    assert(result.params['wid_l'].value == 1.25 * result.params['wid_g'].value)
-
-test_constraints()
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from lmfit import Parameters, Parameter, Minimizer
+from lmfit.lineshapes import gaussian, lorentzian, pvoigt
+from lmfit.printfuncs import report_fit
+import sys
+
+
+# Turn off plotting if run by nosetests.
+WITHPLOT = True
+for arg in sys.argv:
+    if 'nose' in arg:
+        WITHPLOT = False
+
+if WITHPLOT:
+    try:
+        import matplotlib
+        import pylab
+    except ImportError:
+        WITHPLOT = False
+
+
+def test_constraints(with_plot=True):
+    with_plot = with_plot and WITHPLOT
+
+    def residual(pars, x, sigma=None, data=None):
+        yg = gaussian(x, pars['amp_g'].value,
+                   pars['cen_g'].value, pars['wid_g'].value)
+        yl = lorentzian(x, pars['amp_l'].value,
+                   pars['cen_l'].value, pars['wid_l'].value)
+
+        slope = pars['line_slope'].value
+        offset = pars['line_off'].value
+        model =  yg +  yl + offset + x * slope
+        if data is None:
+            return model
+        if sigma is None:
+            return (model - data)
+        return (model - data) / sigma
+
+
+    n = 201
+    xmin = 0.
+    xmax = 20.0
+    x = linspace(xmin, xmax, n)
+
+    data = (gaussian(x, 21, 8.1, 1.2) +
+            lorentzian(x, 10, 9.6, 2.4) +
+            random.normal(scale=0.23,  size=n) +
+            x*0.5)
+
+    if with_plot:
+        pylab.plot(x, data, 'r+')
+
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+    
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='wid_l',  expr='2*wid_g')
+    
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
+
+    sigma = 0.021  # estimate of data error (for all data points)
+
+    myfit = Minimizer(residual, pfit,
+                      fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+                      scale_covar=True)
+
+    myfit.prepare_fit()
+    init = residual(myfit.params, x)
+
+    result = myfit.leastsq()
+
+    print(' Nfev = ', result.nfev)
+    print(result.chisqr, result.redchi, result.nfree)
+
+    report_fit(result.params, min_correl=0.3)
+
+    fit = residual(result.params, x)
+    if with_plot:
+        pylab.plot(x, fit, 'b-')
+    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
+    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
+    assert(result.params['wid_l'].value == 2 * result.params['wid_g'].value)
+
+    # now, change fit slightly and re-run
+    myfit.params['wid_l'].expr = '1.25*wid_g'
+    result = myfit.leastsq()
+    report_fit(result.params, min_correl=0.4)
+    fit2 = residual(result.params, x)
+    if with_plot:
+        pylab.plot(x, fit2, 'k')
+        pylab.show()
+
+    assert(result.params['cen_l'].value == 1.5 + result.params['cen_g'].value)
+    assert(result.params['amp_l'].value == result.params['amp_tot'].value - result.params['amp_g'].value)
+    assert(result.params['wid_l'].value == 1.25 * result.params['wid_g'].value)
+
+test_constraints()
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
index bcad50c..98b6338 100644
--- a/tests/test_basicfit.py
+++ b/tests/test_basicfit.py
@@ -1,47 +1,47 @@
-import numpy as np
-from lmfit import minimize, Parameters, Parameter, report_fit
-from lmfit_testutils import assert_paramval, assert_paramattr
-
-
-def test_basic():
-    # create data to be fitted
-    x = np.linspace(0, 15, 301)
-    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
-            np.random.normal(size=len(x), scale=0.2) )
-
-    # define objective function: returns the array to be minimized
-    def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
-
-        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
-        return model - data
-
-    # create a set of Parameters
-    params = Parameters()
-    params.add('amp',   value= 10,  min=0)
-    params.add('decay', value= 0.1)
-    params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
-    params.add('omega', value= 3.0)
-
-    # do fit, here with leastsq model
-    result = minimize(fcn2min, params, args=(x, data))
-
-    # calculate final result
-    final = data + result.residual
-
-    # report_fit(result)
-
-    assert(result.nfev >   5)
-    assert(result.nfev < 500)
-    assert(result.chisqr > 1)
-    assert(result.nvarys == 4)
-    assert_paramval(result.params['amp'],   5.03, tol=0.05)
-    assert_paramval(result.params['omega'], 2.0, tol=0.05)
-
-
-if __name__ == '__main__':
-    test_basic()
+import numpy as np
+from lmfit import minimize, Parameters, Parameter, report_fit
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+
+def test_basic():
+    # create data to be fitted
+    x = np.linspace(0, 15, 301)
+    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+            np.random.normal(size=len(x), scale=0.2) )
+
+    # define objective function: returns the array to be minimized
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        amp = params['amp'].value
+        shift = params['shift'].value
+        omega = params['omega'].value
+        decay = params['decay'].value
+
+        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('amp',   value= 10,  min=0)
+    params.add('decay', value= 0.1)
+    params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
+    params.add('omega', value= 3.0)
+
+    # do fit, here with leastsq model
+    result = minimize(fcn2min, params, args=(x, data))
+
+    # calculate final result
+    final = data + result.residual
+
+    # report_fit(result)
+
+    assert(result.nfev >   5)
+    assert(result.nfev < 500)
+    assert(result.chisqr > 1)
+    assert(result.nvarys == 4)
+    assert_paramval(result.params['amp'],   5.03, tol=0.05)
+    assert_paramval(result.params['omega'], 2.0, tol=0.05)
+
+
+if __name__ == '__main__':
+    test_basic()
diff --git a/tests/test_bounded_jacobian.py b/tests/test_bounded_jacobian.py
index a8fc2f4..810a505 100644
--- a/tests/test_bounded_jacobian.py
+++ b/tests/test_bounded_jacobian.py
@@ -1,43 +1,43 @@
-from lmfit import Parameters, minimize, fit_report
-from lmfit_testutils import assert_paramval, assert_paramattr
-
-import numpy as np
-
-
-def test_bounded_jacobian():
-    pars = Parameters()
-    pars.add('x0', value=2.0)
-    pars.add('x1', value=2.0, min=1.5)
-
-    global jac_count
-
-    jac_count = 0
-
-    def resid(params):
-        x0 = params['x0'].value
-        x1 = params['x1'].value
-        return np.array([10 * (x1 - x0*x0), 1-x0])
-
-    def jac(params):
-        global jac_count
-        jac_count += 1
-        x0 = params['x0'].value
-        return np.array([[-20*x0, 10], [-1, 0]])
-    
-    out0 = minimize(resid, pars, Dfun=None)
-
-    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
-    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
-    assert(jac_count == 0)
-
-    out1 = minimize(resid, pars, Dfun=jac)
-
-    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
-    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
-    assert(jac_count > 5)
-
-    print(fit_report(out1, show_correl=True))
-
-
-if __name__ == '__main__':
-    test_bounded_jacobian()
+from lmfit import Parameters, minimize, fit_report
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+import numpy as np
+
+
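+# Verify that a user-supplied analytic Jacobian (Dfun) is actually called
+# (jac_count > 5) and reaches the same bounded solution as the default
+# finite-difference estimate (jac_count == 0 when Dfun is None).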
+def test_bounded_jacobian():
+    pars = Parameters()
+    pars.add('x0', value=2.0)
+    pars.add('x1', value=2.0, min=1.5)
+
+    global jac_count
+
+    jac_count = 0
+
+    def resid(params):
+        x0 = params['x0'].value
+        x1 = params['x1'].value
+        return np.array([10 * (x1 - x0*x0), 1-x0])
+
+    def jac(params):
+        global jac_count
+        jac_count += 1
+        x0 = params['x0'].value
+        return np.array([[-20*x0, 10], [-1, 0]])
+    
+    out0 = minimize(resid, pars, Dfun=None)
+
+    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
+    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
+    assert(jac_count == 0)
+
+    out1 = minimize(resid, pars, Dfun=jac)
+
+    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
+    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
+    assert(jac_count > 5)
+
+    print(fit_report(out1, show_correl=True))
+
+
+if __name__ == '__main__':
+    test_bounded_jacobian()
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
index c23ee94..99c962d 100644
--- a/tests/test_bounds.py
+++ b/tests/test_bounds.py
@@ -1,54 +1,54 @@
-from lmfit import Parameters, minimize, fit_report
-from lmfit_testutils import assert_paramval, assert_paramattr
-
-from numpy import linspace, zeros, sin, exp, random, pi, sign
-
-def test_bounds():
-    p_true = Parameters()
-    p_true.add('amp', value=14.0)
-    p_true.add('period', value=5.4321)
-    p_true.add('shift', value=0.12345)
-    p_true.add('decay', value=0.01000)
-
-    def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
-
-        if abs(shift) > pi/2:
-            shift = shift - sign(shift)*pi
-
-        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
-        if data is None:
-            return model
-        return (model - data)
-
-    n = 1500
-    xmin = 0.
-    xmax = 250.0
-    random.seed(0)
-    noise = random.normal(scale=2.80, size=n)
-    x     = linspace(xmin, xmax, n)
-    data  = residual(p_true, x) + noise
-
-    fit_params = Parameters()
-    fit_params.add('amp', value=13.0, max=20, min=0.0)
-    fit_params.add('period', value=2, max=10)
-    fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
-    fit_params.add('decay', value=0.02, max=0.10, min=0.00)
-
-    out = minimize(residual, fit_params, args=(x,), kws={'data':data})
-
-    fit = residual(out.params, x)
-
-    assert(out.nfev  > 10)
-    assert(out.nfree > 50)
-    assert(out.chisqr > 1.0)
-
-    print(fit_report(out, show_correl=True, modelpars=p_true))
-    assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
-    assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
-
-if __name__ == '__main__':
-    test_bounds()
+from lmfit import Parameters, minimize, fit_report
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+from numpy import linspace, zeros, sin, exp, random, pi, sign
+
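+# Fit a decaying sine wave with box constraints (min/max) on all four
+# parameters and check the recovered 'decay' and 'shift' against the
+# values used to generate the data.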
+def test_bounds():
+    p_true = Parameters()
+    p_true.add('amp', value=14.0)
+    p_true.add('period', value=5.4321)
+    p_true.add('shift', value=0.12345)
+    p_true.add('decay', value=0.01000)
+
+    def residual(pars, x, data=None):
+        amp = pars['amp'].value
+        per = pars['period'].value
+        shift = pars['shift'].value
+        decay = pars['decay'].value
+
+        if abs(shift) > pi/2:
+            shift = shift - sign(shift)*pi
+
+        model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+        if data is None:
+            return model
+        return (model - data)
+
+    n = 1500
+    xmin = 0.
+    xmax = 250.0
+    random.seed(0)
+    noise = random.normal(scale=2.80, size=n)
+    x     = linspace(xmin, xmax, n)
+    data  = residual(p_true, x) + noise
+
+    fit_params = Parameters()
+    fit_params.add('amp', value=13.0, max=20, min=0.0)
+    fit_params.add('period', value=2, max=10)
+    fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
+    fit_params.add('decay', value=0.02, max=0.10, min=0.00)
+
+    out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+    fit = residual(out.params, x)
+
+    assert(out.nfev  > 10)
+    assert(out.nfree > 50)
+    assert(out.chisqr > 1.0)
+
+    print(fit_report(out, show_correl=True, modelpars=p_true))
+    assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
+    assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
+
+if __name__ == '__main__':
+    test_bounds()
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
index 7d90d00..2b5d290 100644
--- a/tests/test_confidence.py
+++ b/tests/test_confidence.py
@@ -1,44 +1,88 @@
-import numpy as np
-from numpy.testing import assert_allclose
-
-import lmfit
-from lmfit_testutils import assert_paramval
-
-def residual(params, x, data):
-    a = params['a'].value
-    b = params['b'].value
-    return data - 1.0/(a*x)+b
-
-def test_confidence1():
-    x = np.linspace(0.3,10,100)
-    np.random.seed(0)
-   
-    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
-    
-    pars = lmfit.Parameters()
-    pars.add_many(('a', 0.1), ('b', 1))
-    
-    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(x, y) )
-    out = minimizer.leastsq()
-    # lmfit.report_fit(out)
-    
-    assert(out.nfev >   5)
-    assert(out.nfev < 500)
-    assert(out.chisqr < 3.0)
-    assert(out.nvarys == 2)
-
-    assert_paramval(out.params['a'],  0.1, tol=0.1)
-    assert_paramval(out.params['b'], -2.0, tol=0.1)
-
-    ci = lmfit.conf_interval(minimizer, out)
-    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
-    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
-    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
-    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
-    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
-    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)
-
-   # lmfit.printfuncs.report_ci(ci)
-
-if __name__ == '__main__':
-    test_confidence1()
+import numpy as np
+from numpy.testing import assert_allclose
+
+import lmfit
+from lmfit_testutils import assert_paramval
+
+def residual(params, x, data):
+    a = params['a'].value
+    b = params['b'].value
+    return data - 1.0/(a*x) + b
+
+def residual2(params, x, data):
+    a = params['a'].value
+    b = params['b'].value
+    c = params['c'].value
+    return data - c/(a*x) + b
+
+def test_confidence1():
+    x = np.linspace(0.3,10,100)
+    np.random.seed(0)
+
+    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+
+    pars = lmfit.Parameters()
+    pars.add_many(('a', 0.1), ('b', 1))
+
+    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(x, y) )
+    out = minimizer.leastsq()
+    # lmfit.report_fit(out)
+
+    assert(out.nfev >   5)
+    assert(out.nfev < 500)
+    assert(out.chisqr < 3.0)
+    assert(out.nvarys == 2)
+
+    assert_paramval(out.params['a'],  0.1, tol=0.1)
+    assert_paramval(out.params['b'], -2.0, tol=0.1)
+
+    ci = lmfit.conf_interval(minimizer, out)
+    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
+    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
+    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
+    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
+    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
+    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)
+
+   # lmfit.printfuncs.report_ci(ci)
+
+
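+# Same model as test_confidence1, but with bounds on 'a' and a fixed
+# parameter 'c'; a Nelder-Mead solve is chained into leastsq before
+# computing confidence intervals, which should match test_confidence1.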
+def test_confidence2():
+    x = np.linspace(0.3,10,100)
+    np.random.seed(0)
+
+    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+
+    pars = lmfit.Parameters()
+    pars.add_many(('a', 0.1), ('b', 1), ('c', 1.0))
+    pars['a'].max = 0.25
+    pars['a'].min = 0.00
+    pars['a'].value = 0.2
+    pars['c'].vary = False
+
+    minimizer = lmfit.Minimizer(residual2, pars, fcn_args=(x, y) )
+    out = minimizer.minimize(method='nelder')
+    out = minimizer.minimize(method='leastsq', params=out.params)
+    # lmfit.report_fit(out)
+
+    assert(out.nfev >   5)
+    assert(out.nfev < 500)
+    assert(out.chisqr < 3.0)
+    assert(out.nvarys == 2)
+
+    assert_paramval(out.params['a'],  0.1, tol=0.1)
+    assert_paramval(out.params['b'], -2.0, tol=0.1)
+
+    ci = lmfit.conf_interval(minimizer, out)
+    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
+    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
+    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
+    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
+    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
+    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)
+
+    lmfit.printfuncs.report_ci(ci)
+
+if __name__ == '__main__':
+    test_confidence1()
+    test_confidence2()
diff --git a/tests/test_copy_params.py b/tests/test_copy_params.py
index d68387b..e17aa18 100644
--- a/tests/test_copy_params.py
+++ b/tests/test_copy_params.py
@@ -1,36 +1,36 @@
-import numpy as np
-from lmfit import Parameters, minimize, report_fit
-
-def get_data():
-    x = np.arange(0, 1, 0.01)
-    y1 = 1.5*np.exp(0.9*x) + np.random.normal(scale=0.001, size=len(x))
-    y2 = 2.0 + x + 1/2.*x**2 +1/3.*x**3
-    y2 = y2 + np.random.normal(scale=0.001, size=len(x))
-    return x, y1, y2
-
-def residual(params, x, data):
-    a = params['a'].value
-    b = params['b'].value
-
-    model = a*np.exp(b*x)
-    return (data-model)
-
-def test_copy_params():
-    x, y1, y2 = get_data()
-
-    params = Parameters()
-    params.add('a', value = 2.0)
-    params.add('b', value = 2.0)
-
-    # fit to first data set
-    out1 = minimize(residual, params, args=(x, y1))
-
-    # fit to second data set
-    out2 = minimize(residual, params, args=(x, y2))
-
-    adiff = out1.params['a'].value - out2.params['a'].value
-    bdiff = out1.params['b'].value - out2.params['b'].value
-
-    assert(abs(adiff) > 1.e-2)
-    assert(abs(bdiff) > 1.e-2)
-
+import numpy as np
+from lmfit import Parameters, minimize, report_fit
+
+def get_data():
+    x = np.arange(0, 1, 0.01)
+    y1 = 1.5*np.exp(0.9*x) + np.random.normal(scale=0.001, size=len(x))
+    y2 = 2.0 + x + 1/2.*x**2 +1/3.*x**3
+    y2 = y2 + np.random.normal(scale=0.001, size=len(x))
+    return x, y1, y2
+
+def residual(params, x, data):
+    a = params['a'].value
+    b = params['b'].value
+
+    model = a*np.exp(b*x)
+    return (data-model)
+
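+# minimize() should not modify the Parameters passed to it, so two fits
+# started from the same params on different data sets must end up with
+# clearly different best-fit values.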
+def test_copy_params():
+    x, y1, y2 = get_data()
+
+    params = Parameters()
+    params.add('a', value = 2.0)
+    params.add('b', value = 2.0)
+
+    # fit to first data set
+    out1 = minimize(residual, params, args=(x, y1))
+
+    # fit to second data set
+    out2 = minimize(residual, params, args=(x, y2))
+
+    adiff = out1.params['a'].value - out2.params['a'].value
+    bdiff = out1.params['b'].value - out2.params['b'].value
+
+    assert(abs(adiff) > 1.e-2)
+    assert(abs(bdiff) > 1.e-2)
+
diff --git a/tests/test_default_kws.py b/tests/test_default_kws.py
index 93a1c8f..8ab835f 100644
--- a/tests/test_default_kws.py
+++ b/tests/test_default_kws.py
@@ -1,24 +1,24 @@
-import numpy as np
-from nose.tools import assert_true
-from lmfit.lineshapes import gaussian
-from lmfit.models import GaussianModel
-
-
-def test_default_inputs_gauss():
-
-    area = 1
-    cen = 0
-    std = 0.2
-    x = np.arange(-3, 3, 0.01)
-    y = gaussian(x, area, cen, std)
-
-    g = GaussianModel()
-
-    fit_option1 = {'maxfev': 5000, 'xtol': 1e-2}
-    result1 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5, fit_kws=fit_option1)
-
-    fit_option2 = {'maxfev': 5000, 'xtol': 1e-6}
-    result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5, fit_kws=fit_option2)
-
-    assert_true(result1.values!=result2.values)
-    return
+import numpy as np
+from nose.tools import assert_true
+from lmfit.lineshapes import gaussian
+from lmfit.models import GaussianModel
+
+
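+# fit_kws should be passed through to the underlying solver, so fits run
+# with different xtol settings are expected to give different results.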
+def test_default_inputs_gauss():
+
+    area = 1
+    cen = 0
+    std = 0.2
+    x = np.arange(-3, 3, 0.01)
+    y = gaussian(x, area, cen, std)
+
+    g = GaussianModel()
+
+    fit_option1 = {'maxfev': 5000, 'xtol': 1e-2}
+    result1 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5, fit_kws=fit_option1)
+
+    fit_option2 = {'maxfev': 5000, 'xtol': 1e-6}
+    result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5, fit_kws=fit_option2)
+
+    assert_true(result1.values != result2.values)
+    return
diff --git a/tests/test_itercb.py b/tests/test_itercb.py
index cefcc5d..f77eb48 100644
--- a/tests/test_itercb.py
+++ b/tests/test_itercb.py
@@ -1,29 +1,29 @@
-import numpy as np
-from lmfit import Parameters, minimize, report_fit
-from lmfit.models import LinearModel, GaussianModel
-from lmfit.lineshapes import gaussian
-
-def per_iteration(pars, iter, resid, *args, **kws):
-    """iteration callback, will abort at iteration 23
-    """
-    # print( iter, ', '.join(["%s=%.4f" % (p.name, p.value) for p in pars.values()]))
-    return iter == 23
-
-def test_itercb():
-    x = np.linspace(0, 20, 401)
-    y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
-    y = y  - .20*x + 3.333 + np.random.normal(scale=0.23,  size=len(x))
-    mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
-
-    pars = mod.make_params(peak_amplitude=21.0,
-                           peak_center=7.0,
-                           peak_sigma=2.0,
-                           bkg_intercept=2,
-                           bkg_slope=0.0)
-
-    out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
-
-    assert(out.nfev == 23)
-    assert(out.aborted)
-    assert(not out.errorbars)
-    assert(not out.success)
+import numpy as np
+from lmfit import Parameters, minimize, report_fit
+from lmfit.models import LinearModel, GaussianModel
+from lmfit.lineshapes import gaussian
+
+def per_iteration(pars, iter, resid, *args, **kws):
+    """iteration callback, will abort at iteration 23
+    """
+    # print( iter, ', '.join(["%s=%.4f" % (p.name, p.value) for p in pars.values()]))
+    return iter == 23
+
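+# Returning True from the iter_cb callback aborts the fit: nfev should
+# equal the iteration at which the callback fired, with errorbars and
+# success both False.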
+def test_itercb():
+    x = np.linspace(0, 20, 401)
+    y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+    y = y  - .20*x + 3.333 + np.random.normal(scale=0.23,  size=len(x))
+    mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+    pars = mod.make_params(peak_amplitude=21.0,
+                           peak_center=7.0,
+                           peak_sigma=2.0,
+                           bkg_intercept=2,
+                           bkg_slope=0.0)
+
+    out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
+
+    assert(out.nfev == 23)
+    assert(out.aborted)
+    assert(not out.errorbars)
+    assert(not out.success)
diff --git a/tests/test_manypeaks_speed.py b/tests/test_manypeaks_speed.py
index 4f870ac..c756936 100644
--- a/tests/test_manypeaks_speed.py
+++ b/tests/test_manypeaks_speed.py
@@ -1,37 +1,37 @@
-#
-# test speed of building complex model
-#
-import time
-import sys
-import numpy as np
-from lmfit import Model
-from lmfit.lineshapes import gaussian
-from copy import deepcopy
-
-
-sys.setrecursionlimit(2000)
-
-def test_manypeaks_speed():
-    x  = np.linspace( -5, 5, 251)
-    model = None
-    t0 = time.time()
-    for i in np.arange(500):
-        g = Model(gaussian, prefix='g%i_' % i)
-        if model is None:
-            model = g
-        else:
-            model += g
-    t1 = time.time()
-    pars = model.make_params()
-    t2 = time.time()
-    cpars = deepcopy(pars)
-    t3 = time.time()
-
-    # these are very conservative tests that 
-    # should be satisfied on nearly any machine
-    assert((t3-t2) < 0.5)
-    assert((t2-t1) < 0.5)
-    assert((t1-t0) < 5.0)
-
-if __name__ == '__main__':
-    test_manypeaks_speed()
+#
+# test speed of building complex model
+#
+import time
+import sys
+import numpy as np
+from lmfit import Model
+from lmfit.lineshapes import gaussian
+from copy import deepcopy
+
+
+sys.setrecursionlimit(2000)
+
+def test_manypeaks_speed():
+    x  = np.linspace( -5, 5, 251)
+    model = None
+    t0 = time.time()
+    for i in np.arange(500):
+        g = Model(gaussian, prefix='g%i_' % i)
+        if model is None:
+            model = g
+        else:
+            model += g
+    t1 = time.time()
+    pars = model.make_params()
+    t2 = time.time()
+    cpars = deepcopy(pars)
+    t3 = time.time()
+
+    # these are very conservative tests that 
+    # should be satisfied on nearly any machine
+    assert((t3-t2) < 0.5)
+    assert((t2-t1) < 0.5)
+    assert((t1-t0) < 5.0)
+
+if __name__ == '__main__':
+    test_manypeaks_speed()
diff --git a/tests/test_model.py b/tests/test_model.py
index bcc389d..9cede12 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -1,503 +1,585 @@
-import unittest
-import warnings
-import nose
-from numpy.testing import assert_allclose
-from numpy.testing.decorators import knownfailureif
-import numpy as np
-
-from lmfit import Model, Parameter, models
-from lmfit.lineshapes import gaussian
-
-def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
-                         err_msg='', verbose=True):
-    for param_name, value in desired.items():
-         assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose)
-
-def _skip_if_no_pandas():
-    try:
-        import pandas
-    except ImportError:
-        raise nose.SkipTest("Skipping tests that require pandas.")
-
-
-class CommonTests(object):
-    # to be subclassed for testing predefined models
-
-    def setUp(self):
-        np.random.seed(1)
-        self.noise = 0.0001*np.random.randn(*self.x.shape)
-        # Some Models need args (e.g., polynomial order), and others don't.
-        try:
-            args = self.args
-        except AttributeError:
-            self.model = self.model_constructor()
-            self.model_drop = self.model_constructor(missing='drop')
-            self.model_raise = self.model_constructor(missing='raise')
-            self.model_explicit_var = self.model_constructor(['x'])
-            func = self.model.func
-        else:
-            self.model = self.model_constructor(*args)
-            self.model_drop = self.model_constructor(*args, missing='drop')
-            self.model_raise = self.model_constructor(*args, missing='raise')
-            self.model_explicit_var = self.model_constructor(
-                *args, independent_vars=['x'])
-            func = self.model.func
-        self.data = func(x=self.x, **self.true_values()) + self.noise
-
-    @property
-    def x(self):
-        return np.linspace(1, 10, num=1000)
-
-    def test_fit(self):
-        model = self.model
-
-        # Pass Parameters object.
-        params = model.make_params(**self.guess())
-        result = model.fit(self.data, params, x=self.x)
-        assert_results_close(result.values, self.true_values())
-
-        # Pass inidividual Parameter objects as kwargs.
-        kwargs = {name: p for name, p in params.items()}
-        result = self.model.fit(self.data, x=self.x, **kwargs)
-        assert_results_close(result.values, self.true_values())
-
-        # Pass guess values (not Parameter objects) as kwargs.
-        kwargs = {name: p.value for name, p in params.items()}
-        result = self.model.fit(self.data, x=self.x, **kwargs)
-        assert_results_close(result.values, self.true_values())
-
-    def test_explicit_independent_vars(self):
-        self.check_skip_independent_vars()
-        model = self.model_explicit_var
-        pars = model.make_params(**self.guess())
-        result = model.fit(self.data, pars, x=self.x)
-        assert_results_close(result.values, self.true_values())
-
-    def test_fit_with_weights(self):
-        model = self.model
-
-        # fit without weights
-        params = model.make_params(**self.guess())
-        out1 = model.fit(self.data, params, x=self.x)
-
-        # fit with weights
-        weights = 1.0/(0.5 + self.x**2)
-        out2 = model.fit(self.data, params, weights=weights, x=self.x)
-
-        max_diff = 0.0
-        for parname, val1 in out1.values.items():
-            val2 = out2.values[parname]
-            if max_diff < abs(val1-val2):
-                max_diff = abs(val1-val2)
-        assert(max_diff > 1.e-8)
-
-    def test_result_attributes(self):
-        pars = self.model.make_params(**self.guess())
-        result = self.model.fit(self.data, pars, x=self.x)
-
-        # result.init_values
-        assert_results_close(result.values, self.true_values())
-        self.assertEqual(result.init_values, self.guess())
-
-        # result.init_params
-        params = self.model.make_params()
-        for param_name, value in self.guess().items():
-            params[param_name].value = value
-        self.assertEqual(result.init_params, params)
-
-        # result.best_fit
-        assert_allclose(result.best_fit, self.data, atol=self.noise.max())
-
-        # result.init_fit
-        init_fit = self.model.func(x=self.x, **self.guess())
-        assert_allclose(result.init_fit, init_fit)
-
-        # result.model
-        self.assertTrue(result.model is self.model)
-
-    def test_result_eval(self):
-        # Check eval() output against init_fit and best_fit.
-        pars = self.model.make_params(**self.guess())
-        result = self.model.fit(self.data, pars, x=self.x)
-
-        assert_allclose(result.eval(x=self.x, **result.values),
-                        result.best_fit)
-        assert_allclose(result.eval(x=self.x, **result.init_values),
-                        result.init_fit)
-
-    def test_result_eval_custom_x(self):
-        self.check_skip_independent_vars()
-        pars = self.model.make_params(**self.guess())
-        result = self.model.fit(self.data, pars, x=self.x)
-
-        # Check that the independent variable is respected.
-        short_eval = result.eval(x=np.array([0, 1, 2]), **result.values)
-        self.assertEqual(len(short_eval), 3)
-
-    def test_data_alignment(self):
-        _skip_if_no_pandas()
-        from pandas import Series
-
-        # Align data and indep var of different lengths using pandas index.
-        data = Series(self.data.copy()).iloc[10:-10]
-        x = Series(self.x.copy())
-
-        model = self.model
-        params = model.make_params(**self.guess())
-        result = model.fit(data, params, x=x)
-        result = model.fit(data, params, x=x)
-        assert_results_close(result.values, self.true_values())
-
-        # Skip over missing (NaN) values, aligning via pandas index.
-        data.iloc[500:510] = np.nan
-        result = self.model_drop.fit(data, params, x=x)
-        assert_results_close(result.values, self.true_values())
-
-        # Raise if any NaN values are present.
-        raises = lambda: self.model_raise.fit(data, params, x=x)
-        self.assertRaises(ValueError, raises)
-
-    def check_skip_independent_vars(self):
-        # to be overridden for models that do not accept indep vars
-        pass
-
-    def test_aic(self):
-        model = self.model
-
-        # Pass Parameters object.
-        params = model.make_params(**self.guess())
-        result = model.fit(self.data, params, x=self.x)
-        aic = result.aic
-        self.assertTrue(aic < 0) # aic must be negative
-
-        # Pass extra unused Parameter.
-        params.add("unused_param", value=1.0, vary=True)
-        result = model.fit(self.data, params, x=self.x)
-        aic_extra = result.aic
-        self.assertTrue(aic_extra < 0)   # aic must be negative
-        self.assertTrue(aic < aic_extra) # the extra param should lower the aic
-
-
-    def test_bic(self):
-        model = self.model
-
-        # Pass Parameters object.
-        params = model.make_params(**self.guess())
-        result = model.fit(self.data, params, x=self.x)
-        bic = result.bic
-        self.assertTrue(bic < 0) # aic must be negative
-
-        # Compare to AIC
-        aic = result.aic
-        self.assertTrue(aic < bic) # aic should be lower than bic
-
-        # Pass extra unused Parameter.
-        params.add("unused_param", value=1.0, vary=True)
-        result = model.fit(self.data, params, x=self.x)
-        bic_extra = result.bic
-        self.assertTrue(bic_extra < 0)   # bic must be negative
-        self.assertTrue(bic < bic_extra) # the extra param should lower the bic
-
-
-class TestUserDefiniedModel(CommonTests, unittest.TestCase):
-    # mainly aimed at checking that the API does what it says it does
-    # and raises the right exceptions or warnings when things are not right
-
-    def setUp(self):
-        self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
-        self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
-        # return a fresh copy
-        self.model_constructor = (
-            lambda *args, **kwargs: Model(gaussian, *args, **kwargs))
-        super(TestUserDefiniedModel, self).setUp()
-
-    @property
-    def x(self):
-        return np.linspace(-10, 10, num=1000)
-
-    def test_lists_become_arrays(self):
-        # smoke test
-        self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess())
-        self.model.fit([1, 2, None, 3], x=[1, 2, 3, 4], **self.guess())
-
-    def test_missing_param_raises_error(self):
-
-        # using keyword argument parameters
-        guess_missing_sigma = self.guess()
-        del guess_missing_sigma['sigma']
-        # f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma)
-        # self.assertRaises(ValueError, f)
-
-        # using Parameters
-        params = self.model.make_params()
-        for param_name, value in guess_missing_sigma.items():
-            params[param_name].value = value
-        f = lambda: self.model.fit(self.data, params, x=self.x)
-
-    def test_extra_param_issues_warning(self):
-        # The function accepts extra params, Model will warn but not raise.
-        def flexible_func(x, amplitude, center, sigma, **kwargs):
-            return gaussian(x, amplitude, center, sigma)
-
-        flexible_model = Model(flexible_func)
-        pars = flexible_model.make_params(**self.guess())
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            flexible_model.fit(self.data, pars, x=self.x, extra=5)
-        self.assertTrue(len(w) == 1)
-        self.assertTrue(issubclass(w[-1].category, UserWarning))
-
-    def test_missing_independent_variable_raises_error(self):
-        pars = self.model.make_params(**self.guess())
-        f = lambda: self.model.fit(self.data, pars)
-        self.assertRaises(KeyError, f)
-
-    def test_bounding(self):
-        true_values = self.true_values()
-        true_values['center'] = 1.3  # as close as it's allowed to get
-        pars = self.model.make_params(**self.guess())
-        pars['center'].set(value=2, min=1.3)
-        result = self.model.fit(self.data, pars, x=self.x)
-        assert_results_close(result.values, true_values, rtol=0.05)
-
-    def test_vary_false(self):
-        true_values = self.true_values()
-        true_values['center'] = 1.3
-        pars = self.model.make_params(**self.guess())
-        pars['center'].set(value=1.3, vary=False)
-        result = self.model.fit(self.data, pars, x=self.x)
-        assert_results_close(result.values, true_values, rtol=0.05)
-
-    # testing model addition...
-
-    def test_user_defined_gaussian_plus_constant(self):
-        data = self.data + 5.0
-        model = self.model + models.ConstantModel()
-        guess = self.guess()
-        pars = model.make_params(c= 10.1, **guess)
-        true_values = self.true_values()
-        true_values['c'] = 5.0
-
-        result = model.fit(data, pars, x=self.x)
-        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
-
-    def test_model_with_prefix(self):
-        # model with prefix of 'a' and 'b'
-        mod = models.GaussianModel(prefix='a')
-        vals = {'center': 2.45, 'sigma':0.8, 'amplitude':3.15}
-        data = gaussian(x=self.x, **vals) + self.noise/3.0
-        pars = mod.guess(data, x=self.x)
-        self.assertTrue('aamplitude' in pars)
-        self.assertTrue('asigma' in pars)
-        out = mod.fit(data, pars, x=self.x)
-        self.assertTrue(out.params['aamplitude'].value > 2.0)
-        self.assertTrue(out.params['acenter'].value > 2.0)
-        self.assertTrue(out.params['acenter'].value < 3.0)
-
-        mod = models.GaussianModel(prefix='b')
-        data = gaussian(x=self.x, **vals) + self.noise/3.0
-        pars = mod.guess(data, x=self.x)
-        self.assertTrue('bamplitude' in pars)
-        self.assertTrue('bsigma' in pars)
-
-    def test_change_prefix(self):
-        "should fail"
-        mod = models.GaussianModel(prefix='b')
-        set_prefix_failed = None
-        try:
-            mod.prefix = 'c'
-            set_prefix_failed = False
-        except AttributeError:
-            set_prefix_failed = True
-        except:
-            set_prefix_failed = None
-        self.assertTrue(set_prefix_failed)
-
-
-    def test_sum_of_two_gaussians(self):
-        # two user-defined gaussians
-        model1 = self.model
-        f2 = lambda x, amp, cen, sig: gaussian(x, amplitude=amp, center=cen, sigma=sig)
-        model2 = Model(f2)
-        values1 = self.true_values()
-        values2 = {'cen': 2.45, 'sig':0.8, 'amp':3.15}
-
-        data  = gaussian(x=self.x, **values1) + f2(x=self.x, **values2) + self.noise/3.0
-        model = self.model + model2
-        pars = model.make_params()
-        pars['sigma'].set(value=2, min=0)
-        pars['center'].set(value=1, min=0.2, max=1.8)
-        pars['amplitude'].set(value=3, min=0)
-        pars['sig'].set(value=1, min=0)
-        pars['cen'].set(value=2.4, min=2, max=3.5)
-        pars['amp'].set(value=1, min=0)
-
-        true_values = dict(list(values1.items()) + list(values2.items()))
-        result = model.fit(data, pars, x=self.x)
-        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
-
-        # user-defined models with common parameter names
-        # cannot be added, and should raise
-        f = lambda: model1 + model1
-        self.assertRaises(NameError, f)
-
-        # two predefined_gaussians, using suffix to differentiate
-        model1 = models.GaussianModel(prefix='g1_')
-        model2 = models.GaussianModel(prefix='g2_')
-        model = model1 + model2
-        true_values = {'g1_center': values1['center'],
-                       'g1_amplitude': values1['amplitude'],
-                       'g1_sigma': values1['sigma'],
-                       'g2_center': values2['cen'],
-                       'g2_amplitude': values2['amp'],
-                       'g2_sigma': values2['sig']}
-        pars = model.make_params()
-        pars['g1_sigma'].set(2)
-        pars['g1_center'].set(1)
-        pars['g1_amplitude'].set(3)
-        pars['g2_sigma'].set(1)
-        pars['g2_center'].set(2.4)
-        pars['g2_amplitude'].set(1)
-
-        result = model.fit(data, pars, x=self.x)
-        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
-
-        # without suffix, the names collide and Model should raise
-        model1 = models.GaussianModel()
-        model2 = models.GaussianModel()
-        f = lambda: model1 + model2
-        self.assertRaises(NameError, f)
-
-    def test_sum_composite_models(self):
-        # test components of composite model created adding composite model
-        model1 = models.GaussianModel(prefix='g1_')
-        model2 = models.GaussianModel(prefix='g2_')
-        model3 = models.GaussianModel(prefix='g3_')
-        model4 = models.GaussianModel(prefix='g4_')
-
-        model_total1 = (model1 + model2) + model3
-        for mod in [model1, model2, model3]:
-            self.assertTrue(mod in model_total1.components)
-
-        model_total2 = model1 + (model2 + model3)
-        for mod in [model1, model2, model3]:
-            self.assertTrue(mod in model_total2.components)
-
-        model_total3 = (model1 + model2) + (model3 + model4)
-        for mod in [model1, model2, model3, model4]:
-            self.assertTrue(mod in model_total3.components)
-
-    def test_composite_has_bestvalues(self):
-        # test that a composite model has non-empty best_values
-        model1 = models.GaussianModel(prefix='g1_')
-        model2 = models.GaussianModel(prefix='g2_')
-
-        mod  = model1 + model2
-        pars = mod.make_params()
-
-        values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
-        values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
-        data  = gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2) + 0.1*self.noise
-
-        pars['g1_sigma'].set(2)
-        pars['g1_center'].set(1, max=1.5)
-        pars['g1_amplitude'].set(3)
-        pars['g2_sigma'].set(1)
-        pars['g2_center'].set(2.6, min=2.0)
-        pars['g2_amplitude'].set(1)
-
-        result = mod.fit(data, params=pars, x=self.x)
-
-        self.assertTrue(len(result.best_values) == 6)
-
-        self.assertTrue(abs(result.params['g1_amplitude'].value -  7.1) < 0.5)
-        self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 0.5)
-        self.assertTrue(abs(result.params['g1_center'].value    -  1.1) < 0.2)
-        self.assertTrue(abs(result.params['g2_center'].value    -  2.5) < 0.2)
-
-
-    def test_hints_in_composite_models(self):
-        # test propagation of hints from base models to composite model
-        def func(x, amplitude):
-            pass
-
-        m1 = Model(func, prefix='p1_')
-        m2 = Model(func, prefix='p2_')
-
-        m1.set_param_hint('amplitude', value=1)
-        m2.set_param_hint('amplitude', value=2)
-
-        mx = (m1 + m2)
-        params = mx.make_params()
-        param_values = {name: p.value for name, p in params.items()}
-        self.assertEqual(param_values['p1_amplitude'], 1)
-        self.assertEqual(param_values['p2_amplitude'], 2)
-
-
-class TestLinear(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(slope=5, intercept=2)
-        self.guess = lambda: dict(slope=10, intercept=6)
-        self.model_constructor = models.LinearModel
-        super(TestLinear, self).setUp()
-
-
-class TestParabolic(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(a=5, b=2, c=8)
-        self.guess = lambda: dict(a=1, b=6, c=3)
-        self.model_constructor = models.ParabolicModel
-        super(TestParabolic, self).setUp()
-
-
-class TestPolynomialOrder2(CommonTests, unittest.TestCase):
-   # class Polynomial constructed with order=2
-
-    def setUp(self):
-        self.true_values = lambda: dict(c2=5, c1=2, c0=8)
-        self.guess = lambda: dict(c1=1, c2=6, c0=3)
-        self.model_constructor = models.PolynomialModel
-        self.args = (2,)
-        super(TestPolynomialOrder2, self).setUp()
-
-
-class TestPolynomialOrder3(CommonTests, unittest.TestCase):
-   # class Polynomial constructed with order=3
-
-    def setUp(self):
-        self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8)
-        self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
-        self.model_constructor = models.PolynomialModel
-        self.args = (3,)
-        super(TestPolynomialOrder3, self).setUp()
-
-
-class TestConstant(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(c=5)
-        self.guess = lambda: dict(c=2)
-        self.model_constructor = models.ConstantModel
-        super(TestConstant, self).setUp()
-
-    def check_skip_independent_vars(self):
-        raise nose.SkipTest("ConstantModel has not independent_vars.")
-
-class TestPowerlaw(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(amplitude=5, exponent=3)
-        self.guess = lambda: dict(amplitude=2, exponent=8)
-        self.model_constructor = models.PowerLawModel
-        super(TestPowerlaw, self).setUp()
-
-
-class TestExponential(CommonTests, unittest.TestCase):
-
-    def setUp(self):
-        self.true_values = lambda: dict(amplitude=5, decay=3)
-        self.guess = lambda: dict(amplitude=2, decay=8)
-        self.model_constructor = models.ExponentialModel
-        super(TestExponential, self).setUp()
+import unittest
+import warnings
+import nose
+from numpy.testing import assert_allclose
+from numpy.testing.decorators import knownfailureif
+import numpy as np
+
+from lmfit import Model, Parameter, models
+from lmfit.lineshapes import gaussian
+
+def assert_results_close(actual, desired, rtol=1e-03, atol=1e-03,
+                         err_msg='', verbose=True):
+    for param_name, value in desired.items():
+        assert_allclose(actual[param_name], value, rtol, atol, err_msg, verbose)
+
+def _skip_if_no_pandas():
+    try:
+        import pandas
+    except ImportError:
+        raise nose.SkipTest("Skipping tests that require pandas.")
+
+
+class CommonTests(object):
+    # to be subclassed for testing predefined models
+
+    def setUp(self):
+        np.random.seed(1)
+        self.noise = 0.0001*np.random.randn(*self.x.shape)
+        # Some Models need args (e.g., polynomial order), and others don't.
+        try:
+            args = self.args
+        except AttributeError:
+            self.model = self.model_constructor()
+            self.model_drop = self.model_constructor(missing='drop')
+            self.model_raise = self.model_constructor(missing='raise')
+            self.model_explicit_var = self.model_constructor(['x'])
+            func = self.model.func
+        else:
+            self.model = self.model_constructor(*args)
+            self.model_drop = self.model_constructor(*args, missing='drop')
+            self.model_raise = self.model_constructor(*args, missing='raise')
+            self.model_explicit_var = self.model_constructor(
+                *args, independent_vars=['x'])
+            func = self.model.func
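+        # synthetic data: the model evaluated at the true values plus noise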
+        self.data = func(x=self.x, **self.true_values()) + self.noise
+
+    @property
+    def x(self):
+        return np.linspace(1, 10, num=1000)
+
+    def test_fit(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        assert_results_close(result.values, self.true_values())
+
+        # Pass individual Parameter objects as kwargs.
+        kwargs = {name: p for name, p in params.items()}
+        result = self.model.fit(self.data, x=self.x, **kwargs)
+        assert_results_close(result.values, self.true_values())
+
+        # Pass guess values (not Parameter objects) as kwargs.
+        kwargs = {name: p.value for name, p in params.items()}
+        result = self.model.fit(self.data, x=self.x, **kwargs)
+        assert_results_close(result.values, self.true_values())
+
+    def test_explicit_independent_vars(self):
+        self.check_skip_independent_vars()
+        model = self.model_explicit_var
+        pars = model.make_params(**self.guess())
+        result = model.fit(self.data, pars, x=self.x)
+        assert_results_close(result.values, self.true_values())
+
+    def test_fit_with_weights(self):
+        model = self.model
+
+        # fit without weights
+        params = model.make_params(**self.guess())
+        out1 = model.fit(self.data, params, x=self.x)
+
+        # fit with weights
+        weights = 1.0/(0.5 + self.x**2)
+        out2 = model.fit(self.data, params, weights=weights, x=self.x)
+
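+        # the best-fit values should shift measurably when weights are applied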
+        max_diff = 0.0
+        for parname, val1 in out1.values.items():
+            val2 = out2.values[parname]
+            if max_diff < abs(val1-val2):
+                max_diff = abs(val1-val2)
+        assert(max_diff > 1.e-8)
+
+    def test_result_attributes(self):
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        # result.init_values
+        assert_results_close(result.values, self.true_values())
+        self.assertEqual(result.init_values, self.guess())
+
+        # result.init_params
+        params = self.model.make_params()
+        for param_name, value in self.guess().items():
+            params[param_name].value = value
+        self.assertEqual(result.init_params, params)
+
+        # result.best_fit
+        assert_allclose(result.best_fit, self.data, atol=self.noise.max())
+
+        # result.init_fit
+        init_fit = self.model.func(x=self.x, **self.guess())
+        assert_allclose(result.init_fit, init_fit)
+
+        # result.model
+        self.assertTrue(result.model is self.model)
+
+    def test_result_eval(self):
+        # Check eval() output against init_fit and best_fit.
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        assert_allclose(result.eval(x=self.x, **result.values),
+                        result.best_fit)
+        assert_allclose(result.eval(x=self.x, **result.init_values),
+                        result.init_fit)
+
+    def test_result_eval_custom_x(self):
+        self.check_skip_independent_vars()
+        pars = self.model.make_params(**self.guess())
+        result = self.model.fit(self.data, pars, x=self.x)
+
+        # Check that the independent variable is respected.
+        short_eval = result.eval(x=np.array([0, 1, 2]), **result.values)
+        self.assertEqual(len(short_eval), 3)
+
+    def test_data_alignment(self):
+        _skip_if_no_pandas()
+        from pandas import Series
+
+        # Align data and indep var of different lengths using pandas index.
+        data = Series(self.data.copy()).iloc[10:-10]
+        x = Series(self.x.copy())
+
+        model = self.model
+        params = model.make_params(**self.guess())
+        result = model.fit(data, params, x=x)
+        result = model.fit(data, params, x=x)
+        assert_results_close(result.values, self.true_values())
+
+        # Skip over missing (NaN) values, aligning via pandas index.
+        data.iloc[500:510] = np.nan
+        result = self.model_drop.fit(data, params, x=x)
+        assert_results_close(result.values, self.true_values())
+
+        # Raise if any NaN values are present.
+        raises = lambda: self.model_raise.fit(data, params, x=x)
+        self.assertRaises(ValueError, raises)
+
+    def check_skip_independent_vars(self):
+        # to be overridden for models that do not accept indep vars
+        pass
+
+    def test_aic(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        aic = result.aic
+        self.assertTrue(aic < 0)  # aic should be negative for this low-noise fit
+
+        # Pass extra unused Parameter.
+        params.add("unused_param", value=1.0, vary=True)
+        result = model.fit(self.data, params, x=self.x)
+        aic_extra = result.aic
+        self.assertTrue(aic_extra < 0)   # aic should still be negative
+        self.assertTrue(aic < aic_extra) # the extra param should raise the aic
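+        # (in lmfit, aic = n*log(chi2/n) + 2*nvarys, so each extra varying
+        # parameter adds 2 to the aic)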
+
+
+    def test_bic(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        bic = result.bic
+        self.assertTrue(bic < 0)  # bic should be negative for this low-noise fit
+
+        # Compare to AIC
+        aic = result.aic
+        self.assertTrue(aic < bic) # aic should be lower than bic
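+        # (bic = n*log(chi2/n) + log(n)*nvarys; with n = 1000 points,
+        # log(n) > 2, so the bic penalty exceeds the aic penalty)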
+
+        # Pass extra unused Parameter.
+        params.add("unused_param", value=1.0, vary=True)
+        result = model.fit(self.data, params, x=self.x)
+        bic_extra = result.bic
+        self.assertTrue(bic_extra < 0)   # bic should still be negative
+        self.assertTrue(bic < bic_extra) # the extra param should raise the bic
+
+
+class TestUserDefiniedModel(CommonTests, unittest.TestCase):
+    # mainly aimed at checking that the API does what it says it does
+    # and raises the right exceptions or warnings when things are not right
+
+    def setUp(self):
+        self.true_values = lambda: dict(amplitude=7.1, center=1.1, sigma=2.40)
+        self.guess = lambda: dict(amplitude=5, center=2, sigma=4)
+        # return a fresh copy
+        self.model_constructor = (
+            lambda *args, **kwargs: Model(gaussian, *args, **kwargs))
+        super(TestUserDefiniedModel, self).setUp()
+
+    @property
+    def x(self):
+        return np.linspace(-10, 10, num=1000)
+
+    def test_lists_become_arrays(self):
+        # smoke test
+        self.model.fit([1, 2, 3], x=[1, 2, 3], **self.guess())
+        self.model.fit([1, 2, None, 3], x=[1, 2, 3, 4], **self.guess())
+
+    def test_missing_param_raises_error(self):
+
+        # using keyword argument parameters
+        guess_missing_sigma = self.guess()
+        del guess_missing_sigma['sigma']
+        # f = lambda: self.model.fit(self.data, x=self.x, **guess_missing_sigma)
+        # self.assertRaises(ValueError, f)
+
+        # using Parameters
+        params = self.model.make_params()
+        for param_name, value in guess_missing_sigma.items():
+            params[param_name].value = value
+        f = lambda: self.model.fit(self.data, params, x=self.x)
+
+    def test_extra_param_issues_warning(self):
+        # The function accepts extra params, Model will warn but not raise.
+        def flexible_func(x, amplitude, center, sigma, **kwargs):
+            return gaussian(x, amplitude, center, sigma)
+
+        flexible_model = Model(flexible_func)
+        pars = flexible_model.make_params(**self.guess())
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter("always")
+            flexible_model.fit(self.data, pars, x=self.x, extra=5)
+        self.assertTrue(len(w) == 1)
+        self.assertTrue(issubclass(w[-1].category, UserWarning))
+
+    def test_missing_independent_variable_raises_error(self):
+        pars = self.model.make_params(**self.guess())
+        f = lambda: self.model.fit(self.data, pars)
+        self.assertRaises(KeyError, f)
+
+    def test_bounding(self):
+        true_values = self.true_values()
+        true_values['center'] = 1.3  # as close as it's allowed to get
+        pars = self.model.make_params(**self.guess())
+        pars['center'].set(value=2, min=1.3)
+        result = self.model.fit(self.data, pars, x=self.x)
+        assert_results_close(result.values, true_values, rtol=0.05)
+
+    def test_vary_false(self):
+        true_values = self.true_values()
+        true_values['center'] = 1.3
+        pars = self.model.make_params(**self.guess())
+        pars['center'].set(value=1.3, vary=False)
+        result = self.model.fit(self.data, pars, x=self.x)
+        assert_results_close(result.values, true_values, rtol=0.05)
+
+    # testing model addition...
+
+    def test_user_defined_gaussian_plus_constant(self):
+        data = self.data + 5.0
+        model = self.model + models.ConstantModel()
+        guess = self.guess()
+        pars = model.make_params(c=10.1, **guess)
+        true_values = self.true_values()
+        true_values['c'] = 5.0
+
+        result = model.fit(data, pars, x=self.x)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+    def test_model_with_prefix(self):
+        # model with prefix of 'a' and 'b'
+        mod = models.GaussianModel(prefix='a')
+        vals = {'center': 2.45, 'sigma':0.8, 'amplitude':3.15}
+        data = gaussian(x=self.x, **vals) + self.noise/3.0
+        pars = mod.guess(data, x=self.x)
+        self.assertTrue('aamplitude' in pars)
+        self.assertTrue('asigma' in pars)
+        out = mod.fit(data, pars, x=self.x)
+        self.assertTrue(out.params['aamplitude'].value > 2.0)
+        self.assertTrue(out.params['acenter'].value > 2.0)
+        self.assertTrue(out.params['acenter'].value < 3.0)
+
+        mod = models.GaussianModel(prefix='b')
+        data = gaussian(x=self.x, **vals) + self.noise/3.0
+        pars = mod.guess(data, x=self.x)
+        self.assertTrue('bamplitude' in pars)
+        self.assertTrue('bsigma' in pars)
+
+    def test_change_prefix(self):
+        "should fail"
+        mod = models.GaussianModel(prefix='b')
+        set_prefix_failed = None
+        try:
+            mod.prefix = 'c'
+            set_prefix_failed = False
+        except AttributeError:
+            set_prefix_failed = True
+        except:
+            set_prefix_failed = None
+        self.assertTrue(set_prefix_failed)
+
+
+    def test_sum_of_two_gaussians(self):
+        # two user-defined gaussians
+        model1 = self.model
+        f2 = lambda x, amp, cen, sig: gaussian(x, amplitude=amp, center=cen, sigma=sig)
+        model2 = Model(f2)
+        values1 = self.true_values()
+        values2 = {'cen': 2.45, 'sig':0.8, 'amp':3.15}
+
+        data  = gaussian(x=self.x, **values1) + f2(x=self.x, **values2) + self.noise/3.0
+        model = self.model + model2
+        pars = model.make_params()
+        pars['sigma'].set(value=2, min=0)
+        pars['center'].set(value=1, min=0.2, max=1.8)
+        pars['amplitude'].set(value=3, min=0)
+        pars['sig'].set(value=1, min=0)
+        pars['cen'].set(value=2.4, min=2, max=3.5)
+        pars['amp'].set(value=1, min=0)
+
+        true_values = dict(list(values1.items()) + list(values2.items()))
+        result = model.fit(data, pars, x=self.x)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+        # user-defined models with common parameter names
+        # cannot be added, and should raise
+        f = lambda: model1 + model1
+        self.assertRaises(NameError, f)
+
+        # two predefined Gaussians, using a prefix to differentiate
+        model1 = models.GaussianModel(prefix='g1_')
+        model2 = models.GaussianModel(prefix='g2_')
+        model = model1 + model2
+        true_values = {'g1_center': values1['center'],
+                       'g1_amplitude': values1['amplitude'],
+                       'g1_sigma': values1['sigma'],
+                       'g2_center': values2['cen'],
+                       'g2_amplitude': values2['amp'],
+                       'g2_sigma': values2['sig']}
+        pars = model.make_params()
+        pars['g1_sigma'].set(2)
+        pars['g1_center'].set(1)
+        pars['g1_amplitude'].set(3)
+        pars['g2_sigma'].set(1)
+        pars['g2_center'].set(2.4)
+        pars['g2_amplitude'].set(1)
+
+        result = model.fit(data, pars, x=self.x)
+        assert_results_close(result.values, true_values, rtol=0.01, atol=0.01)
+
+        # without a prefix, the names collide and Model should raise
+        model1 = models.GaussianModel()
+        model2 = models.GaussianModel()
+        f = lambda: model1 + model2
+        self.assertRaises(NameError, f)
+
+    def test_sum_composite_models(self):
+        # test components of composite model created adding composite model
+        model1 = models.GaussianModel(prefix='g1_')
+        model2 = models.GaussianModel(prefix='g2_')
+        model3 = models.GaussianModel(prefix='g3_')
+        model4 = models.GaussianModel(prefix='g4_')
+
+        model_total1 = (model1 + model2) + model3
+        for mod in [model1, model2, model3]:
+            self.assertTrue(mod in model_total1.components)
+
+        model_total2 = model1 + (model2 + model3)
+        for mod in [model1, model2, model3]:
+            self.assertTrue(mod in model_total2.components)
+
+        model_total3 = (model1 + model2) + (model3 + model4)
+        for mod in [model1, model2, model3, model4]:
+            self.assertTrue(mod in model_total3.components)
+
+    def test_composite_has_bestvalues(self):
+        # test that a composite model has non-empty best_values
+        model1 = models.GaussianModel(prefix='g1_')
+        model2 = models.GaussianModel(prefix='g2_')
+
+        mod  = model1 + model2
+        pars = mod.make_params()
+
+        values1 = dict(amplitude=7.10, center=1.1, sigma=2.40)
+        values2 = dict(amplitude=12.2, center=2.5, sigma=0.5)
+        data  = gaussian(x=self.x, **values1) + gaussian(x=self.x, **values2) + 0.1*self.noise
+
+        pars['g1_sigma'].set(2)
+        pars['g1_center'].set(1, max=1.5)
+        pars['g1_amplitude'].set(3)
+        pars['g2_sigma'].set(1)
+        pars['g2_center'].set(2.6, min=2.0)
+        pars['g2_amplitude'].set(1)
+
+        result = mod.fit(data, params=pars, x=self.x)
+
+        self.assertTrue(len(result.best_values) == 6)
+
+        self.assertTrue(abs(result.params['g1_amplitude'].value -  7.1) < 0.5)
+        self.assertTrue(abs(result.params['g2_amplitude'].value - 12.2) < 0.5)
+        self.assertTrue(abs(result.params['g1_center'].value    -  1.1) < 0.2)
+        self.assertTrue(abs(result.params['g2_center'].value    -  2.5) < 0.2)
+
+
+    def test_hints_in_composite_models(self):
+        # test propagation of hints from base models to composite model
+        def func(x, amplitude):
+            pass
+
+        m1 = Model(func, prefix='p1_')
+        m2 = Model(func, prefix='p2_')
+
+        m1.set_param_hint('amplitude', value=1)
+        m2.set_param_hint('amplitude', value=2)
+
+        mx = (m1 + m2)
+        params = mx.make_params()
+        param_values = {name: p.value for name, p in params.items()}
+        self.assertEqual(param_values['p1_amplitude'], 1)
+        self.assertEqual(param_values['p2_amplitude'], 2)
+
+    def test_hints_for_peakmodels(self):
+        # test that height/fwhm do not cause asteval errors.
+
+        x = np.linspace(-10, 10, 101)
+        y = np.sin(x / 3) + x / 100.
+
+        m1 = models.LinearModel(prefix='m1_')
+
+        params = m1.guess(y, x=x)
+
+        m2 = models.GaussianModel(prefix='m2_')
+        params.update(m2.make_params())
+
+        m = m1 + m2
+
+        param_values = {name: p.value for name, p in params.items()}
+        self.assertTrue(param_values['m1_intercept'] < 0.0)
+        self.assertEqual(param_values['m2_amplitude'], 1)
+
+    def test_weird_param_hints(self):
+        # tests GitHub Issue 312, an unusual way to access param_hints
+        def func(x, amp):
+            return amp*x
+
+        m = Model(func)
+        # use 'mods' so the imported 'models' module is not shadowed
+        mods = {}
+        for i in range(2):
+            m.set_param_hint('amp', value=1)
+            m.set_param_hint('amp', value=25)
+
+            mods[i] = Model(func, prefix='mod%i_' % i)
+            mods[i].param_hints['amp'] = m.param_hints['amp']
+
+        self.assertEqual(mods[0].param_hints['amp'],
+                         mods[1].param_hints['amp'])
+
+
+    def test_composite_model_with_expr_constrains(self):
+        """Smoke test for composite model fitting with expr constraints.
+        """
+        y = [0, 0, 4, 2, 1, 8, 21, 21, 23, 35, 50, 54, 46,
+             70, 77, 87, 98, 113, 148, 136, 185, 195, 194, 168, 170, 139,
+             155, 115, 132, 109, 102, 85, 69, 81, 82, 80, 71, 64, 79,
+             88, 111, 97, 97, 73, 72, 62, 41, 30, 13, 3, 9, 7,
+             0, 0, 0]
+        x = np.arange(-0.2, 1.2, 0.025)[:-1] + 0.5*0.025
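+        # x holds the bin centers for the 55 counts in y: drop the top edge
+        # of the grid and shift by half a step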
+
+        def gauss(x, sigma, mu, A):
+            return A*np.exp(-(x-mu)**2/(2*sigma**2))
+
+        # Initial values
+        p1_mu = 0.2
+        p1_sigma = 0.1
+        #p2_mu = 0.8
+        p2_sigma = 0.1
+
+        peak1 = Model(gauss, prefix='p1_')
+        peak2 = Model(gauss, prefix='p2_')
+        model = peak1 + peak2
+
+        model.set_param_hint('p1_mu', value=p1_mu, min=-1, max=2)
+        #model.set_param_hint('p2_mu', value=p2_mu, min=-1, max=2)
+        model.set_param_hint('p1_sigma', value=p1_sigma, min=0.01, max=0.2)
+        model.set_param_hint('p2_sigma', value=p2_sigma, min=0.01, max=0.2)
+        model.set_param_hint('p1_A', value=100, min=0.01)
+        model.set_param_hint('p2_A', value=50, min=0.01)
+
+        # Constrain the distance between the peaks to be > 0
+        model.set_param_hint('pos_delta', value=0.3, min=0)
+        model.set_param_hint('p2_mu', min=-1, expr='p1_mu + pos_delta')
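+        # with an expr set, p2_mu becomes a derived parameter and is not
+        # varied directly by the fit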
+
+        # Test fitting
+        result = model.fit(y, x=x)
+        self.assertTrue(result.params['pos_delta'].value > 0)
+
+
+class TestLinear(CommonTests, unittest.TestCase):
+
+    def setUp(self):
+        self.true_values = lambda: dict(slope=5, intercept=2)
+        self.guess = lambda: dict(slope=10, intercept=6)
+        self.model_constructor = models.LinearModel
+        super(TestLinear, self).setUp()
+
+
+class TestParabolic(CommonTests, unittest.TestCase):
+
+    def setUp(self):
+        self.true_values = lambda: dict(a=5, b=2, c=8)
+        self.guess = lambda: dict(a=1, b=6, c=3)
+        self.model_constructor = models.ParabolicModel
+        super(TestParabolic, self).setUp()
+
+
+class TestPolynomialOrder2(CommonTests, unittest.TestCase):
+   # class Polynomial constructed with order=2
+
+    def setUp(self):
+        self.true_values = lambda: dict(c2=5, c1=2, c0=8)
+        self.guess = lambda: dict(c1=1, c2=6, c0=3)
+        self.model_constructor = models.PolynomialModel
+        self.args = (2,)
+        super(TestPolynomialOrder2, self).setUp()
+
+
+class TestPolynomialOrder3(CommonTests, unittest.TestCase):
+   # class Polynomial constructed with order=3
+
+    def setUp(self):
+        self.true_values = lambda: dict(c3=2, c2=5, c1=2, c0=8)
+        self.guess = lambda: dict(c3=1, c1=1, c2=6, c0=3)
+        self.model_constructor = models.PolynomialModel
+        self.args = (3,)
+        super(TestPolynomialOrder3, self).setUp()
+
+
+class TestConstant(CommonTests, unittest.TestCase):
+    def setUp(self):
+        self.true_values = lambda: dict(c=5)
+        self.guess = lambda: dict(c=2)
+        self.model_constructor = models.ConstantModel
+        super(TestConstant, self).setUp()
+
+    def check_skip_independent_vars(self):
+        raise nose.SkipTest("ConstantModel has not independent_vars.")
+
+class TestPowerlaw(CommonTests, unittest.TestCase):
+    def setUp(self):
+        self.true_values = lambda: dict(amplitude=5, exponent=3)
+        self.guess = lambda: dict(amplitude=2, exponent=8)
+        self.model_constructor = models.PowerLawModel
+        super(TestPowerlaw, self).setUp()
+
+
+class TestExponential(CommonTests, unittest.TestCase):
+    def setUp(self):
+        self.true_values = lambda: dict(amplitude=5, decay=3)
+        self.guess = lambda: dict(amplitude=2, decay=8)
+        self.model_constructor = models.ExponentialModel
+        super(TestExponential, self).setUp()
+
+
+class TestComplexConstant(CommonTests, unittest.TestCase):
+    def setUp(self):
+        self.true_values = lambda: dict(re=5, im=5)
+        self.guess = lambda: dict(re=2, im=2)
+        self.model_constructor = models.ComplexConstantModel
+        super(TestComplexConstant, self).setUp()
+
+#
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
index 105a8cf..985a70c 100644
--- a/tests/test_multidatasets.py
+++ b/tests/test_multidatasets.py
@@ -1,74 +1,74 @@
-#
-# example fitting to multiple (simulated) data sets
-#
-import numpy as np
-from lmfit import minimize, Parameters, fit_report
-from lmfit.lineshapes import gaussian
-
-def gauss_dataset(params, i, x):
-    """calc gaussian from params for data set i
-    using simple, hardwired naming convention"""
-    amp = params['amp_%i' % (i+1)].value
-    cen = params['cen_%i' % (i+1)].value
-    sig = params['sig_%i' % (i+1)].value
-    return gaussian(x, amp, cen, sig)
-
-def objective(params, x, data):
-    """ calculate total residual for fits to several data sets held
-    in a 2-D array, and modeled by Gaussian functions"""
-    ndata, nx = data.shape
-    resid = 0.0*data[:]
-    # make residual per data set
-    for i in range(ndata):
-        resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
-    # now flatten this to a 1D array, as minimize() needs
-    return resid.flatten()
-
-def test_multidatasets():
-    # create 5 datasets
-    x  = np.linspace( -1, 2, 151)
-    data = []
-    for i in np.arange(5):
-        amp  =  2.60 + 1.50*np.random.rand()
-        cen  = -0.20 + 1.50*np.random.rand()
-        sig  =  0.25 + 0.03*np.random.rand()
-        dat  = gaussian(x, amp, cen, sig) + \
-               np.random.normal(size=len(x), scale=0.1)
-        data.append(dat)
-
-    # data has shape (5, 151)
-    data = np.array(data)
-    assert(data.shape) == (5, 151)
-
-    # create 5 sets of parameters, one per data set
-    pars = Parameters()
-    for iy, y in enumerate(data):
-        pars.add( 'amp_%i' % (iy+1), value=0.5, min=0.0,  max=200)
-        pars.add( 'cen_%i' % (iy+1), value=0.4, min=-2.0,  max=2.0)
-        pars.add( 'sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
-
-    # but now constrain all values of sigma to have the same value
-    # by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
-    for iy in (2, 3, 4, 5):
-        pars['sig_%i' % iy].expr='sig_1'
-
-    # run the global fit to all the data sets
-    out = minimize(objective, pars, args=(x, data))
-
-    assert(len(pars) == 15)
-    assert(out.nvarys == 11)
-    assert(out.nfev  > 15)
-    assert(out.chisqr > 1.0)
-    assert(pars['amp_1'].value > 0.1)
-    assert(pars['sig_1'].value > 0.1)
-    assert(pars['sig_2'].value == pars['sig_1'].value)
-
-    ## plot the data sets and fits
-    #  plt.figure()
-    #  for i in range(5):
-    #      y_fit = gauss_dataset(pars, i, x)
-    #      plt.plot(x, data[i, :], 'o', x, y_fit, '-')
-    #  plt.show()
-
-if __name__ == '__main__':
-    test_multidatasets()
+#
+# example fitting to multiple (simulated) data sets
+#
+import numpy as np
+from lmfit import minimize, Parameters, fit_report
+from lmfit.lineshapes import gaussian
+
+def gauss_dataset(params, i, x):
+    """calc gaussian from params for data set i
+    using simple, hardwired naming convention"""
+    amp = params['amp_%i' % (i+1)].value
+    cen = params['cen_%i' % (i+1)].value
+    sig = params['sig_%i' % (i+1)].value
+    return gaussian(x, amp, cen, sig)
+
+def objective(params, x, data):
+    """ calculate total residual for fits to several data sets held
+    in a 2-D array, and modeled by Gaussian functions"""
+    ndata, nx = data.shape
+    resid = 0.0*data[:]
+    # make residual per data set
+    for i in range(ndata):
+        resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
+    # now flatten this to a 1D array, as minimize() needs
+    return resid.flatten()
+
+def test_multidatasets():
+    # create 5 datasets
+    x  = np.linspace( -1, 2, 151)
+    data = []
+    for i in np.arange(5):
+        amp  =  2.60 + 1.50*np.random.rand()
+        cen  = -0.20 + 1.50*np.random.rand()
+        sig  =  0.25 + 0.03*np.random.rand()
+        dat  = gaussian(x, amp, cen, sig) + \
+               np.random.normal(size=len(x), scale=0.1)
+        data.append(dat)
+
+    # data has shape (5, 151)
+    data = np.array(data)
+    assert(data.shape) == (5, 151)
+
+    # create 5 sets of parameters, one per data set
+    pars = Parameters()
+    for iy, y in enumerate(data):
+        pars.add( 'amp_%i' % (iy+1), value=0.5, min=0.0,  max=200)
+        pars.add( 'cen_%i' % (iy+1), value=0.4, min=-2.0,  max=2.0)
+        pars.add( 'sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
+
+    # but now constrain all values of sigma to have the same value
+    # by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
+    for iy in (2, 3, 4, 5):
+        pars['sig_%i' % iy].expr='sig_1'
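+    # sig_2..sig_5 are now derived from sig_1, so only 11 of the 15
+    # parameters actually vary (see the nvarys check below)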
+
+    # run the global fit to all the data sets
+    out = minimize(objective, pars, args=(x, data))
+
+    assert(len(pars) == 15)
+    assert(out.nvarys == 11)
+    assert(out.nfev  > 15)
+    assert(out.chisqr > 1.0)
+    assert(pars['amp_1'].value > 0.1)
+    assert(pars['sig_1'].value > 0.1)
+    assert(pars['sig_2'].value == pars['sig_1'].value)
+
+    ## plot the data sets and fits
+    #  plt.figure()
+    #  for i in range(5):
+    #      y_fit = gauss_dataset(pars, i, x)
+    #      plt.plot(x, data[i, :], 'o', x, y_fit, '-')
+    #  plt.show()
+
+if __name__ == '__main__':
+    test_multidatasets()
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 3d0095f..b5ad44a 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -1,399 +1,608 @@
-# -*- coding: utf-8 -*-
-from __future__ import print_function
-from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
-from lmfit.minimizer import SCALAR_METHODS
-from lmfit.lineshapes import gaussian
-import numpy as np
-from numpy import pi
-from numpy.testing import assert_
-import unittest
-import nose
-from nose import SkipTest
-
-def check(para, real_val, sig=3):
-    err = abs(para.value - real_val)
-    print( para.name, para.value, real_val, para.stderr)
-    assert(err < sig * para.stderr)
-
-def check_wo_stderr(para, real_val, sig=0.1):
-    err = abs(para.value - real_val)
-    print (para.name, para.value, real_val)
-    assert(err < sig)
-
-def check_paras(para_fit, para_real):
-    for i in para_fit:
-        check(para_fit[i], para_real[i].value)
-
-def test_simple():
-    # create data to be fitted
-    np.random.seed(1)
-    x = np.linspace(0, 15, 301)
-    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
-            np.random.normal(size=len(x), scale=0.2) )
-
-    # define objective function: returns the array to be minimized
-    def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
-
-        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
-        return model - data
-
-    # create a set of Parameters
-    params = Parameters()
-    params.add('amp',   value= 10,  min=0)
-    params.add('decay', value= 0.1)
-    params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
-    params.add('omega', value= 3.0)
-
-    # do fit, here with leastsq model
-    result = minimize(fcn2min, params, args=(x, data))
-
-    # calculate final result
-    final = data + result.residual
-
-    # write error report
-    print(" --> SIMPLE --> ")
-    print(result.params)
-    report_fit(result.params)
-
-    #assert that the real parameters are found
-
-    for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
-        
-        check(para, val)
-
-def test_lbfgsb():
-    p_true = Parameters()
-    p_true.add('amp', value=14.0)
-    p_true.add('period', value=5.33)
-    p_true.add('shift', value=0.123)
-    p_true.add('decay', value=0.010)
-
-    def residual(pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
-
-        if abs(shift) > pi/2:
-            shift = shift - np.sign(shift) * pi
-        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
-        if data is None:
-            return model
-        return (model - data)
-
-    n = 2500
-    xmin = 0.
-    xmax = 250.0
-    noise = np.random.normal(scale=0.7215, size=n)
-    x     = np.linspace(xmin, xmax, n)
-    data  = residual(p_true, x) + noise
-
-    fit_params = Parameters()
-    fit_params.add('amp', value=11.0, min=5, max=20)
-    fit_params.add('period', value=5., min=1., max=7)
-    fit_params.add('shift', value=.10,  min=0.0, max=0.2)
-    fit_params.add('decay', value=6.e-3, min=0, max=0.1)
-
-    init = residual(fit_params, x)
-
-    out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data':data})
-
-    fit = residual(fit_params, x)
-
-    for name, par in out.params.items():
-        nout = "%s:%s" % (name, ' '*(20-len(name)))
-        print("%s: %s (%s) " % (nout, par.value, p_true[name].value))
-
-    for para, true_para in zip(out.params.values(), p_true.values()):
-        check_wo_stderr(para, true_para.value)
-
-def test_derive():
-    def func(pars, x, data=None):
-        a = pars['a'].value
-        b = pars['b'].value
-        c = pars['c'].value
-
-        model=a * np.exp(-b * x)+c
-        if data is None:
-            return model
-        return (model - data)
-
-    def dfunc(pars, x, data=None):
-        a = pars['a'].value
-        b = pars['b'].value
-        c = pars['c'].value
-        v = np.exp(-b*x)
-        return np.array([v, -a*x*v, np.ones(len(x))])
-
-    def f(var, x):
-        return var[0]* np.exp(-var[1] * x)+var[2]
-
-    params1 = Parameters()
-    params1.add('a', value=10)
-    params1.add('b', value=10)
-    params1.add('c', value=10)
-
-    params2 = Parameters()
-    params2.add('a', value=10)
-    params2.add('b', value=10)
-    params2.add('c', value=10)
-
-    a, b, c = 2.5, 1.3, 0.8
-    x = np.linspace(0,4,50)
-    y = f([a, b, c], x)
-    data = y + 0.15*np.random.normal(size=len(x))
-
-    # fit without analytic derivative
-    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
-    out1 = min1.leastsq()
-    fit1 = func(out1.params, x)
-
-    # fit with analytic derivative
-    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
-    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
-    fit2 = func(out2.params, x)
-
-    
-    print ('''Comparison of fit to exponential decay
-    with and without analytic derivatives, to
-       model = a*exp(-b*x) + c
-    for a = %.2f, b = %.2f, c = %.2f
-    ==============================================
-    Statistic/Parameter|   Without   | With      |
-    ----------------------------------------------
-    N Function Calls   |   %3i       |   %3i     |
-    Chi-square         |   %.4f    |   %.4f  |
-       a               |   %.4f    |   %.4f  |
-       b               |   %.4f    |   %.4f  |
-       c               |   %.4f    |   %.4f  |
-    ----------------------------------------------
-    ''' %  (a, b, c,
-            out1.nfev,   out2.nfev,
-            out1.chisqr, out2.chisqr,
-            out1.params['a'].value, out2.params['a'].value,
-            out1.params['b'].value, out2.params['b'].value,
-            out1.params['c'].value, out2.params['c'].value ))
-
-    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
-    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
-    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
-
-def test_peakfit():
-    def residual(pars, x, data=None):
-        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
-        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
-        model = g1 + g2
-        if data is None:
-            return model
-        return (model - data)
-
-    n    = 601
-    xmin = 0.
-    xmax = 15.0
-    noise = np.random.normal(scale=.65, size=n)
-    x = np.linspace(xmin, xmax, n)
-
-    org_params = Parameters()
-    org_params.add_many(('a1', 12.0, True, None, None, None),
-                        ('c1',  5.3, True, None, None, None),
-                        ('w1',  1.0, True, None, None, None),
-                        ('a2',  9.1, True, None, None, None),
-                        ('c2',  8.1, True, None, None, None),
-                        ('w2',  2.5, True, None, None, None))
-
-    data  = residual(org_params, x) + noise
-
-
-    fit_params = Parameters()
-    fit_params.add_many(('a1',  8.0, True, None, 14., None),
-                        ('c1',  5.0, True, None, None, None),
-                        ('w1',  0.7, True, None, None, None),
-                        ('a2',  3.1, True, None, None, None),
-                        ('c2',  8.8, True, None, None, None))
-
-    fit_params.add('w2', expr='2.5*w1')
-
-    myfit = Minimizer(residual, fit_params,
-                      fcn_args=(x,), fcn_kws={'data':data})
-
-    myfit.prepare_fit()
-
-    init = residual(fit_params, x)
-
-
-    out = myfit.leastsq()
-
-    # print(' N fev = ', myfit.nfev)
-    # print(myfit.chisqr, myfit.redchi, myfit.nfree)
-
-    report_fit(out.params)
-
-    fit = residual(out.params, x)
-    check_paras(out.params, org_params)
-
-
-def test_scalar_minimize_has_no_uncertainties():
-    # scalar_minimize doesn't calculate uncertainties.
-    # when a scalar_minimize is run the stderr and correl for each parameter
-    # should be None. (stderr and correl are set to None when a Parameter is
-    # initialised).
-    # This requires a reset after a leastsq fit has been done.
-    # Only when scalar_minimize calculates stderr and correl can this test
-    # be removed.
-
-    np.random.seed(1)
-    x = np.linspace(0, 15, 301)
-    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
-            np.random.normal(size=len(x), scale=0.2) )
-
-    # define objective function: returns the array to be minimized
-    def fcn2min(params, x, data):
-        """ model decaying sine wave, subtract data"""
-        amp = params['amp'].value
-        shift = params['shift'].value
-        omega = params['omega'].value
-        decay = params['decay'].value
-
-        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
-        return model - data
-
-    # create a set of Parameters
-    params = Parameters()
-    params.add('amp',   value= 10,  min=0)
-    params.add('decay', value= 0.1)
-    params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
-    params.add('omega', value= 3.0)
-
-    mini = Minimizer(fcn2min, params, fcn_args=(x, data))
-    out = mini.minimize()
-    assert_(np.isfinite(out.params['amp'].stderr))
-    print(out.errorbars)
-    assert_(out.errorbars == True)
-    out2 = mini.minimize(method='nelder-mead')
-    assert_(out2.params['amp'].stderr is None)
-    assert_(out2.params['decay'].stderr is None)
-    assert_(out2.params['shift'].stderr is None)
-    assert_(out2.params['omega'].stderr is None)
-    assert_(out2.params['amp'].correl is None)
-    assert_(out2.params['decay'].correl is None)
-    assert_(out2.params['shift'].correl is None)
-    assert_(out2.params['omega'].correl is None)
-    assert_(out2.errorbars == False)
-
-
-def test_multidimensional_fit_GH205():
-    # test that you don't need to flatten the output from the objective
-    # function. Tests regression for GH205.
-    pos = np.linspace(0, 99, 100)
-    xv, yv = np.meshgrid(pos, pos)
-    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
-                                             + np.cos(yv * lambda2))
-
-    data = f(xv, yv, 0.3, 3)
-    assert_(data.ndim == 2)
-
-    def fcn2min(params, xv, yv, data):
-        """ model decaying sine wave, subtract data"""
-        lambda1 = params['lambda1'].value
-        lambda2 = params['lambda2'].value
-        model = f(xv, yv, lambda1, lambda2)
-        return model - data
-
-    # create a set of Parameters
-    params = Parameters()
-    params.add('lambda1', value=0.4)
-    params.add('lambda2', value=3.2)
-
-    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
-    res = mini.minimize()
-
-class CommonMinimizerTest(unittest.TestCase):
-
-    def setUp(self):
-        """
-        test scalar minimizers except newton-cg (needs jacobian) and
-        anneal (doesn't work out of the box).
-        """
-        p_true = Parameters()
-        p_true.add('amp', value=14.0)
-        p_true.add('period', value=5.33)
-        p_true.add('shift', value=0.123)
-        p_true.add('decay', value=0.010)
-        self.p_true = p_true
-
-        n = 2500
-        xmin = 0.
-        xmax = 250.0
-        noise = np.random.normal(scale=0.7215, size=n)
-        self.x     = np.linspace(xmin, xmax, n)
-        data  = self.residual(p_true, self.x) + noise
-
-        fit_params = Parameters()
-        fit_params.add('amp', value=11.0, min=5, max=20)
-        fit_params.add('period', value=5., min=1., max=7)
-        fit_params.add('shift', value=.10,  min=0.0, max=0.2)
-        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
-        self.fit_params = fit_params
-
-        init = self.residual(fit_params, self.x)
-        self.mini = Minimizer(self.residual, fit_params, [self.x, data])
-
-    def residual(self, pars, x, data=None):
-        amp = pars['amp'].value
-        per = pars['period'].value
-        shift = pars['shift'].value
-        decay = pars['decay'].value
-
-        if abs(shift) > pi/2:
-            shift = shift - np.sign(shift) * pi
-        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
-        if data is None:
-            return model
-        return (model - data)
-        
-    def test_diffev_bounds_check(self):
-        # You need finite (min, max) for each parameter if you're using
-        # differential_evolution.
-        self.fit_params['decay'].min = None
-        self.minimizer = 'differential_evolution'
-        np.testing.assert_raises(ValueError, self.scalar_minimizer)
-
-    def test_scalar_minimizers(self):
-        # test all the scalar minimizers
-        for method in SCALAR_METHODS:
-            if method in ['newton', 'dogleg', 'trust-ncg']:
-                continue
-            self.minimizer = SCALAR_METHODS[method]
-            if method == 'Nelder-Mead':
-                sig = 0.2
-            else:
-                sig = 0.15
-            self.scalar_minimizer(sig=sig)
-        
-    def scalar_minimizer(self, sig=0.15):
-        try:
-            from scipy.optimize import minimize as scipy_minimize
-        except ImportError:
-            raise SkipTest
-
-        print(self.minimizer)
-        out = self.mini.scalar_minimize(method=self.minimizer)
-
-        fit = self.residual(out.params, self.x)
-
-        for name, par in out.params.items():
-            nout = "%s:%s" % (name, ' '*(20-len(name)))
-            print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))
-
-        for para, true_para in zip(out.params.values(),
-                                   self.p_true.values()):
-            check_wo_stderr(para, true_para.value, sig=sig)
-
-
-if __name__ == '__main__':
-    nose.main()
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
+from lmfit.minimizer import (SCALAR_METHODS, HAS_EMCEE,
+                             MinimizerResult, _lnpost)
+from lmfit.lineshapes import gaussian
+import numpy as np
+from numpy import pi
+from numpy.testing import (assert_, decorators, assert_raises,
+                           assert_almost_equal)
+import unittest
+import nose
+from nose import SkipTest
+
+
+def check(para, real_val, sig=3):
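+    # the fitted value must agree with the true value to within
+    # sig standard errors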
+    err = abs(para.value - real_val)
+    print('Check Param w/ stderr: ',  para.name, para.value, real_val, para.stderr)
+    assert(err < sig * para.stderr)
+
+def check_wo_stderr(para, real_val, sig=0.1):
+    err = abs(para.value - real_val)
+    print('Check Param w/o stderr: ', para.name, para.value, real_val, sig)
+    assert(err < sig)
+
+def check_paras(para_fit, para_real, sig=3):
+    for i in para_fit:
+        check(para_fit[i], para_real[i].value, sig=sig)
+
+def test_simple():
+    # create data to be fitted
+    np.random.seed(1)
+    x = np.linspace(0, 15, 301)
+    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+            np.random.normal(size=len(x), scale=0.2))
+
+    # define objective function: returns the array to be minimized
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        amp = params['amp'].value
+        shift = params['shift'].value
+        omega = params['omega'].value
+        decay = params['decay'].value
+
+        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('amp',   value= 10,  min=0)
+    params.add('decay', value= 0.1)
+    params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
+    params.add('omega', value= 3.0)
+
+    # do fit, here with leastsq model
+    result = minimize(fcn2min, params, args=(x, data))
+
+    # calculate final result
+    final = data + result.residual
+
+    # write error report
+    print(" --> SIMPLE --> ")
+    print(result.params)
+    report_fit(result.params)
+
+    # assert that the real parameters are found
+    for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
+        check(para, val)
+
+def test_lbfgsb():
+    p_true = Parameters()
+    p_true.add('amp', value=14.0)
+    p_true.add('period', value=5.33)
+    p_true.add('shift', value=0.123)
+    p_true.add('decay', value=0.010)
+
+    def residual(pars, x, data=None):
+        amp = pars['amp'].value
+        per = pars['period'].value
+        shift = pars['shift'].value
+        decay = pars['decay'].value
+
+        if abs(shift) > pi/2:
+            shift = shift - np.sign(shift) * pi
+        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
+        if data is None:
+            return model
+        return (model - data)
+
+    n = 2500
+    xmin = 0.
+    xmax = 250.0
+    noise = np.random.normal(scale=0.7215, size=n)
+    x     = np.linspace(xmin, xmax, n)
+    data  = residual(p_true, x) + noise
+
+    fit_params = Parameters()
+    fit_params.add('amp', value=11.0, min=5, max=20)
+    fit_params.add('period', value=5., min=1., max=7)
+    fit_params.add('shift', value=.10,  min=0.0, max=0.2)
+    fit_params.add('decay', value=6.e-3, min=0, max=0.1)
+
+    init = residual(fit_params, x)
+
+    out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data':data})
+
+    fit = residual(fit_params, x)
+
+    for name, par in out.params.items():
+        nout = "%s:%s" % (name, ' '*(20-len(name)))
+        print("%s: %s (%s) " % (nout, par.value, p_true[name].value))
+
+    for para, true_para in zip(out.params.values(), p_true.values()):
+        check_wo_stderr(para, true_para.value)
+
+def test_derive():
+    def func(pars, x, data=None):
+        a = pars['a'].value
+        b = pars['b'].value
+        c = pars['c'].value
+
+        model=a * np.exp(-b * x)+c
+        if data is None:
+            return model
+        return model - data
+
+    def dfunc(pars, x, data=None):
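+        # analytic Jacobian: rows are d(model)/da, d(model)/db, d(model)/dc,
+        # matching col_deriv=1 in the leastsq call below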
+        a = pars['a'].value
+        b = pars['b'].value
+        c = pars['c'].value
+        v = np.exp(-b*x)
+        return np.array([v, -a*x*v, np.ones(len(x))])
+
+    def f(var, x):
+        return var[0]* np.exp(-var[1] * x)+var[2]
+
+    params1 = Parameters()
+    params1.add('a', value=10)
+    params1.add('b', value=10)
+    params1.add('c', value=10)
+
+    params2 = Parameters()
+    params2.add('a', value=10)
+    params2.add('b', value=10)
+    params2.add('c', value=10)
+
+    a, b, c = 2.5, 1.3, 0.8
+    x = np.linspace(0,4,50)
+    y = f([a, b, c], x)
+    data = y + 0.15*np.random.normal(size=len(x))
+
+    # fit without analytic derivative
+    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
+    out1 = min1.leastsq()
+    fit1 = func(out1.params, x)
+
+    # fit with analytic derivative
+    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
+    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
+    fit2 = func(out2.params, x)
+
+    print('''Comparison of fit to exponential decay
+    with and without analytic derivatives, to
+       model = a*exp(-b*x) + c
+    for a = %.2f, b = %.2f, c = %.2f
+    ==============================================
+    Statistic/Parameter|   Without   | With      |
+    ----------------------------------------------
+    N Function Calls   |   %3i       |   %3i     |
+    Chi-square         |   %.4f    |   %.4f  |
+       a               |   %.4f    |   %.4f  |
+       b               |   %.4f    |   %.4f  |
+       c               |   %.4f    |   %.4f  |
+    ----------------------------------------------
+    ''' %  (a, b, c,
+            out1.nfev,   out2.nfev,
+            out1.chisqr, out2.chisqr,
+            out1.params['a'].value, out2.params['a'].value,
+            out1.params['b'].value, out2.params['b'].value,
+            out1.params['c'].value, out2.params['c'].value ))
+
+    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
+    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
+    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
+
+def test_peakfit():
+    def residual(pars, x, data=None):
+        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
+        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
+        model = g1 + g2
+        if data is None:
+            return model
+        return (model - data)
+
+    n    = 601
+    xmin = 0.
+    xmax = 15.0
+    noise = np.random.normal(scale=.65, size=n)
+    x = np.linspace(xmin, xmax, n)
+
+    org_params = Parameters()
+    org_params.add_many(('a1', 12.0, True, None, None, None),
+                        ('c1',  5.3, True, None, None, None),
+                        ('w1',  1.0, True, None, None, None),
+                        ('a2',  9.1, True, None, None, None),
+                        ('c2',  8.1, True, None, None, None),
+                        ('w2',  2.5, True, None, None, None))
+
+    data  = residual(org_params, x) + noise
+
+
+    fit_params = Parameters()
+    fit_params.add_many(('a1',  8.0, True, None, 14., None),
+                        ('c1',  5.0, True, None, None, None),
+                        ('w1',  0.7, True, None, None, None),
+                        ('a2',  3.1, True, None, None, None),
+                        ('c2',  8.8, True, None, None, None))
+
+    fit_params.add('w2', expr='2.5*w1')
+
+    myfit = Minimizer(residual, fit_params,
+                      fcn_args=(x,), fcn_kws={'data': data})
+
+    myfit.prepare_fit()
+
+    init = residual(fit_params, x)
+
+
+    out = myfit.leastsq()
+
+    # print(' N fev = ', myfit.nfev)
+    # print(myfit.chisqr, myfit.redchi, myfit.nfree)
+
+    report_fit(out.params)
+
+    fit = residual(out.params, x)
+    check_paras(out.params, org_params)
+
+
+def test_scalar_minimize_has_no_uncertainties():
+    # scalar_minimize doesn't calculate uncertainties.
+    # when a scalar_minimize is run the stderr and correl for each parameter
+    # should be None. (stderr and correl are set to None when a Parameter is
+    # initialised).
+    # This requires a reset after a leastsq fit has been done.
+    # Only when scalar_minimize calculates stderr and correl can this test
+    # be removed.
+
+    np.random.seed(1)
+    x = np.linspace(0, 15, 301)
+    data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+            np.random.normal(size=len(x), scale=0.2) )
+
+    # define objective function: returns the array to be minimized
+    def fcn2min(params, x, data):
+        """ model decaying sine wave, subtract data"""
+        amp = params['amp'].value
+        shift = params['shift'].value
+        omega = params['omega'].value
+        decay = params['decay'].value
+
+        model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('amp',   value= 10,  min=0)
+    params.add('decay', value= 0.1)
+    params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
+    params.add('omega', value= 3.0)
+
+    mini = Minimizer(fcn2min, params, fcn_args=(x, data))
+    out = mini.minimize()
+    assert_(np.isfinite(out.params['amp'].stderr))
+    print(out.errorbars)
+    assert_(out.errorbars == True)
+    out2 = mini.minimize(method='nelder-mead')
+    assert_(out2.params['amp'].stderr is None)
+    assert_(out2.params['decay'].stderr is None)
+    assert_(out2.params['shift'].stderr is None)
+    assert_(out2.params['omega'].stderr is None)
+    assert_(out2.params['amp'].correl is None)
+    assert_(out2.params['decay'].correl is None)
+    assert_(out2.params['shift'].correl is None)
+    assert_(out2.params['omega'].correl is None)
+    assert_(out2.errorbars == False)
+
+
+def test_multidimensional_fit_GH205():
+    # test that you don't need to flatten the output from the objective
+    # function. Tests regression for GH205.
+    pos = np.linspace(0, 99, 100)
+    xv, yv = np.meshgrid(pos, pos)
+    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
+                                             + np.cos(yv * lambda2))
+
+    data = f(xv, yv, 0.3, 3)
+    assert_(data.ndim == 2)
+
+    def fcn2min(params, xv, yv, data):
+        """ model decaying sine wave, subtract data"""
+        lambda1 = params['lambda1'].value
+        lambda2 = params['lambda2'].value
+        model = f(xv, yv, lambda1, lambda2)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('lambda1', value=0.4)
+    params.add('lambda2', value=3.2)
+
+    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
+    res = mini.minimize()
+
+class CommonMinimizerTest(unittest.TestCase):
+
+    def setUp(self):
+        """
+        test scalar minimizers except newton-cg (needs jacobian) and
+        anneal (doesn't work out of the box).
+        """
+        p_true = Parameters()
+        p_true.add('amp', value=14.0)
+        p_true.add('period', value=5.33)
+        p_true.add('shift', value=0.123)
+        p_true.add('decay', value=0.010)
+        self.p_true = p_true
+
+        n = 2500
+        xmin = 0.
+        xmax = 250.0
+        noise = np.random.normal(scale=0.7215, size=n)
+        self.x = np.linspace(xmin, xmax, n)
+        self.data = self.residual(p_true, self.x) + noise
+
+        fit_params = Parameters()
+        fit_params.add('amp', value=11.0, min=5, max=20)
+        fit_params.add('period', value=5., min=1., max=7)
+        fit_params.add('shift', value=.10,  min=0.0, max=0.2)
+        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
+        self.fit_params = fit_params
+
+        self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])
+
+    def residual(self, pars, x, data=None):
+        amp = pars['amp'].value
+        per = pars['period'].value
+        shift = pars['shift'].value
+        decay = pars['decay'].value
+
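+        # fold the phase into [-pi/2, pi/2] before evaluating the model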
+        if abs(shift) > pi/2:
+            shift = shift - np.sign(shift) * pi
+        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+        if data is None:
+            return model
+        return model - data
+
+    def test_diffev_bounds_check(self):
+        # You need finite (min, max) for each parameter if you're using
+        # differential_evolution.
+        self.fit_params['decay'].min = -np.inf
+        self.minimizer = 'differential_evolution'
+        np.testing.assert_raises(ValueError, self.scalar_minimizer)
+
+    def test_scalar_minimizers(self):
+        # test all the scalar minimizers
+        for method in SCALAR_METHODS:
+            if method in ['newton', 'dogleg', 'trust-ncg', 'cg']:
+                continue
+            self.minimizer = SCALAR_METHODS[method]
+            if method == 'Nelder-Mead':
+                sig = 0.2
+            else:
+                sig = 0.15
+            self.scalar_minimizer(sig=sig)
+
+    def scalar_minimizer(self, sig=0.15):
+        try:
+            from scipy.optimize import minimize as scipy_minimize
+        except ImportError:
+            raise SkipTest
+
+        print(self.minimizer)
+        out = self.mini.scalar_minimize(method=self.minimizer)
+
+        self.residual(out.params, self.x)
+
+        for name, par in out.params.items():
+            nout = "%s:%s" % (name, ' '*(20-len(name)))
+            print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))
+
+        for para, true_para in zip(out.params.values(),
+                                   self.p_true.values()):
+            check_wo_stderr(para, true_para.value, sig=sig)
+
+    @decorators.slow
+    def test_emcee(self):
+        # test emcee
+        if not HAS_EMCEE:
+            return True
+
+        np.random.seed(123456)
+        out = self.mini.emcee(nwalkers=100, steps=200,
+                              burn=50, thin=10)
+
+        check_paras(out.params, self.p_true, sig=3)
+
+    @decorators.slow
+    def test_emcee_PT(self):
+        # test emcee with parallel tempering
+        if not HAS_EMCEE:
+            return True
+
+        np.random.seed(123456)
+        self.mini.userfcn = residual_for_multiprocessing
+        out = self.mini.emcee(ntemps=4, nwalkers=50, steps=200,
+                              burn=100, thin=10, workers=2)
+
+        check_paras(out.params, self.p_true, sig=3)
+
+    @decorators.slow
+    def test_emcee_multiprocessing(self):
+        # test multiprocessing runs
+        if not HAS_EMCEE:
+            return True
+
+        np.random.seed(123456)
+        self.mini.userfcn = residual_for_multiprocessing
+        out = self.mini.emcee(steps=10, workers=4)
+
+    def test_emcee_bounds_length(self):
+        # the log-probability function checks whether the parameters lie
+        # inside the bounds, so the bounds array and the varying parameters
+        # must have matching lengths. Fixing some parameters makes
+        # nvarys != nparams, which exercises that length check.
+        if not HAS_EMCEE:
+            return True
+        self.mini.params['amp'].vary = False
+        self.mini.params['period'].vary = False
+        self.mini.params['shift'].vary = False
+
+        out = self.mini.emcee(steps=10)
+
+    @decorators.slow
+    def test_emcee_partial_bounds(self):
+        # mcmc with partial bounds
+        if not HAS_EMCEE:
+            return True
+
+        np.random.seed(123456)
+        # test mcmc output vs lm, some parameters not bounded
+        self.fit_params['amp'].max = np.inf
+        # self.fit_params['amp'].min = -np.inf
+        out = self.mini.emcee(nwalkers=100, steps=300,
+                              burn=100, thin=10)
+
+        check_paras(out.params, self.p_true, sig=3)
+
+    def test_emcee_init_with_chain(self):
+        # can you initialise with a previous chain
+        if not HAS_EMCEE:
+            return True
+
+        out = self.mini.emcee(nwalkers=100, steps=5)
+        # can initialise with a chain
+        out2 = self.mini.emcee(nwalkers=100, steps=1, pos=out.chain)
+
+        # can initialise with a correct subset of a chain
+        out3 = self.mini.emcee(nwalkers=100,
+                               steps=1,
+                               pos=out.chain[..., -1, :])
+
+        # but you can't initialise if the shape is wrong.
+        assert_raises(ValueError,
+                      self.mini.emcee,
+                      nwalkers=100,
+                      steps=1,
+                      pos=out.chain[..., -1, :-1])
+
+    def test_emcee_reuse_sampler(self):
+        if not HAS_EMCEE:
+            return True
+
+        self.mini.emcee(nwalkers=100, steps=5)
+
+        # if you've run the sampler the Minimizer object should have a _lastpos
+        # attribute
+        assert_(hasattr(self.mini, '_lastpos'))
+
+        # now try and re-use sampler
+        out2 = self.mini.emcee(steps=10, reuse_sampler=True)
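+        # reusing the sampler appends to the stored chain: 5 + 10 = 15 steps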
+        assert_(out2.chain.shape[1] == 15)
+
+        # you shouldn't be able to reuse the sampler if nvarys has changed.
+        self.mini.params['amp'].vary = False
+        assert_raises(ValueError, self.mini.emcee, reuse_sampler=True)
+
+    def test_emcee_lnpost(self):
+        # check ln likelihood is calculated correctly. It should be
+        # -0.5 * chi**2.
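+        # for unit-weight Gaussian errors, ln L = -0.5 * sum(r_i**2) + const;
+        # penalty() returns sum(r_i**2), so -0.5 * penalty should match the
+        # value computed by _lnpost (which drops the constant)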
+        result = self.mini.minimize()
+
+        # obtain the numeric values
+        # note - in this example all the parameters are varied
+        fvars = np.array([par.value for par in result.params.values()])
+
+        # calculate the cost function with scaled values (the parameters
+        # all have lower and upper bounds)
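+        # setup_bounds maps each bounded parameter onto the unbounded
+        # internal variable that is actually optimized (a MINUIT-style
+        # transformation)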
+        scaled_fvars = []
+        for par, fvar in zip(result.params.values(), fvars):
+            par.value = fvar
+            scaled_fvars.append(par.setup_bounds())
+
+        val = self.mini.penalty(np.array(scaled_fvars))
+
+        # calculate the log-likelihood value
+        bounds = np.array([(par.min, par.max)
+                           for par in result.params.values()])
+        val2 = _lnpost(fvars,
+                       self.residual,
+                       result.params,
+                       result.var_names,
+                       bounds,
+                       userargs=(self.x, self.data))
+
+        assert_almost_equal(-0.5 * val, val2)
+
+    def test_emcee_output(self):
+        # test mcmc output
+        if not HAS_EMCEE:
+            return True
+        try:
+            from pandas import DataFrame
+        except ImportError:
+            return True
+        out = self.mini.emcee(nwalkers=10, steps=20, burn=5, thin=2)
+        assert_(isinstance(out, MinimizerResult))
+        assert_(isinstance(out.flatchain, DataFrame))
+
+        # check that we can access the chains via parameter name
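+        # 10 walkers * 8 kept samples ((20 steps - 5 burn) thinned by 2) = 80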
+        assert_(out.flatchain['amp'].shape[0] == 80)
+        assert_(out.errorbars is True)
+        assert_(np.isfinite(out.params['amp'].correl['period']))
+
+        # lnprob should hold one entry per sample: chain size / nvarys (= 4)
+        assert_(np.size(out.chain)//4 == np.size(out.lnprob))
+
+    @decorators.slow
+    def test_emcee_float(self):
+        # test that it works if the residual function returns a float, not a vector
+        if not HAS_EMCEE:
+            return True
+
+        def resid(pars, x, data=None):
+            return -0.5 * np.sum(self.residual(pars, x, data=data)**2)
+
+        # just return chi2
+        def resid2(pars, x, data=None):
+            return np.sum(self.residual(pars, x, data=data)**2)
+
+        self.mini.userfcn = resid
+        np.random.seed(123456)
+        out = self.mini.emcee(nwalkers=100, steps=200,
+                              burn=50, thin=10)
+        check_paras(out.params, self.p_true, sig=3)
+
+        self.mini.userfcn = resid2
+        np.random.seed(123456)
+        out = self.mini.emcee(nwalkers=100, steps=200,
+                              burn=50, thin=10, float_behavior='chi2')
+        check_paras(out.params, self.p_true, sig=3)
+
+    @decorators.slow
+    def test_emcee_seed(self):
+        # test emcee seeding can reproduce a sampling run
+        if not HAS_EMCEE:
+            return True
+
+        out = self.mini.emcee(params=self.fit_params,
+                              nwalkers=100,
+                              steps=1, seed=1)
+        out2 = self.mini.emcee(params=self.fit_params,
+                               nwalkers=100,
+                               steps=1, seed=1)
+
+        assert_almost_equal(out.chain, out2.chain)
+
+
+def residual_for_multiprocessing(pars, x, data=None):
+    # a residual function defined at module level is needed for
+    # multiprocessing: bound methods cannot be pickled and sent to workers.
+    amp = pars['amp'].value
+    per = pars['period'].value
+    shift = pars['shift'].value
+    decay = pars['decay'].value
+
+    if abs(shift) > pi/2:
+        shift = shift - np.sign(shift) * pi
+    model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+    if data is None:
+        return model
+    return model - data
+
+
+if __name__ == '__main__':
+    nose.main()
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index beed610..29f3c2d 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -1,107 +1,152 @@
-from __future__ import print_function
-from lmfit import Parameters, Parameter
-from numpy.testing import assert_, assert_almost_equal
-import unittest
-from copy import deepcopy
-import numpy as np
-import pickle
-
-
-class TestParameters(unittest.TestCase):
-
-    def setUp(self):
-        self.params = Parameters()
-        self.params.add_many(('a', 1., True, None, None, None),
-                             ('b', 2., True, None, None, None),
-                             ('c', 3., True, None, None, '2. * a'))
-
-    def test_expr_was_evaluated(self):
-        self.params.update_constraints()
-        assert_almost_equal(self.params['c'].value,
-                            2 * self.params['a'].value)
-
-    def test_deepcopy(self):
-        # check that a simple copy works
-        b = deepcopy(self.params)
-        assert_(self.params == b)
-
-        # check that we can add a symbol to the interpreter
-        self.params['b'].expr = 'sin(1)'
-        self.params['b'].value = 10
-        assert_almost_equal(self.params['b'].value, np.sin(1))
-        assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))
-
-        # check that the symbols in the interpreter are still the same after
-        # deepcopying
-        b = deepcopy(self.params)
-
-        unique_symbols_params = self.params._asteval.user_defined_symbols()
-        unique_symbols_b = self.params._asteval.user_defined_symbols()
-        assert_(unique_symbols_b == unique_symbols_params)
-        for unique_symbol in unique_symbols_b:
-            if self.params._asteval.symtable[unique_symbol] is np.nan:
-                continue
-
-            assert_(self.params._asteval.symtable[unique_symbol]
-                    ==
-                    b._asteval.symtable[unique_symbol])
-
-    def test_add_many_params(self):
-        # test that we can add many parameters, but only parameters are added.
-        a = Parameter('a', 1)
-        b = Parameter('b', 2)
-
-        p = Parameters()
-        p.add_many(a, b)
-
-        assert_(list(p.keys()) == ['a', 'b'])
-
-    def test_expr_and_constraints_GH265(self):
-        # test that parameters are reevaluated if they have bounds and expr
-        # see GH265
-        p = Parameters()
-
-        p['a'] = Parameter('a', 10, True)
-        p['b'] = Parameter('b', 10, True, 0, 20)
-
-        p['a'].expr = '2 * b'
-        assert_almost_equal(p['a'].value, 20)
-
-        p['b'].value = 15
-        assert_almost_equal(p['b'].value, 15)
-        assert_almost_equal(p['a'].value, 30)
-
-        p['b'].value = 30
-        assert_almost_equal(p['b'].value, 20)
-        assert_almost_equal(p['a'].value, 40)
-
-    def test_pickle_parameter(self):
-        # test that we can pickle a Parameter
-        p = Parameter('a', 10, True, 0, 1)
-        pkl = pickle.dumps(p)
-
-        q = pickle.loads(pkl)
-
-        assert_(p == q)
-
-    def test_pickle_parameters(self):
-        # test that we can pickle a Parameters object
-        p = Parameters()
-        p.add('a', 10, True, 0, 100)
-        p.add('b', 10, True, 0, 100, 'a * sin(1)')
-        p.update_constraints()
-        p._asteval.symtable['abc'] = '2 * 3.142'
-
-        pkl = pickle.dumps(p, -1)
-        q = pickle.loads(pkl)
-
-        q.update_constraints()
-        assert_(p == q)
-        assert_(not p is q)
-
-        # now test if the asteval machinery survived
-        assert_(q._asteval.symtable['abc'] == '2 * 3.142')
-
-
-if __name__ == '__main__':
-    unittest.main()
+from __future__ import print_function
+from lmfit import Parameters, Parameter
+from lmfit.parameter import isclose
+from numpy.testing import assert_, assert_almost_equal, assert_equal
+import unittest
+from copy import deepcopy
+import numpy as np
+import pickle
+
+
+class TestParameters(unittest.TestCase):
+
+    def setUp(self):
+        self.params = Parameters()
+        self.params.add_many(('a', 1., True, None, None, None),
+                             ('b', 2., True, None, None, None),
+                             ('c', 3., True, None, None, '2. * a'))
+
+    def test_expr_was_evaluated(self):
+        self.params.update_constraints()
+        assert_almost_equal(self.params['c'].value,
+                            2 * self.params['a'].value)
+
+    def test_copy(self):
+        # check simple Parameters.copy() does not fail
+        # on non-trivial Parameters
+        p1 = Parameters()
+        p1.add('t', 2.0, min=0.0, max=5.0)
+        p1.add('x', 10.0)
+        p1.add('y', expr='x*t + sqrt(t)/3.0')
+
+        p2 = p1.copy()
+        assert(isinstance(p2, Parameters))
+        assert('t' in p2)
+        assert('y' in p2)
+        assert(p2['t'].max < 6.0)
+        assert(np.isinf(p2['x'].max) and p2['x'].max > 0)
+        assert(np.isinf(p2['x'].min) and p2['x'].min < 0)
+        assert('sqrt(t)' in p2['y'].expr)
+        assert(p2._asteval is not None)
+        assert(p2._asteval.symtable is not None)
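+        # y = x*t + sqrt(t)/3 = 10*2 + sqrt(2)/3 ~= 20.47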
+        assert((p2['y'].value > 20) and (p2['y'].value < 21))
+
+    def test_deepcopy(self):
+        # check that a simple copy works
+        b = deepcopy(self.params)
+        assert_(self.params == b)
+
+        # check that we can add a symbol to the interpreter
+        self.params['b'].expr = 'sin(1)'
+        self.params['b'].value = 10
+        assert_almost_equal(self.params['b'].value, np.sin(1))
+        assert_almost_equal(self.params._asteval.symtable['b'], np.sin(1))
+
+        # check that the symbols in the interpreter are still the same after
+        # deepcopying
+        b = deepcopy(self.params)
+
+        unique_symbols_params = self.params._asteval.user_defined_symbols()
+        unique_symbols_b = b._asteval.user_defined_symbols()
+        assert_(unique_symbols_b == unique_symbols_params)
+        for unique_symbol in unique_symbols_b:
+            if self.params._asteval.symtable[unique_symbol] is np.nan:
+                continue
+
+            assert_(self.params._asteval.symtable[unique_symbol]
+                    ==
+                    b._asteval.symtable[unique_symbol])
+
+    def test_add_many_params(self):
+        # test that we can add many parameters, but only parameters are added.
+        a = Parameter('a', 1)
+        b = Parameter('b', 2)
+
+        p = Parameters()
+        p.add_many(a, b)
+
+        assert_(list(p.keys()) == ['a', 'b'])
+
+    def test_expr_and_constraints_GH265(self):
+        # test that parameters are reevaluated if they have bounds and expr
+        # see GH265
+        p = Parameters()
+
+        p['a'] = Parameter('a', 10, True)
+        p['b'] = Parameter('b', 10, True, 0, 20)
+
+        assert_equal(p['b'].min, 0)
+        assert_equal(p['b'].max, 20)
+
+        p['a'].expr = '2 * b'
+        assert_almost_equal(p['a'].value, 20)
+
+        p['b'].value = 15
+        assert_almost_equal(p['b'].value, 15)
+        assert_almost_equal(p['a'].value, 30)
+
+        p['b'].value = 30
+        assert_almost_equal(p['b'].value, 20)
+        assert_almost_equal(p['a'].value, 40)
+
+    def test_pickle_parameter(self):
+        # test that we can pickle a Parameter
+        p = Parameter('a', 10, True, 0, 1)
+        pkl = pickle.dumps(p)
+
+        q = pickle.loads(pkl)
+
+        assert_(p == q)
+
+    def test_pickle_parameters(self):
+        # test that we can pickle a Parameters object
+        p = Parameters()
+        p.add('a', 10, True, 0, 100)
+        p.add('b', 10, True, 0, 100, 'a * sin(1)')
+        p.update_constraints()
+        p._asteval.symtable['abc'] = '2 * 3.142'
+
+        pkl = pickle.dumps(p, -1)
+        q = pickle.loads(pkl)
+
+        q.update_constraints()
+        assert_(p == q)
+        assert_(p is not q)
+
+        # now test if the asteval machinery survived
+        assert_(q._asteval.symtable['abc'] == '2 * 3.142')
+
+        # check that unpickling of Parameters is not affected by exprs that
+        # refer to Parameters defined later on. In the following example,
+        # var_0.expr refers to var_1, which appears later in the Parameters
+        # OrderedDict.
+        p = Parameters()
+        p.add('var_0', value=1)
+        p.add('var_1', value=2)
+        p['var_0'].expr = 'var_1'
+        pkl = pickle.dumps(p)
+        q = pickle.loads(pkl)
+
+    def test_isclose(self):
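+        # isclose follows the np.isclose convention,
+        # |a - b| <= atol + rtol * |b|, with inf close only to an equal inf
+        # and nan never close to anything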
+        assert_(isclose(1., 1+1e-5, atol=1e-4, rtol=0))
+        assert_(not isclose(1., 1+1e-5, atol=1e-6, rtol=0))
+        assert_(isclose(1e10, 1.00001e10, rtol=1e-5, atol=1e-8))
+        assert_(not isclose(0, np.inf))
+        assert_(not isclose(-np.inf, np.inf))
+        assert_(isclose(np.inf, np.inf))
+        assert_(not isclose(np.nan, np.nan))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
index 26125ea..24b1089 100644
--- a/tests/test_params_set.py
+++ b/tests/test_params_set.py
@@ -1,48 +1,48 @@
-import numpy as np
-from numpy.testing import assert_allclose
-from lmfit import Parameters, minimize, report_fit
-from lmfit.lineshapes import gaussian
-from lmfit.models import VoigtModel
-
-def test_param_set():
-    np.random.seed(2015)
-    x = np.arange(0, 20, 0.05)
-    y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
-    y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))
-
-    model  = VoigtModel()
-    params = model.guess(y, x=x)
-
-    # test #1:  gamma is constrained to equal sigma
-    assert(params['gamma'].expr == 'sigma')
-    params.update_constraints()
-    sigval = params['gamma'].value
-    assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
-
-    # test #2: explicitly setting a param value should work, even when
-    #          it had been an expression.  The value will be left as fixed
-    gamval = 0.87543
-    params['gamma'].set(value=gamval)
-    assert(params['gamma'].expr is None)
-    assert(not params['gamma'].vary)
-    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
-
-    # test #3: explicitly setting an expression should work
-    # Note, the only way to ensure that **ALL** constraints are up to date
-    # is to call params.update_constraints(). This is because the constraint
-    # may have multiple dependencies.
-    params['gamma'].set(expr='sigma/2.0')
-    assert(params['gamma'].expr is not None)
-    assert(not params['gamma'].vary)
-    params.update_constraints()
-    assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)
-
-    # test #4: explicitly setting a param value WITH vary=True
-    #          will set it to be variable
-    gamval = 0.7777
-    params['gamma'].set(value=gamval, vary=True)
-    assert(params['gamma'].expr is None)
-    assert(params['gamma'].vary)
-    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
-
+import numpy as np
+from numpy.testing import assert_allclose
+from lmfit import Parameters, minimize, report_fit
+from lmfit.lineshapes import gaussian
+from lmfit.models import VoigtModel
+
+def test_param_set():
+    np.random.seed(2015)
+    x = np.arange(0, 20, 0.05)
+    y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
+    y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))
+
+    model  = VoigtModel()
+    params = model.guess(y, x=x)
+
+    # test #1:  gamma is constrained to equal sigma
+    assert(params['gamma'].expr == 'sigma')
+    params.update_constraints()
+    sigval = params['sigma'].value
+    assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
+
+    # test #2: explicitly setting a param value should work, even when
+    #          it had been an expression.  The value will be left as fixed
+    gamval = 0.87543
+    params['gamma'].set(value=gamval)
+    assert(params['gamma'].expr is None)
+    assert(not params['gamma'].vary)
+    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
+
+    # test #3: explicitly setting an expression should work
+    # Note, the only way to ensure that **ALL** constraints are up to date
+    # is to call params.update_constraints(). This is because the constraint
+    # may have multiple dependencies.
+    params['gamma'].set(expr='sigma/2.0')
+    assert(params['gamma'].expr is not None)
+    assert(not params['gamma'].vary)
+    params.update_constraints()
+    assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)
+
+    # test #4: explicitly setting a param value WITH vary=True
+    #          will set it to be variable
+    gamval = 0.7777
+    params['gamma'].set(value=gamval, vary=True)
+    assert(params['gamma'].expr is None)
+    assert(params['gamma'].vary)
+    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
+
 test_param_set()
\ No newline at end of file
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
index d854467..cabf118 100644
--- a/tests/test_stepmodel.py
+++ b/tests/test_stepmodel.py
@@ -1,58 +1,58 @@
-import numpy as np
-from lmfit import fit_report
-from lmfit.models import StepModel, ConstantModel
-from lmfit_testutils import assert_paramval, assert_paramattr
-
-def get_data():
-    x  = np.linspace(0, 10, 201)
-    dat = np.ones_like(x)
-    dat[:48] = 0.0
-    dat[48:77] = np.arange(77-48)/(77.0-48)
-    dat = dat +  5e-2*np.random.randn(len(x))
-    dat = 110.2 * dat + 12.0
-    return x, dat
-
-def test_stepmodel_linear():
-    x, y = get_data()
-    stepmod = StepModel(form='linear')
-    const = ConstantModel()
-    pars = stepmod.guess(y, x)
-    pars = pars + const.make_params(c=3*y.min())
-    mod = stepmod + const
-
-    out = mod.fit(y, pars, x=x)
-
-    assert(out.nfev > 5)
-    assert(out.nvarys == 4)
-    assert(out.chisqr > 1)
-    assert(out.params['c'].value > 3)
-    assert(out.params['center'].value > 1)
-    assert(out.params['center'].value < 4)
-    assert(out.params['sigma'].value > 0.5)
-    assert(out.params['sigma'].value < 3.5)
-    assert(out.params['amplitude'].value > 50)
-
-
-def test_stepmodel_erf():
-    x, y = get_data()
-    stepmod = StepModel(form='linear')
-    const = ConstantModel()
-    pars = stepmod.guess(y, x)
-    pars = pars + const.make_params(c=3*y.min())
-    mod = stepmod + const
-
-    out = mod.fit(y, pars, x=x)
-
-    assert(out.nfev > 5)
-    assert(out.nvarys == 4)
-    assert(out.chisqr > 1)
-    assert(out.params['c'].value > 3)
-    assert(out.params['center'].value > 1)
-    assert(out.params['center'].value < 4)
-    assert(out.params['amplitude'].value > 50)
-    assert(out.params['sigma'].value > 0.2)
-    assert(out.params['sigma'].value < 1.5)
-
-if __name__ == '__main__':
-    # test_stepmodel_linear()
-    test_stepmodel_erf()
+import numpy as np
+from lmfit import fit_report
+from lmfit.models import StepModel, ConstantModel
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+def get_data():
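+    # synthetic step data: zero below x = 2.4, a linear ramp up to x = 3.85,
+    # then one; scaled by 110.2 and offset by 12, with gaussian noise of
+    # about 5% of the step height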
+    x  = np.linspace(0, 10, 201)
+    dat = np.ones_like(x)
+    dat[:48] = 0.0
+    dat[48:77] = np.arange(77-48)/(77.0-48)
+    dat = dat + 5e-2*np.random.randn(len(x))
+    dat = 110.2 * dat + 12.0
+    return x, dat
+
+def test_stepmodel_linear():
+    x, y = get_data()
+    stepmod = StepModel(form='linear')
+    const = ConstantModel()
+    pars = stepmod.guess(y, x)
+    pars = pars + const.make_params(c=3*y.min())
+    mod = stepmod + const
+
+    out = mod.fit(y, pars, x=x)
+
+    assert(out.nfev > 5)
+    assert(out.nvarys == 4)
+    assert(out.chisqr > 1)
+    assert(out.params['c'].value > 3)
+    assert(out.params['center'].value > 1)
+    assert(out.params['center'].value < 4)
+    assert(out.params['sigma'].value > 0.5)
+    assert(out.params['sigma'].value < 3.5)
+    assert(out.params['amplitude'].value > 50)
+
+
+def test_stepmodel_erf():
+    x, y = get_data()
+    stepmod = StepModel(form='erf')
+    const = ConstantModel()
+    pars = stepmod.guess(y, x)
+    pars = pars + const.make_params(c=3*y.min())
+    mod = stepmod + const
+
+    out = mod.fit(y, pars, x=x)
+
+    assert(out.nfev > 5)
+    assert(out.nvarys == 4)
+    assert(out.chisqr > 1)
+    assert(out.params['c'].value > 3)
+    assert(out.params['center'].value > 1)
+    assert(out.params['center'].value < 4)
+    assert(out.params['amplitude'].value > 50)
+    assert(out.params['sigma'].value > 0.2)
+    assert(out.params['sigma'].value < 1.5)
+
+if __name__ == '__main__':
+    # test_stepmodel_linear()
+    test_stepmodel_erf()
diff --git a/versioneer.py b/versioneer.py
index 4162e8a..481180d 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,901 +1,901 @@
-
-# Version: 0.12
-
-"""
-The Versioneer
-==============
-
-* like a rocketeer, but for versions!
-* https://github.com/warner/python-versioneer
-* Brian Warner
-* License: Public Domain
-* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
-
-[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer)
-
-This is a tool for managing a recorded version number in distutils-based
-python projects. The goal is to remove the tedious and error-prone "update
-the embedded version string" step from your release process. Making a new
-release should be as easy as recording a new tag in your version-control
-system, and maybe making new tarballs.
-
-
-## Quick Install
-
-* `pip install versioneer` to somewhere on your $PATH
-* run `versioneer-installer` in your source tree: this installs `versioneer.py`
-* follow the instructions below (also in the `versioneer.py` docstring)
-
-## Version Identifiers
-
-Source trees come from a variety of places:
-
-* a version-control system checkout (mostly used by developers)
-* a nightly tarball, produced by build automation
-* a snapshot tarball, produced by a web-based VCS browser, like github's
-  "tarball from tag" feature
-* a release tarball, produced by "setup.py sdist", distributed through PyPI
-
-Within each source tree, the version identifier (either a string or a number,
-this tool is format-agnostic) can come from a variety of places:
-
-* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
-  about recent "tags" and an absolute revision-id
-* the name of the directory into which the tarball was unpacked
-* an expanded VCS keyword ($Id$, etc)
-* a `_version.py` created by some earlier build step
-
-For released software, the version identifier is closely related to a VCS
-tag. Some projects use tag names that include more than just the version
-string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
-needs to strip the tag prefix to extract the version identifier. For
-unreleased software (between tags), the version identifier should provide
-enough information to help developers recreate the same tree, while also
-giving them an idea of roughly how old the tree is (after version 1.2, before
-version 1.3). Many VCS systems can report a description that captures this,
-for example 'git describe --tags --dirty --always' reports things like
-"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
-0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
-uncommitted changes).
-
-The version identifier is used for multiple purposes:
-
-* to allow the module to self-identify its version: `myproject.__version__`
-* to choose a name and prefix for a 'setup.py sdist' tarball
-
-## Theory of Operation
-
-Versioneer works by adding a special `_version.py` file into your source
-tree, where your `__init__.py` can import it. This `_version.py` knows how to
-dynamically ask the VCS tool for version information at import time. However,
-when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
-copy is replaced by a small static file that contains just the generated
-version data.
-
-`_version.py` also contains `$Revision$` markers, and the installation
-process marks `_version.py` to have this marker rewritten with a tag name
-during the "git archive" command. As a result, generated tarballs will
-contain enough information to get the proper version.
-
-
-## Installation
-
-First, decide on values for the following configuration variables:
-
-* `VCS`: the version control system you use. Currently accepts "git".
-
-* `versionfile_source`:
-
-  A project-relative pathname into which the generated version strings should
-  be written. This is usually a `_version.py` next to your project's main
-  `__init__.py` file, so it can be imported at runtime. If your project uses
-  `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
-  This file should be checked in to your VCS as usual: the copy created below
-  by `setup.py versioneer` will include code that parses expanded VCS
-  keywords in generated tarballs. The 'build' and 'sdist' commands will
-  replace it with a copy that has just the calculated version string.
-
-  This must be set even if your project does not have any modules (and will
-  therefore never import `_version.py`), since "setup.py sdist" -based trees
-  still need somewhere to record the pre-calculated version strings. Anywhere
-  in the source tree should do. If there is a `__init__.py` next to your
-  `_version.py`, the `setup.py versioneer` command (described below) will
-  append some `__version__`-setting assignments, if they aren't already
-  present.
-
-* `versionfile_build`:
-
-  Like `versionfile_source`, but relative to the build directory instead of
-  the source directory. These will differ when your setup.py uses
-  'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
-  then you will probably have `versionfile_build='myproject/_version.py'` and
-  `versionfile_source='src/myproject/_version.py'`.
-
-  If this is set to None, then `setup.py build` will not attempt to rewrite
-  any `_version.py` in the built tree. If your project does not have any
-  libraries (e.g. if it only builds a script), then you should use
-  `versionfile_build = None` and override `distutils.command.build_scripts`
-  to explicitly insert a copy of `versioneer.get_version()` into your
-  generated script.
-
-* `tag_prefix`:
-
-  a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
-  If your tags look like 'myproject-1.2.0', then you should use
-  tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
-  should be an empty string.
-
-* `parentdir_prefix`:
-
-  a string, frequently the same as tag_prefix, which appears at the start of
-  all unpacked tarball filenames. If your tarball unpacks into
-  'myproject-1.2.0', this should be 'myproject-'.
-
-This tool provides one script, named `versioneer-installer`. That script does
-one thing: write a copy of `versioneer.py` into the current directory.
-
-To versioneer-enable your project:
-
-* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
-  source tree.
-
-* 2: add the following lines to the top of your `setup.py`, with the
-  configuration values you decided earlier:
-
-        import versioneer
-        versioneer.VCS = 'git'
-        versioneer.versionfile_source = 'src/myproject/_version.py'
-        versioneer.versionfile_build = 'myproject/_version.py'
-        versioneer.tag_prefix = '' # tags are like 1.2.0
-        versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
-
-* 3: add the following arguments to the setup() call in your setup.py:
-
-        version=versioneer.get_version(),
-        cmdclass=versioneer.get_cmdclass(),
-
-* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
-  modify your `__init__.py` (if one exists next to `_version.py`) to define
-  `__version__` (by calling a function from `_version.py`). It will also
-  modify your `MANIFEST.in` to include both `versioneer.py` and the generated
-  `_version.py` in sdist tarballs.
-
-* 5: commit these changes to your VCS. To make sure you won't forget,
-  `setup.py versioneer` will mark everything it touched for addition.
-
-## Post-Installation Usage
-
-Once established, all uses of your tree from a VCS checkout should get the
-current version string. All generated tarballs should include an embedded
-version string (so users who unpack them will not need a VCS tool installed).
-
-If you distribute your project through PyPI, then the release process should
-boil down to two steps:
-
-* 1: git tag 1.0
-* 2: python setup.py register sdist upload
-
-If you distribute it through github (i.e. users use github to generate
-tarballs with `git archive`), the process is:
-
-* 1: git tag 1.0
-* 2: git push; git push --tags
-
-Currently, all version strings must be based upon a tag. Versioneer will
-report "unknown" until your tree has at least one tag in its history. This
-restriction will be fixed eventually (see issue #12).
-
-## Version-String Flavors
-
-Code which uses Versioneer can learn about its version string at runtime by
-importing `_version` from your main `__init__.py` file and running the
-`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
-import the top-level `versioneer.py` and run `get_versions()`.
-
-Both functions return a dictionary with different keys for different flavors
-of the version string:
-
-* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
-  this uses the output of `git describe --tags --dirty --always` but strips
-  the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
-  is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
-  that this commit is two revisions ("-2-") beyond the "0.11" tag. For
-  released software (exactly equal to a known tag), the identifier will only
-  contain the stripped tag, e.g. "0.11".
-
-* `['full']`: detailed revision identifier. For Git, this is the full SHA1
-  commit id, followed by "-dirty" if the tree contains uncommitted changes,
-  e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
-
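-For example, a hypothetical checkout that is two revisions past the 0.11
-tag and contains uncommitted changes would report the two flavors as:
-
-    {'version': '0.11-2-g1076c97-dirty',
-     'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty'}
-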
-Some variants are more useful than others. Including `full` in a bug report
-should allow developers to reconstruct the exact code being tested (or
-indicate the presence of local changes that should be shared with the
-developers). `version` is suitable for display in an "about" box or a CLI
-`--version` output: it can be easily compared against release notes and lists
-of bugs fixed in various releases.
-
-In the future, this will also include a
-[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
-(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
-for a hash-based revision id), but is safe to use in a `setup.py`
-"`version=`" argument. It also enables tools like *pip* to compare version
-strings and evaluate compatibility constraint declarations.
-
-The `setup.py versioneer` command adds the following text to your
-`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
-
-    from ._version import get_versions
-    __version__ = get_versions()['version']
-    del get_versions
-
-## Updating Versioneer
-
-To upgrade your project to a new release of Versioneer, do the following:
-
-* install the new Versioneer (`pip install -U versioneer` or equivalent)
-* re-run `versioneer-installer` in your source tree to replace your copy of
-  `versioneer.py`
-* edit `setup.py`, if necessary, to include any new configuration settings
-  indicated by the release notes
-* re-run `setup.py versioneer` to replace `SRC/_version.py`
-* commit any changed files
-
-### Upgrading from 0.10 to 0.11
-
-You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
-`setup.py versioneer`. This will enable the use of additional version-control
-systems (SVN, etc) in the future.
-
-### Upgrading from 0.11 to 0.12
-
-Nothing special.
-
-## Future Directions
-
-This tool is designed to be easily extended to other version-control
-systems: all VCS-specific components are in separate directories like
-src/git/ . The top-level `versioneer.py` script is assembled from these
-components by running make-versioneer.py . In the future, make-versioneer.py
-will take a VCS name as an argument, and will construct a version of
-`versioneer.py` that is specific to the given VCS. It might also take the
-configuration arguments that are currently provided manually during
-installation by editing setup.py . Alternatively, it might go the other
-direction and include code from all supported VCS systems, reducing the
-number of intermediate scripts.
-
-
-## License
-
-To make Versioneer easier to embed, all its code is hereby released into the
-public domain. The `_version.py` that it creates is also in the public
-domain.
-
-"""
-
-import os, sys, re, subprocess, errno
-from distutils.core import Command
-from distutils.command.sdist import sdist as _sdist
-from distutils.command.build import build as _build
-
-# these configuration settings will be overridden by setup.py after it
-# imports us
-versionfile_source = None
-versionfile_build = None
-tag_prefix = None
-parentdir_prefix = None
-VCS = None
-
-# these dictionaries contain VCS-specific tools
-LONG_VERSION_PY = {}
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
-    assert isinstance(commands, list)
-    p = None
-    for c in commands:
-        try:
-            # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
-            break
-        except EnvironmentError:
-            e = sys.exc_info()[1]
-            if e.errno == errno.ENOENT:
-                continue
-            if verbose:
-                print("unable to run %s" % args[0])
-                print(e)
-            return None
-    else:
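-        # this "else" belongs to the "for" loop above and runs only when
-        # none of the candidate commands could be started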
-        if verbose:
-            print("unable to find command, tried %s" % (commands,))
-        return None
-    stdout = p.communicate()[0].strip()
-    if sys.version >= '3':
-        stdout = stdout.decode()
-    if p.returncode != 0:
-        if verbose:
-            print("unable to run %s (error)" % args[0])
-        return None
-    return stdout
-
-LONG_VERSION_PY['git'] = '''
-# This file helps to compute a version number in source trees obtained from
-# git-archive tarball (such as those provided by githubs download-from-tag
-# feature). Distribution tarballs (built by setup.py sdist) and build
-# directories (produced by setup.py build) will contain a much shorter file
-# that just contains the computed version number.
-
-# This file is released into the public domain. Generated by
-# versioneer-0.12 (https://github.com/warner/python-versioneer)
-
-# these strings will be replaced by git during git-archive
-git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
-git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
-
-# these strings are filled in when 'setup.py versioneer' creates _version.py
-tag_prefix = "%(TAG_PREFIX)s"
-parentdir_prefix = "%(PARENTDIR_PREFIX)s"
-versionfile_source = "%(VERSIONFILE_SOURCE)s"
-
-import os, sys, re, subprocess, errno
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
-    assert isinstance(commands, list)
-    p = None
-    for c in commands:
-        try:
-            # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
-            break
-        except EnvironmentError:
-            e = sys.exc_info()[1]
-            if e.errno == errno.ENOENT:
-                continue
-            if verbose:
-                print("unable to run %%s" %% args[0])
-                print(e)
-            return None
-    else:
-        if verbose:
-            print("unable to find command, tried %%s" %% (commands,))
-        return None
-    stdout = p.communicate()[0].strip()
-    if sys.version >= '3':
-        stdout = stdout.decode()
-    if p.returncode != 0:
-        if verbose:
-            print("unable to run %%s (error)" %% args[0])
-        return None
-    return stdout
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose=False):
-    # Source tarballs conventionally unpack into a directory that includes
-    # both the project name and a version string.
-    dirname = os.path.basename(root)
-    if not dirname.startswith(parentdir_prefix):
-        if verbose:
-            print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
-                  (root, dirname, parentdir_prefix))
-        return None
-    return {"version": dirname[len(parentdir_prefix):], "full": ""}
-
-def git_get_keywords(versionfile_abs):
-    # the code embedded in _version.py can just fetch the value of these
-    # keywords. When used from setup.py, we don't want to import _version.py,
-    # so we do it with a regexp instead. This function is not used from
-    # _version.py.
-    keywords = {}
-    try:
-        f = open(versionfile_abs,"r")
-        for line in f.readlines():
-            if line.strip().startswith("git_refnames ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["refnames"] = mo.group(1)
-            if line.strip().startswith("git_full ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["full"] = mo.group(1)
-        f.close()
-    except EnvironmentError:
-        pass
-    return keywords
-
-def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
-    if not keywords:
-        return {} # keyword-finding function failed to find keywords
-    refnames = keywords["refnames"].strip()
-    if refnames.startswith("$Format"):
-        if verbose:
-            print("keywords are unexpanded, not using")
-        return {} # unexpanded, so not in an unpacked git-archive tarball
-    refs = set([r.strip() for r in refnames.strip("()").split(",")])
-    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
-    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
-    TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
-    if not tags:
-        # Either we're using git < 1.8.3, or there really are no tags. We use
-        # a heuristic: assume all version tags have a digit. The old git %%d
-        # expansion behaves like git log --decorate=short and strips out the
-        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
-        # between branches and tags. By ignoring refnames without digits, we
-        # filter out many common branch names like "release" and
-        # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
-        if verbose:
-            print("discarding '%%s', no digits" %% ",".join(refs-tags))
-    if verbose:
-        print("likely tags: %%s" %% ",".join(sorted(tags)))
-    for ref in sorted(tags):
-        # sorting will prefer e.g. "2.0" over "2.0rc1"
-        if ref.startswith(tag_prefix):
-            r = ref[len(tag_prefix):]
-            if verbose:
-                print("picking %%s" %% r)
-            return { "version": r,
-                     "full": keywords["full"].strip() }
-    # no suitable tags, so we use the full revision id
-    if verbose:
-        print("no suitable tags, using full revision id")
-    return { "version": keywords["full"].strip(),
-             "full": keywords["full"].strip() }
-
-
-def git_versions_from_vcs(tag_prefix, root, verbose=False):
-    # this runs 'git' from the root of the source tree. This only gets called
-    # if the git-archive 'subst' keywords were *not* expanded, and
-    # _version.py hasn't already been rewritten with a short version string,
-    # meaning we're inside a checked out source tree.
-
-    if not os.path.exists(os.path.join(root, ".git")):
-        if verbose:
-            print("no .git in %%s" %% root)
-        return {}
-
-    GITS = ["git"]
-    if sys.platform == "win32":
-        GITS = ["git.cmd", "git.exe"]
-    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
-                         cwd=root)
-    if stdout is None:
-        return {}
-    if not stdout.startswith(tag_prefix):
-        if verbose:
-            print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
-        return {}
-    tag = stdout[len(tag_prefix):]
-    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
-    if stdout is None:
-        return {}
-    full = stdout.strip()
-    if tag.endswith("-dirty"):
-        full += "-dirty"
-    return {"version": tag, "full": full}
-
-
-def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
-    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
-    # __file__, we can work backwards from there to the root. Some
-    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
-    # case we can only use expanded keywords.
-
-    keywords = { "refnames": git_refnames, "full": git_full }
-    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
-    if ver:
-        return ver
-
-    try:
-        root = os.path.abspath(__file__)
-        # versionfile_source is the relative path from the top of the source
-        # tree (where the .git directory might live) to this file. Invert
-        # this to find the root from __file__.
-        for i in range(len(versionfile_source.split(os.sep))):
-            root = os.path.dirname(root)
-    except NameError:
-        return default
-
-    return (git_versions_from_vcs(tag_prefix, root, verbose)
-            or versions_from_parentdir(parentdir_prefix, root, verbose)
-            or default)
-'''
-
-def git_get_keywords(versionfile_abs):
-    # the code embedded in _version.py can just fetch the value of these
-    # keywords. When used from setup.py, we don't want to import _version.py,
-    # so we do it with a regexp instead. This function is not used from
-    # _version.py.
-    keywords = {}
-    try:
-        f = open(versionfile_abs,"r")
-        for line in f.readlines():
-            if line.strip().startswith("git_refnames ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["refnames"] = mo.group(1)
-            if line.strip().startswith("git_full ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["full"] = mo.group(1)
-        f.close()
-    except EnvironmentError:
-        pass
-    return keywords
-
-def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
-    if not keywords:
-        return {} # keyword-finding function failed to find keywords
-    refnames = keywords["refnames"].strip()
-    if refnames.startswith("$Format"):
-        if verbose:
-            print("keywords are unexpanded, not using")
-        return {} # unexpanded, so not in an unpacked git-archive tarball
-    refs = set([r.strip() for r in refnames.strip("()").split(",")])
-    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
-    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
-    TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
-    if not tags:
-        # Either we're using git < 1.8.3, or there really are no tags. We use
-        # a heuristic: assume all version tags have a digit. The old git %d
-        # expansion behaves like git log --decorate=short and strips out the
-        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
-        # between branches and tags. By ignoring refnames without digits, we
-        # filter out many common branch names like "release" and
-        # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
-        if verbose:
-            print("discarding '%s', no digits" % ",".join(refs-tags))
-    if verbose:
-        print("likely tags: %s" % ",".join(sorted(tags)))
-    for ref in sorted(tags):
-        # sorting will prefer e.g. "2.0" over "2.0rc1"
-        if ref.startswith(tag_prefix):
-            r = ref[len(tag_prefix):]
-            if verbose:
-                print("picking %s" % r)
-            return { "version": r,
-                     "full": keywords["full"].strip() }
-    # no suitable tags, so we use the full revision id
-    if verbose:
-        print("no suitable tags, using full revision id")
-    return { "version": keywords["full"].strip(),
-             "full": keywords["full"].strip() }
-
-
-def git_versions_from_vcs(tag_prefix, root, verbose=False):
-    # this runs 'git' from the root of the source tree. This only gets called
-    # if the git-archive 'subst' keywords were *not* expanded, and
-    # _version.py hasn't already been rewritten with a short version string,
-    # meaning we're inside a checked out source tree.
-
-    if not os.path.exists(os.path.join(root, ".git")):
-        if verbose:
-            print("no .git in %s" % root)
-        return {}
-
-    GITS = ["git"]
-    if sys.platform == "win32":
-        GITS = ["git.cmd", "git.exe"]
-    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
-                         cwd=root)
-    if stdout is None:
-        return {}
-    if not stdout.startswith(tag_prefix):
-        if verbose:
-            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
-        return {}
-    tag = stdout[len(tag_prefix):]
-    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
-    if stdout is None:
-        return {}
-    full = stdout.strip()
-    if tag.endswith("-dirty"):
-        full += "-dirty"
-    return {"version": tag, "full": full}
-
-
-def do_vcs_install(manifest_in, versionfile_source, ipy):
-    GITS = ["git"]
-    if sys.platform == "win32":
-        GITS = ["git.cmd", "git.exe"]
-    files = [manifest_in, versionfile_source]
-    if ipy:
-        files.append(ipy)
-    try:
-        me = __file__
-        if me.endswith(".pyc") or me.endswith(".pyo"):
-            me = os.path.splitext(me)[0] + ".py"
-        versioneer_file = os.path.relpath(me)
-    except NameError:
-        versioneer_file = "versioneer.py"
-    files.append(versioneer_file)
-    present = False
-    try:
-        f = open(".gitattributes", "r")
-        for line in f.readlines():
-            if line.strip().startswith(versionfile_source):
-                if "export-subst" in line.strip().split()[1:]:
-                    present = True
-        f.close()
-    except EnvironmentError:
-        pass
-    if not present:
-        f = open(".gitattributes", "a+")
-        f.write("%s export-subst\n" % versionfile_source)
-        f.close()
-        files.append(".gitattributes")
-    run_command(GITS, ["add", "--"] + files)
-
-def versions_from_parentdir(parentdir_prefix, root, verbose=False):
-    # Source tarballs conventionally unpack into a directory that includes
-    # both the project name and a version string.
-    dirname = os.path.basename(root)
-    if not dirname.startswith(parentdir_prefix):
-        if verbose:
-            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
-                  (root, dirname, parentdir_prefix))
-        return None
-    return {"version": dirname[len(parentdir_prefix):], "full": ""}
-
-SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.12) from
-# revision-control system data, or from the parent directory name of an
-# unpacked source archive. Distribution tarballs contain a pre-generated copy
-# of this file.
-
-version_version = '%(version)s'
-version_full = '%(full)s'
-def get_versions(default={}, verbose=False):
-    return {'version': version_version, 'full': version_full}
-
-"""
-
-DEFAULT = {"version": "unknown", "full": "unknown"}
-
-def versions_from_file(filename):
-    versions = {}
-    try:
-        with open(filename) as f:
-            for line in f.readlines():
-                mo = re.match("version_version = '([^']+)'", line)
-                if mo:
-                    versions["version"] = mo.group(1)
-                mo = re.match("version_full = '([^']+)'", line)
-                if mo:
-                    versions["full"] = mo.group(1)
-    except EnvironmentError:
-        return {}
-
-    return versions
-
-def write_to_version_file(filename, versions):
-    with open(filename, "w") as f:
-        f.write(SHORT_VERSION_PY % versions)
-
-    print("set %s to '%s'" % (filename, versions["version"]))
-
-
-def get_root():
-    try:
-        return os.path.dirname(os.path.abspath(__file__))
-    except NameError:
-        return os.path.dirname(os.path.abspath(sys.argv[0]))
-
-def vcs_function(vcs, suffix):
-    return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
-
-def get_versions(default=DEFAULT, verbose=False):
-    # returns dict with two keys: 'version' and 'full'
-    assert versionfile_source is not None, "please set versioneer.versionfile_source"
-    assert tag_prefix is not None, "please set versioneer.tag_prefix"
-    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
-    assert VCS is not None, "please set versioneer.VCS"
-
-    # I am in versioneer.py, which must live at the top of the source tree,
-    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
-    # don't have __file__, in which case we fall back to sys.argv[0] (which
-    # ought to be the setup.py script). We prefer __file__ since that's more
-    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
-    root = get_root()
-    versionfile_abs = os.path.join(root, versionfile_source)
-
-    # extract version from first of _version.py, VCS command (e.g. 'git
-    # describe'), parentdir. This is meant to work for developers using a
-    # source checkout, for users of a tarball created by 'setup.py sdist',
-    # and for users of a tarball/zipball created by 'git archive' or github's
-    # download-from-tag feature or the equivalent in other VCSes.
-
-    get_keywords_f = vcs_function(VCS, "get_keywords")
-    versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
-    if get_keywords_f and versions_from_keywords_f:
-        vcs_keywords = get_keywords_f(versionfile_abs)
-        ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
-        if ver:
-            if verbose: print("got version from expanded keyword %s" % ver)
-            return ver
-
-    ver = versions_from_file(versionfile_abs)
-    if ver:
-        if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
-        return ver
-
-    versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
-    if versions_from_vcs_f:
-        ver = versions_from_vcs_f(tag_prefix, root, verbose)
-        if ver:
-            if verbose: print("got version from VCS %s" % ver)
-            return ver
-
-    ver = versions_from_parentdir(parentdir_prefix, root, verbose)
-    if ver:
-        if verbose: print("got version from parentdir %s" % ver)
-        return ver
-
-    if verbose: print("got version from default %s" % default)
-    return default
-
-def get_version(verbose=False):
-    return get_versions(verbose=verbose)["version"]
-
-class cmd_version(Command):
-    description = "report generated version string"
-    user_options = []
-    boolean_options = []
-    def initialize_options(self):
-        pass
-    def finalize_options(self):
-        pass
-    def run(self):
-        ver = get_version(verbose=True)
-        print("Version is currently: %s" % ver)
-
-
-class cmd_build(_build):
-    def run(self):
-        versions = get_versions(verbose=True)
-        _build.run(self)
-        # now locate _version.py in the new build/ directory and replace it
-        # with an updated value
-        if versionfile_build:
-            target_versionfile = os.path.join(self.build_lib, versionfile_build)
-            print("UPDATING %s" % target_versionfile)
-            os.unlink(target_versionfile)
-            with open(target_versionfile, "w") as f:
-                f.write(SHORT_VERSION_PY % versions)
-
-if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
-    from cx_Freeze.dist import build_exe as _build_exe
-
-    class cmd_build_exe(_build_exe):
-        def run(self):
-            versions = get_versions(verbose=True)
-            target_versionfile = versionfile_source
-            print("UPDATING %s" % target_versionfile)
-            os.unlink(target_versionfile)
-            with open(target_versionfile, "w") as f:
-                f.write(SHORT_VERSION_PY % versions)
-
-            _build_exe.run(self)
-            os.unlink(target_versionfile)
-            with open(versionfile_source, "w") as f:
-                assert VCS is not None, "please set versioneer.VCS"
-                LONG = LONG_VERSION_PY[VCS]
-                f.write(LONG % {"DOLLAR": "$",
-                                "TAG_PREFIX": tag_prefix,
-                                "PARENTDIR_PREFIX": parentdir_prefix,
-                                "VERSIONFILE_SOURCE": versionfile_source,
-                                })
-
-class cmd_sdist(_sdist):
-    def run(self):
-        versions = get_versions(verbose=True)
-        self._versioneer_generated_versions = versions
-        # unless we update this, the command will keep using the old version
-        self.distribution.metadata.version = versions["version"]
-        return _sdist.run(self)
-
-    def make_release_tree(self, base_dir, files):
-        _sdist.make_release_tree(self, base_dir, files)
-        # now locate _version.py in the new base_dir directory (remembering
-        # that it may be a hardlink) and replace it with an updated value
-        target_versionfile = os.path.join(base_dir, versionfile_source)
-        print("UPDATING %s" % target_versionfile)
-        os.unlink(target_versionfile)
-        with open(target_versionfile, "w") as f:
-            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
-
-INIT_PY_SNIPPET = """
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
-"""
-
-class cmd_update_files(Command):
-    description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
-    user_options = []
-    boolean_options = []
-    def initialize_options(self):
-        pass
-    def finalize_options(self):
-        pass
-    def run(self):
-        print(" creating %s" % versionfile_source)
-        with open(versionfile_source, "w") as f:
-            assert VCS is not None, "please set versioneer.VCS"
-            LONG = LONG_VERSION_PY[VCS]
-            f.write(LONG % {"DOLLAR": "$",
-                            "TAG_PREFIX": tag_prefix,
-                            "PARENTDIR_PREFIX": parentdir_prefix,
-                            "VERSIONFILE_SOURCE": versionfile_source,
-                            })
-
-        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
-        if os.path.exists(ipy):
-            try:
-                with open(ipy, "r") as f:
-                    old = f.read()
-            except EnvironmentError:
-                old = ""
-            if INIT_PY_SNIPPET not in old:
-                print(" appending to %s" % ipy)
-                with open(ipy, "a") as f:
-                    f.write(INIT_PY_SNIPPET)
-            else:
-                print(" %s unmodified" % ipy)
-        else:
-            print(" %s doesn't exist, ok" % ipy)
-            ipy = None
-
-        # Make sure both the top-level "versioneer.py" and versionfile_source
-        # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
-        # they'll be copied into source distributions. Pip won't be able to
-        # install the package without this.
-        manifest_in = os.path.join(get_root(), "MANIFEST.in")
-        simple_includes = set()
-        try:
-            with open(manifest_in, "r") as f:
-                for line in f:
-                    if line.startswith("include "):
-                        for include in line.split()[1:]:
-                            simple_includes.add(include)
-        except EnvironmentError:
-            pass
-        # That doesn't cover everything MANIFEST.in can do
-        # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
-        # it might give some false negatives. Appending redundant 'include'
-        # lines is safe, though.
-        if "versioneer.py" not in simple_includes:
-            print(" appending 'versioneer.py' to MANIFEST.in")
-            with open(manifest_in, "a") as f:
-                f.write("include versioneer.py\n")
-        else:
-            print(" 'versioneer.py' already in MANIFEST.in")
-        if versionfile_source not in simple_includes:
-            print(" appending versionfile_source ('%s') to MANIFEST.in" %
-                  versionfile_source)
-            with open(manifest_in, "a") as f:
-                f.write("include %s\n" % versionfile_source)
-        else:
-            print(" versionfile_source already in MANIFEST.in")
-
-        # Make VCS-specific changes. For git, this means creating/changing
-        # .gitattributes to mark _version.py for export-time keyword
-        # substitution.
-        do_vcs_install(manifest_in, versionfile_source, ipy)
-
-def get_cmdclass():
-    cmds = {'version': cmd_version,
-            'versioneer': cmd_update_files,
-            'build': cmd_build,
-            'sdist': cmd_sdist,
-            }
-    if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
-        cmds['build_exe'] = cmd_build_exe
-        del cmds['build']
-
-    return cmds
+
+# Version: 0.12
+
+"""
+The Versioneer
+==============
+
+* like a rocketeer, but for versions!
+* https://github.com/warner/python-versioneer
+* Brian Warner
+* License: Public Domain
+* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
+
+[![Build Status](https://travis-ci.org/warner/python-versioneer.png?branch=master)](https://travis-ci.org/warner/python-versioneer)
+
+This is a tool for managing a recorded version number in distutils-based
+python projects. The goal is to remove the tedious and error-prone "update
+the embedded version string" step from your release process. Making a new
+release should be as easy as recording a new tag in your version-control
+system, and maybe making new tarballs.
+
+
+## Quick Install
+
+* `pip install versioneer` to somewhere on your $PATH
+* run `versioneer-installer` in your source tree: this installs `versioneer.py`
+* follow the instructions below (also in the `versioneer.py` docstring)
+
+## Version Identifiers
+
+Source trees come from a variety of places:
+
+* a version-control system checkout (mostly used by developers)
+* a nightly tarball, produced by build automation
+* a snapshot tarball, produced by a web-based VCS browser, like github's
+  "tarball from tag" feature
+* a release tarball, produced by "setup.py sdist", distributed through PyPI
+
+Within each source tree, the version identifier (either a string or a number;
+this tool is format-agnostic) can come from a variety of places:
+
+* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
+  about recent "tags" and an absolute revision-id
+* the name of the directory into which the tarball was unpacked
+* an expanded VCS keyword ($Id$, etc)
+* a `_version.py` created by some earlier build step
+
+For released software, the version identifier is closely related to a VCS
+tag. Some projects use tag names that include more than just the version
+string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
+needs to strip the tag prefix to extract the version identifier. For
+unreleased software (between tags), the version identifier should provide
+enough information to help developers recreate the same tree, while also
+giving them an idea of roughly how old the tree is (after version 1.2, before
+version 1.3). Many VCS systems can report a description that captures this,
+for example 'git describe --tags --dirty --always' reports things like
+"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
+0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
+uncommitted changes.
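+
+As a concrete illustration, here is a minimal Python sketch (not part of
+Versioneer itself) of how such a description string decomposes:
+
+    desc = "0.7-1-g574ab98-dirty"           # sample 'git describe' output
+    dirty = desc.endswith("-dirty")         # trailing marker for local edits
+    if dirty:
+        desc = desc[:-len("-dirty")]
+    tag, distance, short_id = desc.rsplit("-", 2)
+    print(tag, distance, short_id, dirty)   # -> 0.7 1 g574ab98 True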
+
+The version identifier is used for multiple purposes:
+
+* to allow the module to self-identify its version: `myproject.__version__`
+* to choose a name and prefix for a 'setup.py sdist' tarball
+
+## Theory of Operation
+
+Versioneer works by adding a special `_version.py` file into your source
+tree, where your `__init__.py` can import it. This `_version.py` knows how to
+dynamically ask the VCS tool for version information at import time. However,
+when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
+copy is replaced by a small static file that contains just the generated
+version data.
+
+`_version.py` also contains `$Revision$` markers, and the installation
+process marks `_version.py` to have this marker rewritten with a tag name
+during the "git archive" command. As a result, generated tarballs will
+contain enough information to get the proper version.
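+
+At build time the dynamic file is replaced by a static one; rendered from the
+`SHORT_VERSION_PY` template defined later in this file, it looks roughly like
+this (values illustrative):
+
+    version_version = '0.12'
+    version_full = '1076c978a8d3cfc70f408fe5974aa6c092c949ac'
+    def get_versions(default={}, verbose=False):
+        return {'version': version_version, 'full': version_full}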
+
+
+## Installation
+
+First, decide on values for the following configuration variables:
+
+* `VCS`: the version control system you use. Currently accepts "git".
+
+* `versionfile_source`:
+
+  A project-relative pathname into which the generated version strings should
+  be written. This is usually a `_version.py` next to your project's main
+  `__init__.py` file, so it can be imported at runtime. If your project uses
+  `src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
+  This file should be checked in to your VCS as usual: the copy created below
+  by `setup.py versioneer` will include code that parses expanded VCS
+  keywords in generated tarballs. The 'build' and 'sdist' commands will
+  replace it with a copy that has just the calculated version string.
+
+  This must be set even if your project does not have any modules (and will
+  therefore never import `_version.py`), since "setup.py sdist"-based trees
+  still need somewhere to record the pre-calculated version strings. Anywhere
+  in the source tree should do. If there is an `__init__.py` next to your
+  `_version.py`, the `setup.py versioneer` command (described below) will
+  append some `__version__`-setting assignments, if they aren't already
+  present.
+
+* `versionfile_build`:
+
+  Like `versionfile_source`, but relative to the build directory instead of
+  the source directory. These will differ when your setup.py uses
+  'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
+  then you will probably have `versionfile_build='myproject/_version.py'` and
+  `versionfile_source='src/myproject/_version.py'`.
+
+  If this is set to None, then `setup.py build` will not attempt to rewrite
+  any `_version.py` in the built tree. If your project does not have any
+  libraries (e.g. if it only builds a script), then you should use
+  `versionfile_build = None` and override `distutils.command.build_scripts`
+  to explicitly insert a copy of `versioneer.get_version()` into your
+  generated script.
+
+* `tag_prefix`:
+
+  a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
+  If your tags look like 'myproject-1.2.0', then you should use
+  tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
+  should be an empty string.
+
+* `parentdir_prefix`:
+
+  a string, frequently the same as tag_prefix, which appears at the start of
+  all unpacked tarball filenames. If your tarball unpacks into
+  'myproject-1.2.0', this should be 'myproject-'.
+
+This tool provides one script, named `versioneer-installer`. That script does
+one thing: write a copy of `versioneer.py` into the current directory.
+
+To versioneer-enable your project:
+
+* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
+  source tree.
+
+* 2: add the following lines to the top of your `setup.py`, with the
+  configuration values you decided earlier:
+
+        import versioneer
+        versioneer.VCS = 'git'
+        versioneer.versionfile_source = 'src/myproject/_version.py'
+        versioneer.versionfile_build = 'myproject/_version.py'
+        versioneer.tag_prefix = '' # tags are like 1.2.0
+        versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
+
+* 3: add the following arguments to the setup() call in your setup.py:
+
+        version=versioneer.get_version(),
+        cmdclass=versioneer.get_cmdclass(),
+
+* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
+  modify your `__init__.py` (if one exists next to `_version.py`) to define
+  `__version__` (by calling a function from `_version.py`). It will also
+  modify your `MANIFEST.in` to include both `versioneer.py` and the generated
+  `_version.py` in sdist tarballs.
+
+* 5: commit these changes to your VCS. To make sure you won't forget,
+  `setup.py versioneer` will mark everything it touched for addition.
+
+## Post-Installation Usage
+
+Once established, all uses of your tree from a VCS checkout should get the
+current version string. All generated tarballs should include an embedded
+version string (so users who unpack them will not need a VCS tool installed).
+
+If you distribute your project through PyPI, then the release process should
+boil down to two steps:
+
+* 1: git tag 1.0
+* 2: python setup.py register sdist upload
+
+If you distribute it through github (i.e. users use github to generate
+tarballs with `git archive`), the process is:
+
+* 1: git tag 1.0
+* 2: git push; git push --tags
+
+Currently, all version strings must be based upon a tag. Versioneer will
+report "unknown" until your tree has at least one tag in its history. This
+restriction will be fixed eventually (see issue #12).
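+
+Until that first tag exists, callers can detect the placeholder explicitly; a
+small sketch, assuming `versioneer` has been configured as described above:
+
+    import versioneer
+    if versioneer.get_version() == "unknown":
+        print("no tags found; version information is unavailable")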
+
+## Version-String Flavors
+
+Code which uses Versioneer can learn about its version string at runtime by
+importing `_version` from your main `__init__.py` file and running the
+`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
+import the top-level `versioneer.py` and run `get_versions()`.
+
+Both functions return a dictionary with different keys for different flavors
+of the version string:
+
+* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
+  this uses the output of `git describe --tags --dirty --always` but strips
+  the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
+  is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
+  that this commit is two revisions ("-2-") beyond the "0.11" tag. For
+  released software (exactly equal to a known tag), the identifier will only
+  contain the stripped tag, e.g. "0.11".
+
+* `['full']`: detailed revision identifier. For Git, this is the full SHA1
+  commit id, followed by "-dirty" if the tree contains uncommitted changes,
+  e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
+
+Some variants are more useful than others. Including `full` in a bug report
+should allow developers to reconstruct the exact code being tested (or
+indicate the presence of local changes that should be shared with the
+developers). `version` is suitable for display in an "about" box or a CLI
+`--version` output: it can be easily compared against release notes and lists
+of bugs fixed in various releases.
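+
+For example, both access paths return the same kind of dictionary (the
+package name `myproject` here is hypothetical):
+
+    # at runtime, inside the installed package
+    from myproject._version import get_versions
+    info = get_versions()
+    # e.g. {'version': '0.11-2-g1076c97-dirty',
+    #       'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty'}
+
+    # from setup.py, using the top-level versioneer.py
+    import versioneer
+    print(versioneer.get_version())    # just the 'version' flavor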
+
+In the future, this will also include a
+[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/)-compatible flavor
+(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
+for a hash-based revision id), but is safe to use in a `setup.py`
+"`version=`" argument. It also enables tools like *pip* to compare version
+strings and evaluate compatibility constraint declarations.
+
+The `setup.py versioneer` command adds the following text to your
+`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
+
+    from ._version import get_versions
+    __version__ = get_versions()['version']
+    del get_versions
+
+## Updating Versioneer
+
+To upgrade your project to a new release of Versioneer, do the following:
+
+* install the new Versioneer (`pip install -U versioneer` or equivalent)
+* re-run `versioneer-installer` in your source tree to replace your copy of
+  `versioneer.py`
+* edit `setup.py`, if necessary, to include any new configuration settings
+  indicated by the release notes
+* re-run `setup.py versioneer` to replace `SRC/_version.py`
+* commit any changed files
+
+### Upgrading from 0.10 to 0.11
+
+You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
+`setup.py versioneer`. This will enable the use of additional version-control
+systems (SVN, etc) in the future.
+
+### Upgrading from 0.11 to 0.12
+
+Nothing special.
+
+## Future Directions
+
+This tool is designed to be easily extended to other version-control
+systems: all VCS-specific components live in separate directories like
+src/git/. The top-level `versioneer.py` script is assembled from these
+components by running make-versioneer.py. In the future, make-versioneer.py
+will take a VCS name as an argument and construct a version of
+`versioneer.py` that is specific to the given VCS. It might also take the
+configuration arguments that are currently provided manually during
+installation by editing setup.py. Alternatively, it might go the other
+direction and include code from all supported VCS systems, reducing the
+number of intermediate scripts.
+
+
+## License
+
+To make Versioneer easier to embed, all its code is hereby released into the
+public domain. The `_version.py` that it creates is also in the public
+domain.
+
+"""
+
+import os, sys, re, subprocess, errno
+from distutils.core import Command
+from distutils.command.sdist import sdist as _sdist
+from distutils.command.build import build as _build
+
+# these configuration settings will be overridden by setup.py after it
+# imports us
+versionfile_source = None
+versionfile_build = None
+tag_prefix = None
+parentdir_prefix = None
+VCS = None
+
+# these dictionaries contain VCS-specific tools
+LONG_VERSION_PY = {}
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+    assert isinstance(commands, list)
+    p = None
+    for c in commands:
+        try:
+            # remember shell=False, so use git.cmd on windows, not just git
+            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+                                 stderr=(subprocess.PIPE if hide_stderr
+                                         else None))
+            break
+        except EnvironmentError:
+            e = sys.exc_info()[1]
+            if e.errno == errno.ENOENT:
+                continue
+            if verbose:
+                print("unable to run %s" % args[0])
+                print(e)
+            return None
+    else:
+        if verbose:
+            print("unable to find command, tried %s" % (commands,))
+        return None
+    stdout = p.communicate()[0].strip()
+    if sys.version >= '3':
+        stdout = stdout.decode()
+    if p.returncode != 0:
+        if verbose:
+            print("unable to run %s (error)" % args[0])
+        return None
+    return stdout
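+
+# A usage sketch (illustrative only): ask git to describe the current tree.
+# run_command returns the command's stdout as a string, or None if the
+# command could not be found or exited non-zero:
+#
+#     out = run_command(["git"], ["describe", "--tags", "--dirty", "--always"])
+#     if out is not None:
+#         print(out)   # e.g. "0.12-3-gabc1234-dirty"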
+
+LONG_VERSION_PY['git'] = '''
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by githubs download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.12 (https://github.com/warner/python-versioneer)
+
+# these strings will be replaced by git during git-archive
+git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
+git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
+
+# these strings are filled in when 'setup.py versioneer' creates _version.py
+tag_prefix = "%(TAG_PREFIX)s"
+parentdir_prefix = "%(PARENTDIR_PREFIX)s"
+versionfile_source = "%(VERSIONFILE_SOURCE)s"
+
+import os, sys, re, subprocess, errno
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+    assert isinstance(commands, list)
+    p = None
+    for c in commands:
+        try:
+            # remember shell=False, so use git.cmd on windows, not just git
+            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+                                 stderr=(subprocess.PIPE if hide_stderr
+                                         else None))
+            break
+        except EnvironmentError:
+            e = sys.exc_info()[1]
+            if e.errno == errno.ENOENT:
+                continue
+            if verbose:
+                print("unable to run %%s" %% args[0])
+                print(e)
+            return None
+    else:
+        if verbose:
+            print("unable to find command, tried %%s" %% (commands,))
+        return None
+    stdout = p.communicate()[0].strip()
+    if sys.version >= '3':
+        stdout = stdout.decode()
+    if p.returncode != 0:
+        if verbose:
+            print("unable to run %%s (error)" %% args[0])
+        return None
+    return stdout
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose=False):
+    # Source tarballs conventionally unpack into a directory that includes
+    # both the project name and a version string.
+    dirname = os.path.basename(root)
+    if not dirname.startswith(parentdir_prefix):
+        if verbose:
+            print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
+                  (root, dirname, parentdir_prefix))
+        return None
+    return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+def git_get_keywords(versionfile_abs):
+    # the code embedded in _version.py can just fetch the value of these
+    # keywords. When used from setup.py, we don't want to import _version.py,
+    # so we do it with a regexp instead. This function is not used from
+    # _version.py.
+    keywords = {}
+    try:
+        with open(versionfile_abs, "r") as f:
+            for line in f.readlines():
+                if line.strip().startswith("git_refnames ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["refnames"] = mo.group(1)
+                if line.strip().startswith("git_full ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["full"] = mo.group(1)
+    except EnvironmentError:
+        pass
+    return keywords
+
+def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
+    if not keywords:
+        return {} # keyword-finding function failed to find keywords
+    refnames = keywords["refnames"].strip()
+    if refnames.startswith("$Format"):
+        if verbose:
+            print("keywords are unexpanded, not using")
+        return {} # unexpanded, so not in an unpacked git-archive tarball
+    refs = set([r.strip() for r in refnames.strip("()").split(",")])
+    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+    TAG = "tag: "
+    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+    if not tags:
+        # Either we're using git < 1.8.3, or there really are no tags. We use
+        # a heuristic: assume all version tags have a digit. The old git %%d
+        # expansion behaves like git log --decorate=short and strips out the
+        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+        # between branches and tags. By ignoring refnames without digits, we
+        # filter out many common branch names like "release" and
+        # "stabilization", as well as "HEAD" and "master".
+        tags = set([r for r in refs if re.search(r'\d', r)])
+        if verbose:
+            print("discarding '%%s', no digits" %% ",".join(refs-tags))
+    if verbose:
+        print("likely tags: %%s" %% ",".join(sorted(tags)))
+    for ref in sorted(tags):
+        # sorting will prefer e.g. "2.0" over "2.0rc1"
+        if ref.startswith(tag_prefix):
+            r = ref[len(tag_prefix):]
+            if verbose:
+                print("picking %%s" %% r)
+            return { "version": r,
+                     "full": keywords["full"].strip() }
+    # no suitable tags, so we use the full revision id
+    if verbose:
+        print("no suitable tags, using full revision id")
+    return { "version": keywords["full"].strip(),
+             "full": keywords["full"].strip() }
+
+
+def git_versions_from_vcs(tag_prefix, root, verbose=False):
+    # this runs 'git' from the root of the source tree. This only gets called
+    # if the git-archive 'subst' keywords were *not* expanded, and
+    # _version.py hasn't already been rewritten with a short version string,
+    # meaning we're inside a checked out source tree.
+
+    if not os.path.exists(os.path.join(root, ".git")):
+        if verbose:
+            print("no .git in %%s" %% root)
+        return {}
+
+    GITS = ["git"]
+    if sys.platform == "win32":
+        GITS = ["git.cmd", "git.exe"]
+    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
+                         cwd=root)
+    if stdout is None:
+        return {}
+    if not stdout.startswith(tag_prefix):
+        if verbose:
+            print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
+        return {}
+    tag = stdout[len(tag_prefix):]
+    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+    if stdout is None:
+        return {}
+    full = stdout.strip()
+    if tag.endswith("-dirty"):
+        full += "-dirty"
+    return {"version": tag, "full": full}
+
+
+def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
+    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+    # __file__, we can work backwards from there to the root. Some
+    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+    # case we can only use expanded keywords.
+
+    keywords = { "refnames": git_refnames, "full": git_full }
+    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
+    if ver:
+        return ver
+
+    try:
+        root = os.path.abspath(__file__)
+        # versionfile_source is the relative path from the top of the source
+        # tree (where the .git directory might live) to this file. Invert
+        # this to find the root from __file__.
+        for i in range(len(versionfile_source.split(os.sep))):
+            root = os.path.dirname(root)
+    except NameError:
+        return default
+
+    return (git_versions_from_vcs(tag_prefix, root, verbose)
+            or versions_from_parentdir(parentdir_prefix, root, verbose)
+            or default)
+'''
+
+def git_get_keywords(versionfile_abs):
+    # the code embedded in _version.py can just fetch the value of these
+    # keywords. When used from setup.py, we don't want to import _version.py,
+    # so we do it with a regexp instead. This function is not used from
+    # _version.py.
+    keywords = {}
+    try:
+        with open(versionfile_abs, "r") as f:
+            for line in f.readlines():
+                if line.strip().startswith("git_refnames ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["refnames"] = mo.group(1)
+                if line.strip().startswith("git_full ="):
+                    mo = re.search(r'=\s*"(.*)"', line)
+                    if mo:
+                        keywords["full"] = mo.group(1)
+    except EnvironmentError:
+        pass
+    return keywords
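+
+# Illustrative result of git_get_keywords() on an expanded git-archive
+# tarball (values hypothetical):
+#     {"refnames": " (HEAD, tag: 0.12, master)",
+#      "full": "1076c978a8d3cfc70f408fe5974aa6c092c949ac"}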
+
+def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
+    if not keywords:
+        return {} # keyword-finding function failed to find keywords
+    refnames = keywords["refnames"].strip()
+    if refnames.startswith("$Format"):
+        if verbose:
+            print("keywords are unexpanded, not using")
+        return {} # unexpanded, so not in an unpacked git-archive tarball
+    refs = set([r.strip() for r in refnames.strip("()").split(",")])
+    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+    TAG = "tag: "
+    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+    if not tags:
+        # Either we're using git < 1.8.3, or there really are no tags. We use
+        # a heuristic: assume all version tags have a digit. The old git %d
+        # expansion behaves like git log --decorate=short and strips out the
+        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+        # between branches and tags. By ignoring refnames without digits, we
+        # filter out many common branch names like "release" and
+        # "stabilization", as well as "HEAD" and "master".
+        tags = set([r for r in refs if re.search(r'\d', r)])
+        if verbose:
+            print("discarding '%s', no digits" % ",".join(refs-tags))
+    if verbose:
+        print("likely tags: %s" % ",".join(sorted(tags)))
+    for ref in sorted(tags):
+        # sorting will prefer e.g. "2.0" over "2.0rc1"
+        if ref.startswith(tag_prefix):
+            r = ref[len(tag_prefix):]
+            if verbose:
+                print("picking %s" % r)
+            return { "version": r,
+                     "full": keywords["full"].strip() }
+    # no suitable tags, so we use the full revision id
+    if verbose:
+        print("no suitable tags, using full revision id")
+    return { "version": keywords["full"].strip(),
+             "full": keywords["full"].strip() }
+
+
+def git_versions_from_vcs(tag_prefix, root, verbose=False):
+    # this runs 'git' from the root of the source tree. This only gets called
+    # if the git-archive 'subst' keywords were *not* expanded, and
+    # _version.py hasn't already been rewritten with a short version string,
+    # meaning we're inside a checked out source tree.
+
+    if not os.path.exists(os.path.join(root, ".git")):
+        if verbose:
+            print("no .git in %s" % root)
+        return {}
+
+    GITS = ["git"]
+    if sys.platform == "win32":
+        GITS = ["git.cmd", "git.exe"]
+    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
+                         cwd=root)
+    if stdout is None:
+        return {}
+    if not stdout.startswith(tag_prefix):
+        if verbose:
+            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
+        return {}
+    tag = stdout[len(tag_prefix):]
+    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+    if stdout is None:
+        return {}
+    full = stdout.strip()
+    if tag.endswith("-dirty"):
+        full += "-dirty"
+    return {"version": tag, "full": full}
+
+
+def do_vcs_install(manifest_in, versionfile_source, ipy):
+    GITS = ["git"]
+    if sys.platform == "win32":
+        GITS = ["git.cmd", "git.exe"]
+    files = [manifest_in, versionfile_source]
+    if ipy:
+        files.append(ipy)
+    try:
+        me = __file__
+        if me.endswith(".pyc") or me.endswith(".pyo"):
+            me = os.path.splitext(me)[0] + ".py"
+        versioneer_file = os.path.relpath(me)
+    except NameError:
+        versioneer_file = "versioneer.py"
+    files.append(versioneer_file)
+    present = False
+    try:
+        with open(".gitattributes", "r") as f:
+            for line in f.readlines():
+                if line.strip().startswith(versionfile_source):
+                    if "export-subst" in line.strip().split()[1:]:
+                        present = True
+    except EnvironmentError:
+        pass
+    if not present:
+        with open(".gitattributes", "a+") as f:
+            f.write("%s export-subst\n" % versionfile_source)
+        files.append(".gitattributes")
+    run_command(GITS, ["add", "--"] + files)
+
+def versions_from_parentdir(parentdir_prefix, root, verbose=False):
+    # Source tarballs conventionally unpack into a directory that includes
+    # both the project name and a version string.
+    dirname = os.path.basename(root)
+    if not dirname.startswith(parentdir_prefix):
+        if verbose:
+            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
+                  (root, dirname, parentdir_prefix))
+        return None
+    return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+SHORT_VERSION_PY = """
+# This file was generated by 'versioneer.py' (0.12) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+version_version = '%(version)s'
+version_full = '%(full)s'
+def get_versions(default={}, verbose=False):
+    return {'version': version_version, 'full': version_full}
+
+"""
+
+DEFAULT = {"version": "unknown", "full": "unknown"}
+
+def versions_from_file(filename):
+    versions = {}
+    try:
+        with open(filename) as f:
+            for line in f.readlines():
+                mo = re.match("version_version = '([^']+)'", line)
+                if mo:
+                    versions["version"] = mo.group(1)
+                mo = re.match("version_full = '([^']+)'", line)
+                if mo:
+                    versions["full"] = mo.group(1)
+    except EnvironmentError:
+        return {}
+
+    return versions
+
+def write_to_version_file(filename, versions):
+    with open(filename, "w") as f:
+        f.write(SHORT_VERSION_PY % versions)
+
+    print("set %s to '%s'" % (filename, versions["version"]))
+
+
+def get_root():
+    try:
+        return os.path.dirname(os.path.abspath(__file__))
+    except NameError:
+        return os.path.dirname(os.path.abspath(sys.argv[0]))
+
+def vcs_function(vcs, suffix):
+    return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
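+
+# e.g. vcs_function("git", "get_keywords") resolves to git_get_keywords
+# defined above, or None if no such function exists in this module.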
+
+def get_versions(default=DEFAULT, verbose=False):
+    # returns dict with two keys: 'version' and 'full'
+    assert versionfile_source is not None, "please set versioneer.versionfile_source"
+    assert tag_prefix is not None, "please set versioneer.tag_prefix"
+    assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
+    assert VCS is not None, "please set versioneer.VCS"
+
+    # I am in versioneer.py, which must live at the top of the source tree,
+    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
+    # don't have __file__, in which case we fall back to sys.argv[0] (which
+    # ought to be the setup.py script). We prefer __file__ since that's more
+    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
+    root = get_root()
+    versionfile_abs = os.path.join(root, versionfile_source)
+
+    # extract version from first of _version.py, VCS command (e.g. 'git
+    # describe'), parentdir. This is meant to work for developers using a
+    # source checkout, for users of a tarball created by 'setup.py sdist',
+    # and for users of a tarball/zipball created by 'git archive' or github's
+    # download-from-tag feature or the equivalent in other VCSes.
+
+    get_keywords_f = vcs_function(VCS, "get_keywords")
+    versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
+    if get_keywords_f and versions_from_keywords_f:
+        vcs_keywords = get_keywords_f(versionfile_abs)
+        ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
+        if ver:
+            if verbose: print("got version from expanded keyword %s" % ver)
+            return ver
+
+    ver = versions_from_file(versionfile_abs)
+    if ver:
+        if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
+        return ver
+
+    versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
+    if versions_from_vcs_f:
+        ver = versions_from_vcs_f(tag_prefix, root, verbose)
+        if ver:
+            if verbose: print("got version from VCS %s" % ver)
+            return ver
+
+    ver = versions_from_parentdir(parentdir_prefix, root, verbose)
+    if ver:
+        if verbose: print("got version from parentdir %s" % ver)
+        return ver
+
+    if verbose: print("got version from default %s" % default)
+    return default
+
+def get_version(verbose=False):
+    return get_versions(verbose=verbose)["version"]
+
+class cmd_version(Command):
+    description = "report generated version string"
+    user_options = []
+    boolean_options = []
+    def initialize_options(self):
+        pass
+    def finalize_options(self):
+        pass
+    def run(self):
+        ver = get_version(verbose=True)
+        print("Version is currently: %s" % ver)
+
+
+class cmd_build(_build):
+    def run(self):
+        versions = get_versions(verbose=True)
+        _build.run(self)
+        # now locate _version.py in the new build/ directory and replace it
+        # with an updated value
+        if versionfile_build:
+            target_versionfile = os.path.join(self.build_lib, versionfile_build)
+            print("UPDATING %s" % target_versionfile)
+            os.unlink(target_versionfile)
+            with open(target_versionfile, "w") as f:
+                f.write(SHORT_VERSION_PY % versions)
+
+if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
+    from cx_Freeze.dist import build_exe as _build_exe
+
+    class cmd_build_exe(_build_exe):
+        def run(self):
+            versions = get_versions(verbose=True)
+            target_versionfile = versionfile_source
+            print("UPDATING %s" % target_versionfile)
+            os.unlink(target_versionfile)
+            with open(target_versionfile, "w") as f:
+                f.write(SHORT_VERSION_PY % versions)
+
+            _build_exe.run(self)
+            os.unlink(target_versionfile)
+            with open(versionfile_source, "w") as f:
+                assert VCS is not None, "please set versioneer.VCS"
+                LONG = LONG_VERSION_PY[VCS]
+                f.write(LONG % {"DOLLAR": "$",
+                                "TAG_PREFIX": tag_prefix,
+                                "PARENTDIR_PREFIX": parentdir_prefix,
+                                "VERSIONFILE_SOURCE": versionfile_source,
+                                })
+
+class cmd_sdist(_sdist):
+    def run(self):
+        versions = get_versions(verbose=True)
+        self._versioneer_generated_versions = versions
+        # unless we update this, the command will keep using the old version
+        self.distribution.metadata.version = versions["version"]
+        return _sdist.run(self)
+
+    def make_release_tree(self, base_dir, files):
+        _sdist.make_release_tree(self, base_dir, files)
+        # now locate _version.py in the new base_dir directory (remembering
+        # that it may be a hardlink) and replace it with an updated value
+        target_versionfile = os.path.join(base_dir, versionfile_source)
+        print("UPDATING %s" % target_versionfile)
+        os.unlink(target_versionfile)
+        with open(target_versionfile, "w") as f:
+            f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
+
+INIT_PY_SNIPPET = """
+from ._version import get_versions
+__version__ = get_versions()['version']
+del get_versions
+"""
+
+class cmd_update_files(Command):
+    description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
+    user_options = []
+    boolean_options = []
+    def initialize_options(self):
+        pass
+    def finalize_options(self):
+        pass
+    def run(self):
+        print(" creating %s" % versionfile_source)
+        with open(versionfile_source, "w") as f:
+            assert VCS is not None, "please set versioneer.VCS"
+            LONG = LONG_VERSION_PY[VCS]
+            f.write(LONG % {"DOLLAR": "$",
+                            "TAG_PREFIX": tag_prefix,
+                            "PARENTDIR_PREFIX": parentdir_prefix,
+                            "VERSIONFILE_SOURCE": versionfile_source,
+                            })
+
+        ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
+        if os.path.exists(ipy):
+            try:
+                with open(ipy, "r") as f:
+                    old = f.read()
+            except EnvironmentError:
+                old = ""
+            if INIT_PY_SNIPPET not in old:
+                print(" appending to %s" % ipy)
+                with open(ipy, "a") as f:
+                    f.write(INIT_PY_SNIPPET)
+            else:
+                print(" %s unmodified" % ipy)
+        else:
+            print(" %s doesn't exist, ok" % ipy)
+            ipy = None
+
+        # Make sure both the top-level "versioneer.py" and versionfile_source
+        # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
+        # they'll be copied into source distributions. Pip won't be able to
+        # install the package without this.
+        manifest_in = os.path.join(get_root(), "MANIFEST.in")
+        simple_includes = set()
+        try:
+            with open(manifest_in, "r") as f:
+                for line in f:
+                    if line.startswith("include "):
+                        for include in line.split()[1:]:
+                            simple_includes.add(include)
+        except EnvironmentError:
+            pass
+        # That doesn't cover everything MANIFEST.in can do
+        # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
+        # it might give some false negatives. Appending redundant 'include'
+        # lines is safe, though.
+        if "versioneer.py" not in simple_includes:
+            print(" appending 'versioneer.py' to MANIFEST.in")
+            with open(manifest_in, "a") as f:
+                f.write("include versioneer.py\n")
+        else:
+            print(" 'versioneer.py' already in MANIFEST.in")
+        if versionfile_source not in simple_includes:
+            print(" appending versionfile_source ('%s') to MANIFEST.in" %
+                  versionfile_source)
+            with open(manifest_in, "a") as f:
+                f.write("include %s\n" % versionfile_source)
+        else:
+            print(" versionfile_source already in MANIFEST.in")
+
+        # Make VCS-specific changes. For git, this means creating/changing
+        # .gitattributes to mark _version.py for export-time keyword
+        # substitution.
+        do_vcs_install(manifest_in, versionfile_source, ipy)
+
+def get_cmdclass():
+    cmds = {'version': cmd_version,
+            'versioneer': cmd_update_files,
+            'build': cmd_build,
+            'sdist': cmd_sdist,
+            }
+    if 'cx_Freeze' in sys.modules:  # cx_freeze enabled?
+        cmds['build_exe'] = cmd_build_exe
+        del cmds['build']
+
+    return cmds
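+
+# Typical wiring in a project's setup.py (mirroring the docstring above;
+# names like 'myproject' are placeholders):
+#
+#     import versioneer
+#     versioneer.VCS = 'git'
+#     versioneer.versionfile_source = 'src/myproject/_version.py'
+#     versioneer.versionfile_build = 'myproject/_version.py'
+#     versioneer.tag_prefix = ''
+#     versioneer.parentdir_prefix = 'myproject-'
+#     setup(version=versioneer.get_version(),
+#           cmdclass=versioneer.get_cmdclass(), ...)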

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git