[lmfit-py] 01/01: Imported Upstream version 0.9.1

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Sat Oct 31 09:10:31 UTC 2015


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to annotated tag upstream/0.9.1
in repository lmfit-py.

commit d2096b64628fe27ed7cfe73c921173cfa148090e
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Tue Oct 6 18:52:23 2015 +0200

    Imported Upstream version 0.9.1
---
 .gitattributes                                    |  20 +-
 .travis.yml                                       |   1 +
 Contributing.md                                   |  40 ++
 {tests/NIST_STRD => NIST_STRD}/Bennett5.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/BoxBOD.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Chwirut1.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/Chwirut2.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/DanWood.dat        |   0
 {tests/NIST_STRD => NIST_STRD}/ENSO.dat           |   0
 {tests/NIST_STRD => NIST_STRD}/Eckerle4.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/Gauss1.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Gauss2.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Gauss3.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Hahn1.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/Kirby2.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Lanczos1.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/Lanczos2.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/Lanczos3.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/MGH09.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/MGH10.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/MGH17.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/Misra1a.dat        |   0
 {tests/NIST_STRD => NIST_STRD}/Misra1b.dat        |   0
 {tests/NIST_STRD => NIST_STRD}/Misra1c.dat        |   0
 {tests/NIST_STRD => NIST_STRD}/Misra1d.dat        |   0
 {tests/NIST_STRD => NIST_STRD}/Models             |   0
 {tests/NIST_STRD => NIST_STRD}/Nelson.dat         |   0
 {tests/NIST_STRD => NIST_STRD}/Rat42.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/Rat43.dat          |   0
 {tests/NIST_STRD => NIST_STRD}/Roszman1.dat       |   0
 {tests/NIST_STRD => NIST_STRD}/Thurber.dat        |   0
 README.rst                                        |  19 +
 THANKS.txt                                        |  48 +--
 doc/_templates/indexsidebar.html                  |   5 +-
 doc/builtin_models.rst                            |  41 +-
 doc/confidence.rst                                |  80 ++--
 doc/contents.rst                                  |   1 +
 doc/faq.rst                                       |  63 ++-
 doc/fitting.rst                                   | 501 ++++++++++++++--------
 doc/index.rst                                     |  33 +-
 doc/installation.rst                              |  12 +-
 doc/intro.rst                                     |  30 +-
 doc/model.rst                                     | 188 ++++++--
 doc/parameters.rst                                |  54 +--
 doc/sphinx/theme/lmfitdoc/layout.html             |  31 +-
 doc/support.rst                                   |   2 +
 doc/whatsnew.rst                                  |  97 +++++
 {tests => examples}/NISTModels.py                 |   2 +-
 examples/confidence_interval.py                   |  15 +-
 examples/confidence_interval2.py                  |  14 +-
 examples/doc_basic.py                             |   2 +-
 examples/doc_basic_valuesdict.py                  |   2 +-
 examples/doc_confidence1.py                       |   9 +-
 examples/doc_confidence2.py                       |  27 +-
 examples/doc_model1.py                            |  50 +--
 examples/doc_model2.py                            |  62 +--
 examples/doc_withreport.py                        |   6 +-
 examples/example_covar.py                         |   8 +-
 examples/example_derivfunc.py                     |  18 +-
 examples/example_lbfgsb.py                        |  11 +-
 examples/fit_NIST_leastsq.py                      | 148 -------
 examples/{test_NIST_Strd.py => fit_NIST_lmfit.py} |  13 +-
 examples/fit_multi_datasets.py                    |   6 +-
 examples/fit_with_algebraic_constraint.py         |   5 +-
 examples/fit_with_bounds.py                       |   4 +-
 examples/fit_with_inequality.py                   |  45 ++
 examples/lmfit_and_emcee.py                       |   6 +-
 lmfit/__init__.py                                 |   1 -
 lmfit/_version.py                                 | 366 ++++++++--------
 lmfit/asteval.py                                  |  10 +
 lmfit/astutils.py                                 |   2 +-
 lmfit/confidence.py                               | 118 +++--
 lmfit/lineshapes.py                               |  15 +-
 lmfit/minimizer.py                                | 496 ++++++++++-----------
 lmfit/model.py                                    | 198 ++++++---
 lmfit/models.py                                   |  24 +-
 lmfit/parameter.py                                | 139 +++++-
 lmfit/ui/__init__.py                              |  14 +-
 lmfit/ui/basefitter.py                            |   2 +-
 lmfit/ui/ipy_fitter.py                            |  48 ++-
 tests/NISTModels.py                               |   2 +-
 tests/test_NIST_Strd.py                           |   5 +-
 tests/test_algebraic_constraint.py                |  46 +-
 tests/test_algebraic_constraint2.py               |  23 +-
 tests/test_basicfit.py                            |   4 +-
 tests/test_bounded_jacobian.py                    |  43 ++
 tests/test_bounds.py                              |   6 +-
 tests/test_confidence.py                          |  44 ++
 tests/test_default_kws.py                         |   3 +-
 tests/test_itercb.py                              |  29 ++
 tests/test_model.py                               |  39 ++
 tests/test_multidatasets.py                       |   2 -
 tests/test_nose.py                                | 108 +++--
 tests/test_params_set.py                          |  41 ++
 tests/test_stepmodel.py                           |   3 -
 95 files changed, 2179 insertions(+), 1371 deletions(-)

diff --git a/.gitattributes b/.gitattributes
index 8bde2c9..47dc601 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,19 @@
-lmfit/_version.py export-subst
+lmfit/_version.py export-subst
+
+# Auto detect text files and perform LF normalization
+* text=auto eol=lf
+
+*.jpg -text
+*.png -text
+
+# Standard to msysgit
+*.doc	 diff=astextplain
+*.DOC	 diff=astextplain
+*.docx diff=astextplain
+*.DOCX diff=astextplain
+*.dot  diff=astextplain
+*.DOT  diff=astextplain
+*.pdf  diff=astextplain
+*.PDF	 diff=astextplain
+*.rtf	 diff=astextplain
+*.RTF	 diff=astextplain
diff --git a/.travis.yml b/.travis.yml
index bbdfbf2..08b78f1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,6 +5,7 @@ language: python
 python:
     - 2.7
     - 3.3
+    - 3.4
 
 before_install:
     - wget http://repo.continuum.io/miniconda/Miniconda3-3.5.5-Linux-x86_64.sh -O miniconda.sh
diff --git a/Contributing.md b/Contributing.md
new file mode 100644
index 0000000..14000a8
--- /dev/null
+++ b/Contributing.md
@@ -0,0 +1,40 @@
+
+## Contributing Code
+
+We'd love your help, either as ideas, documentation, or code.  If you have a
+new module or want to add or fix existing code, please do.  We try to follow
+Python's PEP-8 closely.  We really want good, numpy-doc style docstrings,
+usable off-line documentation, and good unit tests.  A good contribution
+includes all of these.
+
+## Using the Mailing List and Github Issues
+
+If you have questions, comments, or suggestions for lmfit, please use the
+Mailing List, https://groups.google.com/group/lmfit-py.  This provides an
+on-line conversation that is archived and can be searched easily.
+
+If you find a bug with the code or documentation, use the Github Issues,
+https://github.com/lmfit/lmfit-py/issues to submit a bug report.  If you have
+an idea for how to solve the problem and are familiar with python and github,
+submitting a Pull Request on github.com would be greatly appreciated.
+
+If you are at all unsure whether to use the mailing list or the Issue tracker,
+please start a conversation on the mailing list.
+
+Starting the conversation on the mailing list with "How do I do this?" or "Why
+didn't this work?" instead of "This doesn't work" is preferred, and will
+better help others with similar questions.  No posting about fitting data is
+inappropriate for the mailing list, but many questions are not Issues.  We
+will try our best to engage in all discussions, but we may simply close github
+Issues that are actually questions.
+
+## Using IPython Notebooks to show examples
+
+IPython Notebooks are very useful for showing code snippets and outcomes, and
+are very good way to demonstrate a question or raise an issue.  Please know
+that we will *read* your notebook, but will probably not run it off-line.
+Also, please do not expect that we know much about your problem domain, or
+will read your notebook in so much detail as to fully understand what you're
+trying to do without adequate explanation.  State the problem, including what
+result you think you should have gotten, and definitely include a
+conclusion.
diff --git a/tests/NIST_STRD/Bennett5.dat b/NIST_STRD/Bennett5.dat
similarity index 100%
rename from tests/NIST_STRD/Bennett5.dat
rename to NIST_STRD/Bennett5.dat
diff --git a/tests/NIST_STRD/BoxBOD.dat b/NIST_STRD/BoxBOD.dat
similarity index 100%
rename from tests/NIST_STRD/BoxBOD.dat
rename to NIST_STRD/BoxBOD.dat
diff --git a/tests/NIST_STRD/Chwirut1.dat b/NIST_STRD/Chwirut1.dat
similarity index 100%
rename from tests/NIST_STRD/Chwirut1.dat
rename to NIST_STRD/Chwirut1.dat
diff --git a/tests/NIST_STRD/Chwirut2.dat b/NIST_STRD/Chwirut2.dat
similarity index 100%
rename from tests/NIST_STRD/Chwirut2.dat
rename to NIST_STRD/Chwirut2.dat
diff --git a/tests/NIST_STRD/DanWood.dat b/NIST_STRD/DanWood.dat
similarity index 100%
rename from tests/NIST_STRD/DanWood.dat
rename to NIST_STRD/DanWood.dat
diff --git a/tests/NIST_STRD/ENSO.dat b/NIST_STRD/ENSO.dat
similarity index 100%
rename from tests/NIST_STRD/ENSO.dat
rename to NIST_STRD/ENSO.dat
diff --git a/tests/NIST_STRD/Eckerle4.dat b/NIST_STRD/Eckerle4.dat
similarity index 100%
rename from tests/NIST_STRD/Eckerle4.dat
rename to NIST_STRD/Eckerle4.dat
diff --git a/tests/NIST_STRD/Gauss1.dat b/NIST_STRD/Gauss1.dat
similarity index 100%
rename from tests/NIST_STRD/Gauss1.dat
rename to NIST_STRD/Gauss1.dat
diff --git a/tests/NIST_STRD/Gauss2.dat b/NIST_STRD/Gauss2.dat
similarity index 100%
rename from tests/NIST_STRD/Gauss2.dat
rename to NIST_STRD/Gauss2.dat
diff --git a/tests/NIST_STRD/Gauss3.dat b/NIST_STRD/Gauss3.dat
similarity index 100%
rename from tests/NIST_STRD/Gauss3.dat
rename to NIST_STRD/Gauss3.dat
diff --git a/tests/NIST_STRD/Hahn1.dat b/NIST_STRD/Hahn1.dat
similarity index 100%
rename from tests/NIST_STRD/Hahn1.dat
rename to NIST_STRD/Hahn1.dat
diff --git a/tests/NIST_STRD/Kirby2.dat b/NIST_STRD/Kirby2.dat
similarity index 100%
rename from tests/NIST_STRD/Kirby2.dat
rename to NIST_STRD/Kirby2.dat
diff --git a/tests/NIST_STRD/Lanczos1.dat b/NIST_STRD/Lanczos1.dat
similarity index 100%
rename from tests/NIST_STRD/Lanczos1.dat
rename to NIST_STRD/Lanczos1.dat
diff --git a/tests/NIST_STRD/Lanczos2.dat b/NIST_STRD/Lanczos2.dat
similarity index 100%
rename from tests/NIST_STRD/Lanczos2.dat
rename to NIST_STRD/Lanczos2.dat
diff --git a/tests/NIST_STRD/Lanczos3.dat b/NIST_STRD/Lanczos3.dat
similarity index 100%
rename from tests/NIST_STRD/Lanczos3.dat
rename to NIST_STRD/Lanczos3.dat
diff --git a/tests/NIST_STRD/MGH09.dat b/NIST_STRD/MGH09.dat
similarity index 100%
rename from tests/NIST_STRD/MGH09.dat
rename to NIST_STRD/MGH09.dat
diff --git a/tests/NIST_STRD/MGH10.dat b/NIST_STRD/MGH10.dat
similarity index 100%
rename from tests/NIST_STRD/MGH10.dat
rename to NIST_STRD/MGH10.dat
diff --git a/tests/NIST_STRD/MGH17.dat b/NIST_STRD/MGH17.dat
similarity index 100%
rename from tests/NIST_STRD/MGH17.dat
rename to NIST_STRD/MGH17.dat
diff --git a/tests/NIST_STRD/Misra1a.dat b/NIST_STRD/Misra1a.dat
similarity index 100%
rename from tests/NIST_STRD/Misra1a.dat
rename to NIST_STRD/Misra1a.dat
diff --git a/tests/NIST_STRD/Misra1b.dat b/NIST_STRD/Misra1b.dat
similarity index 100%
rename from tests/NIST_STRD/Misra1b.dat
rename to NIST_STRD/Misra1b.dat
diff --git a/tests/NIST_STRD/Misra1c.dat b/NIST_STRD/Misra1c.dat
similarity index 100%
rename from tests/NIST_STRD/Misra1c.dat
rename to NIST_STRD/Misra1c.dat
diff --git a/tests/NIST_STRD/Misra1d.dat b/NIST_STRD/Misra1d.dat
similarity index 100%
rename from tests/NIST_STRD/Misra1d.dat
rename to NIST_STRD/Misra1d.dat
diff --git a/tests/NIST_STRD/Models b/NIST_STRD/Models
similarity index 100%
rename from tests/NIST_STRD/Models
rename to NIST_STRD/Models
diff --git a/tests/NIST_STRD/Nelson.dat b/NIST_STRD/Nelson.dat
similarity index 100%
rename from tests/NIST_STRD/Nelson.dat
rename to NIST_STRD/Nelson.dat
diff --git a/tests/NIST_STRD/Rat42.dat b/NIST_STRD/Rat42.dat
similarity index 100%
rename from tests/NIST_STRD/Rat42.dat
rename to NIST_STRD/Rat42.dat
diff --git a/tests/NIST_STRD/Rat43.dat b/NIST_STRD/Rat43.dat
similarity index 100%
rename from tests/NIST_STRD/Rat43.dat
rename to NIST_STRD/Rat43.dat
diff --git a/tests/NIST_STRD/Roszman1.dat b/NIST_STRD/Roszman1.dat
similarity index 100%
rename from tests/NIST_STRD/Roszman1.dat
rename to NIST_STRD/Roszman1.dat
diff --git a/tests/NIST_STRD/Thurber.dat b/NIST_STRD/Thurber.dat
similarity index 100%
rename from tests/NIST_STRD/Thurber.dat
rename to NIST_STRD/Thurber.dat
diff --git a/README.rst b/README.rst
index fb37334..3169c60 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,25 @@ LMfit-py
 .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.11813.png
    :target: http://dx.doi.org/10.5281/zenodo.11813
 
+
+Overview
+---------
+
+LMfit-py provides a Least-Squares Minimization routine and class with a
+simple, flexible approach to parameterizing a model for fitting to data.
+ 
+LMFIT is a pure python package, and so easy to install from source or with
+`pip install lmfit`. 
+
+For questions, comments, and suggestions, please use the LMFIT mailing
+list, https://groups.google.com/group/lmfit-py.  Using the bug tracking
+software in Github Issues is encouraged for known problems and bug reports.
+Please read Contributing.md before creating an Issue.
+
+
+Parameters and Fitting
+-------------------------
+
 LMfit-py provides a Least-Squares Minimization routine and class
 with a simple, flexible approach to parameterizing a model for
 fitting to data.  Named Parameters can be held fixed or freely
diff --git a/THANKS.txt b/THANKS.txt
index 4436a80..83b6b67 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,24 +1,24 @@
-Many people have contributed to lmfit.
-
-Matthew Newville wrote the original version and maintains the project.
-Till Stensitzki wrote the improved estimates of confidence intervals, and
-    contributed many tests, bug fixes, and documentation.
-Daniel B. Allan wrote much of the high level Model code, and many
-    improvements to the testing and documentation.
-Antonino Ingargiola wrote much of the high level Model code and provided
-    many bug fixes.
-J. J. Helmus wrote the MINUT bounds for leastsq, originally in
-    leastsqbounds.py, and ported to lmfit.
-E. O. Le Bigot wrote the uncertainties package, a version of which is used
-    by lmfit.
-Michal Rawlik added plotting capabilities for Models.
-A. R. J. Nelson added differential_evolution, and greatly improved the code
-    in the docstrings.
-
-Additional patches, bug fixes, and suggestions have come from Christoph
-    Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
-    Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
-
-The lmfit code obviously depends on, and owes a very large debt to the code
-in scipy.optimize.  Several discussions on the scipy-user and lmfit mailing
-lists have also led to improvements in this code.
+Many people have contributed to lmfit.
+
+Matthew Newville wrote the original version and maintains the project.
+Till Stensitzki wrote the improved estimates of confidence intervals, and
+    contributed many tests, bug fixes, and documentation.
+Daniel B. Allan wrote much of the high level Model code, and many
+    improvements to the testing and documentation.
+Antonino Ingargiola wrote much of the high level Model code and provided
+    many bug fixes.
+J. J. Helmus wrote the MINUT bounds for leastsq, originally in
+    leastsqbounds.py, and ported to lmfit.
+E. O. Le Bigot wrote the uncertainties package, a version of which is used
+    by lmfit.
+Michal Rawlik added plotting capabilities for Models.
+A. R. J. Nelson added differential_evolution, and greatly improved the code
+    in the docstrings.
+
+Additional patches, bug fixes, and suggestions have come from Christoph
+    Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
+    Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
+
+The lmfit code obviously depends on, and owes a very large debt to the code
+in scipy.optimize.  Several discussions on the scipy-user and lmfit mailing
+lists have also led to improvements in this code.
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
index 5c488b4..ceb1a92 100644
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -8,8 +8,9 @@
 
 <h3>Questions?</h3>
 
-  <a href="support.html"> Getting Help</a> <br>
-  <a href="faq.html"> Frequently Asked Questions</a>
+  <a href="faq.html">Frequently Asked Questions</a><br>
+  <a href="https://groups.google.com/group/lmfit-py">Mailing List</a><br>
+  <a href="support.html">Getting Help</a><br>
 
 <h3>Off-line Documentation</h3>
 
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 9b3c02c..9a16a98 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -65,8 +65,8 @@ parameter ``fwhm`` is included.
   f(x; A, \mu, \sigma) = \frac{A}{\sigma\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]}
 
 where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The Full-Width at
-Half-Maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
+half maximum is :math:`2\sigma\sqrt{2\ln{2}}`, approximately
 :math:`2.3548\sigma`
 
 
@@ -85,8 +85,8 @@ parameter ``fwhm`` is included.
   f(x; A, \mu, \sigma) = \frac{A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
 
 where the parameter ``amplitude`` corresponds to :math:`A`, ``center`` to
-:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The Full-Width at
-Half-Maximum is :math:`2\sigma`.
+:math:`\mu`, and ``sigma`` to :math:`\sigma`.  The full width at
+half maximum is :math:`2\sigma`.
 
 
 :class:`VoigtModel`
@@ -132,18 +132,20 @@ the full width at half maximum is approximately :math:`3.6013\sigma`.
 a model based on a `pseudo-Voigt distribution function
 <http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation>`_,
 which is a weighted sum of a Gaussian and Lorentzian distribution functions
-with the same values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
-and ``sigma`` (:math:`\sigma`), and a parameter ``fraction`` (:math:`\alpha`)
-in
+that share values for ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
+and full width at half maximum (and so have constrained values of
+``sigma`` (:math:`\sigma`)).  A parameter ``fraction`` (:math:`\alpha`)
+controls the relative weight of the Gaussian and Lorentzian components,
+giving the full definition of
 
 .. math::
 
-  f(x; A, \mu, \sigma, \alpha) = (1-\alpha)\frac{A}{\pi}
-  \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big] + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
+  f(x; A, \mu, \sigma, \alpha) = \frac{(1-\alpha)A}{\sigma_g\sqrt{2\pi}} e^{[{-{(x-\mu)^2}/{{2\sigma_g}^2}}]}
+ + \frac{\alpha A}{\pi} \big[\frac{\sigma}{(x - \mu)^2 + \sigma^2}\big]
 
-
-The :meth:`guess` function always gives a starting
-value for ``fraction`` of 0.5
+where :math:`\sigma_g = {\sigma}/{\sqrt{2\ln{2}}}` so that the full width
+at half maximum of each component and of the sum is :math:`2\sigma`. The
+:meth:`guess` function always sets the starting value for ``fraction`` at 0.5.
 
 :class:`Pearson7Model`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -241,7 +243,7 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
 .. math::
 
     f(x; A, \mu, \sigma, \gamma) = \frac{A\gamma}{2}
-    \exp\bigl[\gamma({\mu - x  + \sigma^2/2})\bigr]
+    \exp\bigl[\gamma({\mu - x  + \gamma\sigma^2/2})\bigr]
     {\operatorname{erfc}}\Bigl(\frac{\mu + \gamma\sigma^2 - x}{\sqrt{2}\sigma}\Bigr)
 
 
@@ -262,7 +264,7 @@ It has the usual parameters ``amplitude`` (:math:`A`), ``center`` (:math:`\mu`)
     f(x; A, \mu, \sigma, \gamma) = \frac{A}{\sigma\sqrt{2\pi}}
   e^{[{-{(x-\mu)^2}/{{2\sigma}^2}}]} \Bigl\{ 1 +
       {\operatorname{erf}}\bigl[
-         \frac{\gamma(x-\mu)}{\sigma\sqrt{2\pi}}
+         \frac{\gamma(x-\mu)}{\sigma\sqrt{2}}
      \bigr] \Bigr\}
 
 
@@ -474,18 +476,19 @@ mathematical constraints as discussed in :ref:`constraints_chapter`.
 :class:`ExpressionModel`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. class:: ExpressionModel(expr, independent_vars=None, init_script=None, missing=None, prefix='', name=None, **kws)
+.. class:: ExpressionModel(expr, independent_vars=None, init_script=None, **kws)
 
     A model using the user-supplied mathematical expression, which can be nearly any valid Python expresion.
 
     :param expr: expression use to build model
     :type expr: string
+    :param independent_vars: list of argument names in expression that are independent variables.
+    :type independent_vars: ``None`` (default) or list of strings for independent variables.
     :param init_script: python script to run before parsing and evaluating expression.
     :type init_script: ``None`` (default) or string
-    :param independent_vars: list of argument names to ``func`` that are independent variables.
-    :type independent_vars: ``None`` (default) or list of strings for independent variables.
 
-with other parameters passed to :class:`model.Model`.
+with other parameters passed to :class:`model.Model`, with the notable
+exception that :class:`ExpressionModel` does **not** support the `prefix` argument.
 
 Since the point of this model is that an arbitrary expression will be
 supplied, the determination of what are the parameter names for the model
@@ -506,7 +509,7 @@ For example, if one creates an :class:`ExpressionModel` as::
 
 The name `exp` will be recognized as the exponent function, so the model
 will be interpreted to have parameters named `off`, `amp`, `x0` and
-`phase`, and `x` will be assumed to be the sole independent variable.
+`phase`. In addition, `x` will be assumed to be the sole independent variable.
 In general, there is no obvious way to set default parameter values or
 parameter hints for bounds, so this will have to be handled explicitly.
 
diff --git a/doc/confidence.rst b/doc/confidence.rst
index 6c0cdab..44cc664 100644
--- a/doc/confidence.rst
+++ b/doc/confidence.rst
@@ -44,8 +44,8 @@ First we create an example problem::
     >>> import numpy as np
     >>> x = np.linspace(0.3,10,100)
     >>> y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
-    >>> p = lmfit.Parameters()
-    >>> p.add_many(('a', 0.1), ('b', 1))
+    >>> pars = lmfit.Parameters()
+    >>> pars.add_many(('a', 0.1), ('b', 1))
     >>> def residual(p):
     ...    a = p['a'].value
     ...    b = p['b'].value
@@ -57,8 +57,9 @@ that the automated estimate of the standard errors can be used as a
 starting point::
 
 
-    >>> mi = lmfit.minimize(residual, p)
-    >>> lmfit.printfuncs.report_fit(mi.params)
+    >>> mini = lmfit.Minimizer(residual, pars)
+    >>> result = mini.minimize()
+    >>> print(lmfit.fit_report(result.params))
     [Variables]]
         a:   0.09943895 +/- 0.000193 (0.19%) (init= 0.1)
         b:   1.98476945 +/- 0.012226 (0.62%) (init= 1)
@@ -68,7 +69,7 @@ starting point::
 Now it is just a simple function call to calculate the confidence
 intervals::
 
-    >>> ci = lmfit.conf_interval(mi)
+    >>> ci = lmfit.conf_interval(mini, result)
     >>> lmfit.printfuncs.report_ci(ci)
          99.70%    95.00%    67.40%     0.00%    67.40%    95.00%    99.70%
     a   0.09886   0.09905   0.09925   0.09944   0.09963   0.09982   0.10003
@@ -92,26 +93,12 @@ covariance can lead to misleading result -- two decaying exponentials.  In
 fact such a problem is particularly hard for the Levenberg-Marquardt
 method, so we fitst estimate the results using the slower but robust
 Nelder-Mead  method, and *then* use Levenberg-Marquardt to estimate the
-uncertainties and correlations::
+uncertainties and correlations
 
 
-    >>> x = np.linspace(1, 10, 250)
-    >>> np.random.seed(0)
-    >>> y = 3.0*np.exp(-x/2) -5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(len(x))
-    >>>
-    >>> p = lmfit.Parameters()
-    >>> p.add_many(('a1', 4.), ('a2', 4.), ('t1', 3.), ('t2', 3.))
-    >>>
-    >>> def residual(p):
-    ...    v = p.valuesdict()
-    ...    return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1)/v['t2'])-y
-    >>>
-    >>> # first solve with Nelder-Mead
-    >>> mi = lmfit.minimize(residual, p, method='Nelder')
-    >>> # then solve with Levenberg-Marquardt
-    >>> mi = lmfit.minimize(residual, p)
-    >>>
-    >>> lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+.. literalinclude:: ../examples/doc_confidence2.py
+
+which will report::
 
     [[Variables]]
         a1:   2.98622120 +/- 0.148671 (4.98%) (init= 2.986237)
@@ -120,39 +107,25 @@ uncertainties and correlations::
         t2:   11.8240350 +/- 0.463164 (3.92%) (init= 11.82408)
     [[Correlations]] (unreported correlations are <  0.500)
         C(a2, t2)                    =  0.987
-
-
-Again we call :func:`conf_interval`, this time with tracing and only for 1-
-and 2 :math:`\sigma`::
-
-    >>> ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
-    >>> lmfit.printfuncs.report_ci(ci)
-
+        C(a2, t1)                    = -0.925
+        C(t1, t2)                    = -0.881
+        C(a1, t1)                    = -0.599
           95.00%    68.00%     0.00%    68.00%    95.00%
     a1   2.71850   2.84525   2.98622   3.14874   3.34076
-    a2  -4.63180  -4.46663  -4.35429  -4.22883  -4.14178
-    t2  10.82699  11.33865  11.78219  12.28195  12.71094
-    t1   1.08014   1.18566   1.38044   1.45566   1.62579
-
-
-Comparing these two different estimates, we see that the estimate for `a1`
-is reasonably well approximated from the covariance matrix, but the
-estimates for `a2` and especially for `t1`, and `t2` are very asymmetric
-and that going from 1 :math:`\sigma` (68% confidence) to 2 :math:`\sigma`
-(95% confidence) is not very predictable.
+    a2  -4.63180  -4.46663  -4.33526  -4.22883  -4.14178
+    t2  10.82699  11.33865  11.82404  12.28195  12.71094
+    t1   1.08014   1.18566   1.30994   1.45566   1.62579
 
-Now let's plot a confidence region::
 
-    >>> import matplotlib.pylab as plt
-    >>> x, y, grid = lmfit.conf_interval2d(mi,'a1','t2',30,30)
-    >>> plt.contourf(x, y, grid, np.linspace(0,1,11))
-    >>> plt.xlabel('a1')
-    >>> plt.colorbar()
-    >>> plt.ylabel('t2')
-    >>> plt.show()
+Again we called :func:`conf_interval`, this time with tracing and only for
+1- and 2 :math:`\sigma`.  Comparing these two different estimates, we see
+that the estimate for `a1` is reasonably well approximated from the
+covariance matrix, but the estimates for `a2` and especially for `t1`, and
+`t2` are very asymmetric and that going from 1 :math:`\sigma` (68%
+confidence) to 2 :math:`\sigma` (95% confidence) is not very predictable.
 
-which shows the figure on the left below for ``a1`` and ``t2``, and for
-``a2`` and ``t2`` on the right:
+Plots made of the confidence region are shown in the figure on the left
+below for ``a1`` and ``t2``, and for ``a2`` and ``t2`` on the right:
 
 .. _figC1:
 
@@ -169,8 +142,9 @@ assumed by the approach using the covariance matrix.
 The trace returned as the optional second argument from
 :func:`conf_interval` contains a dictionary for each variable parameter.
 The values are dictionaries with arrays of values for each variable, and an
-array of corresponding probabilities for the corresponding cumulative variables.  This
-can be used to show the dependence between two parameters::
+array of corresponding probabilities for the corresponding cumulative
+variables.  This can be used to show the dependence between two
+parameters::
 
     >>> x, y, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
     >>> x2, y2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
diff --git a/doc/contents.rst b/doc/contents.rst
index dab58f3..46c61b2 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -6,6 +6,7 @@ Contents
 
    intro
    installation
+   whatsnew
    support
    faq
    parameters
diff --git a/doc/faq.rst b/doc/faq.rst
index cf8f563..f7f9aad 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -1,29 +1,76 @@
+.. _faq_chapter:
+
 ====================================
 Frequently Asked Questions
 ====================================
 
 A list of common questions.
 
+What's the best way to ask for help or submit a bug report?
+================================================================
+
+See :ref:`support_chapter`.
+
+
+Why did my script break when upgrading from lmfit 0.8.3 to 0.9.0?
+====================================================================
+
+See :ref:`whatsnew_090_label`
+
+
+I get import errors from IPython
+==============================================================
+
+If you see something like::
+
+        from IPython.html.widgets import Dropdown
+
+    ImportError: No module named 'widgets'
+
+then you need to install the ipywidgets package.   Try 'pip install ipywidgets'.
+
+
+
+
 How can I fit multi-dimensional data?
 ========================================
 
-The fitting routines except data arrays that are 1 dimensional and double
+The fitting routines accept data arrays that are 1 dimensional and double
 precision.  So you need to convert the data and model (or the value
-returned by the objective function) to be one dimensional by using
-numpy's :meth:`numpy.ndarray.flatten`, for example::
+returned by the objective function) to be one dimensional.  A simple way to
+do this is to use numpy's :meth:`numpy.ndarray.flatten`, for example::
 
     def residual(params, x, data=None):
         ....
         resid = calculate_multidim_residual()
         return resid.flatten()
 
+How can I fit multiple data sets?
+========================================
+
+As above, the fitting routines accept data arrays that are 1 dimensional and double
+precision.  So you need to convert the sets of data and models (or the value
+returned by the objective function) to be one dimensional.  A simple way to
+do this is to use numpy's :meth:`numpy.concatenate`.  As an example, here
+is a residual function to simultaneously fit two lines to two different
+arrays.  As a bonus, the two lines share the 'offset' parameter:
+
+    def fit_function(params, x=None, dat1=None, dat2=None):
+        model1 = params['offset'].value + x * params['slope1'].value
+        model2 = params['offset'].value + x * params['slope2'].value
+
+	resid1 = dat1 - model1
+        resid2 = dat2 - model2
+        return numpy.concatenate((resid1, resid2))
+
+
 
 How can I fit complex data?
 ===================================
 
 As with working with multidimensional data, you need to convert your data
-and model (or the value returned by the objective function) to be real.
-One way to do this would be to use a function like this::
+and model (or the value returned by the objective function) to be double precision
+floating point numbers. One way to do this would be to use a function like this::
 
     def realimag(array):
         return np.array([(x.real, x.imag) for x in array]).flatten()
@@ -45,3 +92,9 @@ Basically, no.  None of the minimizers in lmfit support integer
 programming.  They all (I think) assume that they can make a very small
 change to a floating point value for a parameters value and see a change in
 the value to be minimized.
+
+
+How should I cite LMFIT?
+==================================
+
+See http://dx.doi.org/10.5281/zenodo.11813
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 2db7d02..ca72339 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -7,18 +7,24 @@ Performing Fits, Analyzing Outputs
 As shown in the previous chapter, a simple fit can be performed with the
 :func:`minimize` function.  For more sophisticated modeling, the
 :class:`Minimizer` class can be used to gain a bit more control, especially
-when using complicated constraints.
+when using complicated constraints or comparing results from related fits.
+
+
+.. warning::
+
+  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
 
 
 The :func:`minimize` function
 ===============================
 
-The minimize function takes a objective function (the function that
-calculates the array to be minimized), a :class:`Parameters` ordered
-dictionary, and several optional arguments.  See :ref:`fit-func-label` for
-details on writing the function to minimize.
+The :func:`minimize` function is a wrapper around :class:`Minimizer` for
+running an optimization problem.  It takes an objective function (the
+function that calculates the array to be minimized), a :class:`Parameters`
+object, and several optional arguments.  See :ref:`fit-func-label` for
+details on writing the objective.
 
-.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **leastsq_kws]]]]]])
+.. function:: minimize(function, params[, args=None[, kws=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **fit_kws]]]]]])
 
    find values for the ``params`` so that the sum-of-squares of the array returned
    from ``function`` is minimized.
@@ -26,9 +32,9 @@ details on writing the function to minimize.
    :param function:  function to return fit residual.  See :ref:`fit-func-label` for details.
    :type  function:  callable.
    :param params:  a :class:`Parameters` dictionary.  Keywords must be strings
-                   that match ``[a-z_][a-z0-9_]*`` and is not a python
+                   that match ``[a-z_][a-z0-9_]*`` and cannot be a python
                    reserved word.  Each value must be :class:`Parameter`.
-   :type  params:  dict or :class:`Parameters`.
+   :type  params:  :class:`Parameters`.
    :param args:  arguments tuple to pass to the residual function as  positional arguments.
    :type  args:  tuple
    :param kws:   dictionary to pass to the residual function as keyword arguments.
@@ -37,25 +43,34 @@ details on writing the function to minimize.
    :type  method:  string (default ``leastsq``)
    :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
    :type  scale_covar:  bool (default ``True``)
-   :param iter_cb:  function to be called at each fit iteration
+   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
    :type  iter_cb:  callable or ``None``
-   :param leastsq_kws:  dictionary to pass to :func:`scipy.optimize.leastsq`.
-   :type  leastsq_kws:  dict
+   :param fit_kws:  dictionary to pass to :func:`scipy.optimize.leastsq` or :func:`scipy.optimize.minimize`.
+   :type  fit_kws:  dict
+
+   :return: :class:`MinimizerResult` instance, which will contain the
+            optimized parameters, and several goodness-of-fit statistics.
 
-   :return: Minimizer object, which can be used to inspect goodness-of-fit
-            statistics, or to re-run fit.
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
 
-   On output, the params will be updated with best-fit values and, where
-   appropriate, estimated uncertainties and correlations.  See
+
+   On output, the params will be unchanged.  The best-fit values, and where
+   appropriate, estimated uncertainties and correlations, will all be
+   contained in the returned :class:`MinimizerResult`.  See
    :ref:`fit-results-label` for further details.
 
-   If provided, the ``iter_cb`` function should take arguments of ``params,
-   iter, resid, *args, **kws``, where ``params`` will have the current
-   parameter values, ``iter`` the iteration, ``resid`` the current residual
-   array, and ``*args`` and ``**kws`` as passed to the objective function.
+   For clarity, it should be emphasized that this function is simply a
+   wrapper around :class:`Minimizer` that runs a single fit, implemented as::
+
+    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
+                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
+    return fitter.minimize(method=method)
+
 
 ..  _fit-func-label:
 
+
 Writing a Fitting Function
 ===============================
 
@@ -70,7 +85,7 @@ but it must look like this:
    calculate objective residual to be minimized from parameters.
 
    :param params: parameters.
-   :type  params:  dict
+   :type  params: :class:`Parameters`.
    :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
    :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
    :return: residual array (generally data-model) to be minimized in the least-squares sense.
@@ -147,47 +162,43 @@ being fast, and well-behaved for most curve-fitting needs, and making it
 easy to estimate uncertainties for and correlations between pairs of fit
 variables, as discussed in :ref:`fit-results-label`.
 
-Alternative algorithms can also be used by providing the ``method`` keyword
-to the :func:`minimize` function or use the corresponding method name from
-the :class:`Minimizer` class as listed in the :ref:`Table of Supported
-Fitting Methods <fit-methods-table>`.
+Alternative algorithms can also be used by providing the ``method``
+keyword to the :func:`minimize` function or :meth:`Minimizer.minimize`
+class as listed in the :ref:`Table of Supported Fitting Methods
+<fit-methods-table>`.
 
 .. _fit-methods-table:
 
- Table of Supported Fitting Methods:
-
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Fitting Meth          | ``method`` arg to            | :class:`Minimizer`  | ``method`` arg to           |
- |                       | :func:`minimize`             | method              | :meth:`scalar_minimize`     |
- +=======================+==============================+=====================+=============================+
- | Levenberg-Marquardt   |  ``leastsq``                 | :meth:`leastsq`     |   Not available             |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Nelder-Mead           |  ``nelder``                  | :meth:`fmin`        | ``Nelder-Mead``             |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | L-BFGS-B              |  ``lbfgsb``                  | :meth:`lbfgsb`      | ``L-BFGS-B``                |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Powell                |  ``powell``                  |                     | ``Powell``                  |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Conjugate Gradient    |  ``cg``                      |                     | ``CG``                      |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Newton-CG             |  ``newton``                  |                     | ``Newton-CG``               |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | COBYLA                |  ``cobyla``                  |                     |  ``COBYLA``                 |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | COBYLA                |  ``cobyla``                  |                     |  ``COBYLA``                 |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Truncated Newton      |  ``tnc``                     |                     |  ``TNC``                    |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Trust Newton-CGn      |  ``trust-ncg``               |                     |  ``trust-ncg``              |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Dogleg                |  ``dogleg``                  |                     |  ``dogleg``                 |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Sequential Linear     |  ``slsqp``                   |                     |  ``SLSQP``                  |
- | Squares Programming   |                              |                     |                             |
- +-----------------------+------------------------------+---------------------+-----------------------------+
- | Differential          |  ``differential_evolution``  |                     | ``differential_evolution``  |
- | Evolution             |                              |                     |                             |
- +-----------------------+------------------------------+---------------------+-----------------------------+
+ Table of Supported Fitting Methods:
+
+ +-----------------------+------------------------------------------------------------------+
+ | Fitting Method        | ``method`` arg to :func:`minimize` or :meth:`Minimizer.minimize` |
+ +=======================+==================================================================+
+ | Levenberg-Marquardt   |  ``leastsq``                                                     |
+ +-----------------------+------------------------------------------------------------------+
+ | Nelder-Mead           |  ``nelder``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | L-BFGS-B              |  ``lbfgsb``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Powell                |  ``powell``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Conjugate Gradient    |  ``cg``                                                          |
+ +-----------------------+------------------------------------------------------------------+
+ | Newton-CG             |  ``newton``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | COBYLA                |  ``cobyla``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Truncated Newton      |  ``tnc``                                                         |
+ +-----------------------+------------------------------------------------------------------+
+ | Dogleg                |  ``dogleg``                                                      |
+ +-----------------------+------------------------------------------------------------------+
+ | Sequential Linear     |  ``slsqp``                                                       |
+ | Squares Programming   |                                                                  |
+ +-----------------------+------------------------------------------------------------------+
+ | Differential          |  ``differential_evolution``                                      |
+ | Evolution             |                                                                  |
+ +-----------------------+------------------------------------------------------------------+
+
 
 .. note::
 
@@ -206,69 +217,161 @@ Fitting Methods <fit-methods-table>`.
 
 ..  _fit-results-label:
 
-Goodness-of-Fit and estimated uncertainty and correlations
-===================================================================
+:class:`MinimizerResult` -- the optimization result
+========================================================
+
+
+
+.. class:: MinimizerResult(**kws)
+
+.. versionadded:: 0.9.0
+
+An optimization with :func:`minimize` or :meth:`Minimizer.minimize`
+will return a :class:`MinimizerResult` object.  This is an otherwise
+plain container object (that is, with no methods of its own) that
+simply holds the results of the minimization.  These results will
+include several pieces of informational data such as status and error
+messages, fit statistics, and the updated parameters themselves.
+
+Importantly, the parameters passed in to :meth:`Minimizer.minimize`
+will not be changed.  To find the best-fit values, uncertainties
+and so on for each parameter, one must use the
+:attr:`MinimizerResult.params` attribute.
+
+.. attribute::   params
+
+  the :class:`Parameters` actually used in the fit, with updated
+  values, :attr:`stderr` and :attr:`correl`.
+
+.. attribute::  var_names
+
+  ordered list of variable parameter names used in optimization, and
+  useful for understanding the values in :attr:`init_vals` and
+  :attr:`covar`.
+
+.. attribute:: covar
+
+  covariance matrix from minimization (`leastsq` only), with
+  rows/columns using :attr:`var_names`.
+
+.. attribute:: init_vals
+
+  list of initial values for variable parameters using :attr:`var_names`.
+
+.. attribute::  nfev
+
+  number of function evaluations
+
+.. attribute::  success
+
+  boolean (``True``/``False``) for whether fit succeeded.
+
+.. attribute::  errorbars
+
+  boolean (``True``/``False``) for whether uncertainties were
+  estimated.
 
-On a successful fit using the `leastsq` method, several goodness-of-fit
-statistics and values related to the uncertainty in the fitted variables will be
-calculated.  These are all encapsulated in the :class:`Minimizer` object for the
-fit, as returned by :func:`minimize`.  The values related to the entire fit are
-stored in attributes of the :class:`Minimizer` object, as shown in :ref:`Table
-of Fit Results <goodfit-table>` while those related to each fitted variables are
-stored as attributes of the corresponding :class:`Parameter`.
+.. attribute::  message
 
+  message about fit success.
+
+.. attribute::  ier
+
+  integer error value from :func:`scipy.optimize.leastsq`  (`leastsq`
+  only).
+
+.. attribute::  lmdif_message
+
+  message from :func:`scipy.optimize.leastsq` (`leastsq` only).
+
+
+.. attribute::  nvarys
+
+  number of variables in fit  :math:`N_{\rm varys}`
+
+.. attribute::  ndata
+
+  number of data points:  :math:`N`
+
+.. attribute::  nfree
+
+  degrees of freedom in fit:  :math:`N - N_{\rm varys}`
+
+.. attribute::  residual
+
+  residual array, return value of :func:`func`:  :math:`{\rm Resid}`
+
+.. attribute::  chisqr
+
+  chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`
+
+.. attribute::  redchi
+
+  reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm
+  varys})}`
+
+.. attribute::  aic
+
+  Akaike Information Criterion statistic (see below)
+
+.. attribute::  bic
+
+  Bayesian Information Criterion statistic (see below).
+
+
+
+
+
+Goodness-of-Fit Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. _goodfit-table:
 
  Table of Fit Results:  These values, including the standard Goodness-of-Fit statistics,
- are all attributes of the :class:`Minimizer` object returned by :func:`minimize`.
+ are all attributes of the :class:`MinimizerResult` object returned by
+ :func:`minimize` or :meth:`Minimizer.minimize`.
 
 +----------------------+----------------------------------------------------------------------------+
-| :class:`Minimizer`   | Description / Formula                                                      |
-| Attribute            |                                                                            |
+| Attribute Name       | Description / Formula                                                      |
 +======================+============================================================================+
 |    nfev              | number of function evaluations                                             |
 +----------------------+----------------------------------------------------------------------------+
-|    success           | boolean (``True``/``False``) for whether fit succeeded.                    |
-+----------------------+----------------------------------------------------------------------------+
-|    errorbars         | boolean (``True``/``False``) for whether uncertainties were estimated.     |
-+----------------------+----------------------------------------------------------------------------+
-|    message           | message about fit success.                                                 |
-+----------------------+----------------------------------------------------------------------------+
-|    ier               | integer error value from :func:`scipy.optimize.leastsq`                    |
-+----------------------+----------------------------------------------------------------------------+
-|    lmdif_message     | message from :func:`scipy.optimize.leastsq`                                |
-+----------------------+----------------------------------------------------------------------------+
 |    nvarys            | number of variables in fit  :math:`N_{\rm varys}`                          |
 +----------------------+----------------------------------------------------------------------------+
 |    ndata             | number of data points:  :math:`N`                                          |
 +----------------------+----------------------------------------------------------------------------+
 |    nfree             | degrees of freedom in fit:  :math:`N - N_{\rm varys}`                      |
 +----------------------+----------------------------------------------------------------------------+
-|    residual          | residual array (return of :func:`func`:  :math:`{\rm Resid}`               |
+|    residual          | residual array, return value of :func:`func`:  :math:`{\rm Resid}`         |
 +----------------------+----------------------------------------------------------------------------+
 |    chisqr            | chi-square: :math:`\chi^2 = \sum_i^N [{\rm Resid}_i]^2`                    |
 +----------------------+----------------------------------------------------------------------------+
 |    redchi            | reduced chi-square: :math:`\chi^2_{\nu}= {\chi^2} / {(N - N_{\rm varys})}` |
 +----------------------+----------------------------------------------------------------------------+
-|    var_map           | list of variable parameter names for rows/columns of covar                 |
+|    aic               | Akaike Information Criterion statistic (see below)                         |
++----------------------+----------------------------------------------------------------------------+
+|    bic               | Bayesian Information Criterion statistic (see below)                       |
 +----------------------+----------------------------------------------------------------------------+
-|    covar             | covariance matrix (with rows/columns using var_map                         |
+|    var_names         | ordered list of variable parameter names used for init_vals and covar      |
++----------------------+----------------------------------------------------------------------------+
+|    covar             | covariance matrix (with rows/columns using var_names)                      |
++----------------------+----------------------------------------------------------------------------+
+|    init_vals         | list of initial values for variable parameters                             |
 +----------------------+----------------------------------------------------------------------------+
 
-Note that the calculation of chi-square and reduced chi-square assume that the
-returned residual function is scaled properly to the uncertainties in the data.
-For these statistics to be meaningful, the person writing the function to
-be minimized must scale them properly.
+Note that the calculation of chi-square and reduced chi-square assume
+that the returned residual function is scaled properly to the
+uncertainties in the data.  For these statistics to be meaningful, the
+person writing the function to be minimized must scale them properly.
 
-After a fit using using the :meth:`leastsq` method has completed successfully,
-standard errors for the fitted variables and correlations between pairs of
-fitted variables are automatically calculated from the covariance matrix.
-The standard error (estimated :math:`1\sigma` error-bar) go into the
-:attr:`stderr` attribute of the Parameter.  The correlations with all other
-variables will be put into the :attr:`correl` attribute of the Parameter --
-a dictionary with keys for all other Parameters and values of the
-corresponding correlation.
+After a fit using the :meth:`leastsq` method has completed
+successfully, standard errors for the fitted variables and correlations
+between pairs of fitted variables are automatically calculated from the
+covariance matrix.  The standard error (estimated :math:`1\sigma`
+error-bar) goes into the :attr:`stderr` attribute of the Parameter.  The
+correlations with all other variables will be put into the
+:attr:`correl` attribute of the Parameter -- a dictionary with keys for
+all other Parameters and values of the corresponding correlation.
 
 In some cases, it may not be possible to estimate the errors and
 correlations.  For example, if a variable actually has no practical effect
@@ -279,6 +382,83 @@ near the maximum or minimum value makes the covariance matrix singular.  In
 these cases, the :attr:`errorbars` attribute of the fit result
 (:class:`Minimizer` object) will be ``False``.
 
+Akaike and Bayesian Information Criteria
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The :class:`MinimizerResult` includes the traditional chi-square and reduced chi-square statistics:
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+        \chi^2  &=&  \sum_i^N r_i^2 \\
+        \chi^2_\nu &=& \chi^2 / (N-N_{\rm varys})
+    \end{eqnarray*}
+
+where :math:`r` is the residual array returned by the objective function
+(likely to be ``(data-model)/uncertainty`` for data modeling usages),
+:math:`N` is the number of data points (``ndata``), and :math:`N_{\rm
+varys}` is number of variable parameters.
+
+Also included are the `Akaike Information Criterion
+<http://en.wikipedia.org/wiki/Akaike_information_criterion>`_, and
+`Bayesian Information Criterion
+<http://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ statistics,
+held in the ``aic`` and ``bic`` attributes, respectively.  These give slightly
+different measures of the relative quality for a fit, trying to balance
+quality of fit with the number of variable parameters used in the fit.
+These are calculated as
+
+.. math::
+   :nowrap:
+
+   \begin{eqnarray*}
+     {\rm aic} &=&  N \ln(\chi^2/N) + 2 N_{\rm varys} \\
+     {\rm bic} &=&  N \ln(\chi^2/N) + \ln(N) *N_{\rm varys} \\
+    \end{eqnarray*}
+
+
+When comparing fits with different numbers of varying parameters, one
+typically selects the model with lowest reduced chi-square, Akaike
+information criterion, and/or Bayesian information criterion.  Generally,
+the Bayesian information criterion is considered the most conservative of
+these statistics.
+
+..  _fit-itercb-label:
+
+
+Using an Iteration Callback Function
+====================================
+
+An iteration callback function is a function to be called at each
+iteration, just after the objective function is called.  The iteration
+callback allows user-supplied code to be run at each iteration, and can be
+used to abort a fit.
+
+.. function:: iter_cb(params, iter, resid, *args, **kws):
+
+   user-supplied function to be run at each iteration
+
+   :param params: parameters.
+   :type  params: :class:`Parameters`.
+   :param iter:   iteration number
+   :type  iter:   integer
+   :param resid:  residual array.
+   :type  resid:  ndarray
+   :param args:  positional arguments.  Must match ``args`` argument to :func:`minimize`
+   :param kws:   keyword arguments.  Must match ``kws`` argument to :func:`minimize`
+   :return:      ``None`` for normal behavior, or any value that is ``True`` to abort the fit.
+   :rtype:    ``None`` for normal behavior, any value like ``True`` to abort fit.
+
+
+Normally, the iteration callback would have no return value or return
+``None``.  To abort a fit, have this function return a value that is
+``True`` (including any non-zero integer).  The fit will also abort if any
+exception is raised in the iteration callback. When a fit is aborted this
+way, the parameters will have the values from the last iteration.  The fit
+statistics are not likely to be meaningful, and uncertainties will not be computed.
+
+
 .. module:: Minimizer
 
 ..  _fit-minimizer-label:
@@ -287,12 +467,11 @@ Using the :class:`Minimizer` class
 =======================================
 
 For full control of the fitting process, you'll want to create a
-:class:`Minimizer` object, or at least use the one returned from the
-:func:`minimize` function.
+:class:`Minimizer` object.
 
 .. class:: Minimizer(function, params, fcn_args=None, fcn_kws=None, iter_cb=None, scale_covar=True, **kws)
 
-   creates a Minimizer, for fine-grain access to fitting methods and attributes.
+   creates a Minimizer, for more detailed access to fitting methods and attributes.
 
    :param function:  objective function to return fit residual.  See :ref:`fit-func-label` for details.
    :type  function:  callable.
@@ -304,83 +483,60 @@ For full control of the fitting process, you'll want to create a
    :type  fcn_args: tuple
    :param fcn_kws:  dictionary to pass to the residual function as keyword arguments.
    :type  fcn_kws:  dict
-   :param iter_cb:  function to be called at each fit iteration
+   :param iter_cb:  function to be called at each fit iteration.  See :ref:`fit-itercb-label` for details.
    :type  iter_cb:  callable or ``None``
    :param scale_covar:  flag for automatically scaling covariance matrix and uncertainties to reduced chi-square (``leastsq`` only)
    :type  scale_cover:  bool (default ``True``).
    :param kws:      dictionary to pass as keywords to the underlying :mod:`scipy.optimize` method.
    :type  kws:      dict
-   :return: Minimizer object, which can be used to inspect goodness-of-fit
-            statistics, or to re-run fit.
-
 
 The Minimizer object has a few public methods:
 
-.. method:: leastsq(scale_covar=True, **kws)
-
-   perform fit with Levenberg-Marquardt algorithm.  Keywords will be passed directly to
-   :func:`scipy.optimize.leastsq`.
-   By default, numerical derivatives are used, and the following arguments are set:
-
-    +------------------+----------------+------------------------------------------------------------+
-    | :meth:`leastsq`  |  Default Value | Description                                                |
-    | arg              |                |                                                            |
-    +==================+================+============================================================+
-    |   xtol           |  1.e-7         | Relative error in the approximate solution                 |
-    +------------------+----------------+------------------------------------------------------------+
-    |   ftol           |  1.e-7         | Relative error in the desired sum of squares               |
-    +------------------+----------------+------------------------------------------------------------+
-    |   maxfev         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
-    +------------------+----------------+------------------------------------------------------------+
-    |   Dfun           | ``None``       | function to call for Jacobian calculation                  |
-    +------------------+----------------+------------------------------------------------------------+
+.. method:: minimize(method='leastsq', params=None, **kws)
 
+   perform fit using either :meth:`leastsq` or :meth:`scalar_minimize`.
 
+   :param method: name of fitting method.  Must be one of the names in
+                  :ref:`Table of Supported Fitting Methods <fit-methods-table>`
+   :type  method:  str.
+   :param params:  a :class:`Parameters` dictionary for starting values
+   :type  params:  :class:`Parameters` or `None`
 
-.. method:: lbfgsb(**kws)
+   :return: :class:`MinimizerResult` object, containing updated
+            parameters, fitting statistics, and information.
 
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
 
-   perform fit with L-BFGS-B algorithm.  Keywords will be passed directly to
-   :func:`scipy.optimize.fmin_l_bfgs_b`.
+   Additional keywords are passed on to the corresponding :meth:`leastsq`
+   or :meth:`scalar_minimize` method.
 
 
-    +------------------+----------------+------------------------------------------------------------+
-    | :meth:`lbfgsb`   |  Default Value | Description                                                |
-    | arg              |                |                                                            |
-    +==================+================+============================================================+
-    |   factr          | 1000.0         |                                                            |
-    +------------------+----------------+------------------------------------------------------------+
-    |   approx_grad    |  ``True``      | calculate approximations of gradient                       |
-    +------------------+----------------+------------------------------------------------------------+
-    |   maxfun         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
-    +------------------+----------------+------------------------------------------------------------+
-
-.. warning::
+.. method:: leastsq(params=None, scale_covar=True, **kws)
 
-  :meth:`lbfgsb` is deprecated.  Use :meth:`minimize` with ``method='lbfgsb'``.
+   perform fit with Levenberg-Marquardt algorithm.  Keywords will be
+   passed directly to :func:`scipy.optimize.leastsq`.  By default,
+   numerical derivatives are used, and the following arguments are set:
 
-.. method:: fmin(**kws)
-
-   perform fit with Nelder-Mead downhill simplex algorithm.  Keywords will be passed directly to
-   :func:`scipy.optimize.fmin`.
 
     +------------------+----------------+------------------------------------------------------------+
-    | :meth:`fmin`     |  Default Value | Description                                                |
+    | :meth:`leastsq`  |  Default Value | Description                                                |
     | arg              |                |                                                            |
     +==================+================+============================================================+
-    |   ftol           | 1.e-4          | function tolerance                                         |
+    |   xtol           |  1.e-7         | Relative error in the approximate solution                 |
     +------------------+----------------+------------------------------------------------------------+
-    |   xtol           | 1.e-4          | parameter tolerance                                        |
+    |   ftol           |  1.e-7         | Relative error in the desired sum of squares               |
     +------------------+----------------+------------------------------------------------------------+
-    |   maxfun         | 5000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
+    |   maxfev         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
+    +------------------+----------------+------------------------------------------------------------+
+    |   Dfun           | ``None``       | function to call for Jacobian calculation                  |
     +------------------+----------------+------------------------------------------------------------+
 
-.. warning::
-
-  :meth:`fmin` is deprecated.  Use :meth:`minimize` with ``method='nelder'``.
 
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
 
-.. method:: scalar_minimize(method='Nelder-Mead', hess=None, tol=None, **kws)
+.. method:: scalar_minimize(method='Nelder-Mead', params=None, hess=None, tol=None, **kws)
 
    perform fit with any of the scalar minimization algorithms supported by
    :func:`scipy.optimize.minimize`.
@@ -396,49 +552,43 @@ The Minimizer object has a few public methods:
     |   hess                  | None            | Hessian of objective function                       |
     +-------------------------+-----------------+-----------------------------------------------------+
 
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
 
 .. method:: prepare_fit(**kws)
 
    prepares and initializes model and Parameters for subsequent
    fitting. This routine prepares the conversion of :class:`Parameters`
-   into fit variables, organizes parameter bounds, and parses, checks and
-   "compiles" constrain expressions.
-
-
-   This is called directly by the fitting methods, and it is generally not
-   necessary to call this function explicitly.  An exception is when you
-   would like to call your function to minimize prior to running one of the
-   minimization routines, for example, to calculate the initial residual
-   function.  In that case, you might want to do something like::
-
-      myfit = Minimizer(my_residual, params,  fcn_args=(x,), fcn_kws={'data':data})
+   into fit variables, organizes parameter bounds, and parses, "compiles"
+   and checks constraint expressions.   The method also creates and returns
+   a new instance of a :class:`MinimizerResult` object that contains the
+   copy of the Parameters that will actually be varied in the fit.
 
-      myfit.prepare_fit()
-      init = my_residual(p_fit, x)
-      pylab.plot(x, init, 'b--')
+   This method is called directly by the fitting methods, and it is
+   generally not necessary to call this function explicitly.
 
-      myfit.leastsq()
+.. versionchanged:: 0.9.0
+   return value changed to :class:`MinimizerResult`
 
-   That is, this method should be called prior to your fitting function being called.
 
 
 Getting and Printing Fit Reports
 ===========================================
 
-.. function:: fit_report(params, modelpars=None, show_correl=True, min_correl=0.1)
+.. function:: fit_report(result, modelpars=None, show_correl=True, min_correl=0.1)
 
    generate and return text of report of best-fit values, uncertainties,
    and correlations from fit.
 
-   :param params:       Parameters from fit, or Minimizer object as returned by :func:`minimize`.
+   :param result:       :class:`MinimizerResult` object as returned by :func:`minimize`.
    :param modelpars:    Parameters with "Known Values" (optional, default None)
    :param show_correl:  whether to show list of sorted correlations [``True``]
    :param min_correl:   smallest correlation absolute value to show [0.1]
 
-   If the first argument is a Minimizer object, as returned from
-   :func:`minimize`, the report will include some goodness-of-fit statistics.
+   If the first argument is a :class:`Parameters` object,
+   goodness-of-fit statistics will not be included.
 
-.. function:: report_fit(params, modelpars=None, show_correl=True, min_correl=0.1)
+.. function:: report_fit(result, modelpars=None, show_correl=True, min_correl=0.1)
 
    print text of report from :func:`fit_report`.
 
@@ -449,12 +599,21 @@ An example fit with report would be
 
 which would write out::
 
-
+    [[Fit Statistics]]
+        # function evals   = 85
+        # data points      = 1001
+        # variables        = 4
+        chi-square         = 498.812
+        reduced chi-square = 0.500
     [[Variables]]
         amp:      13.9121944 +/- 0.141202 (1.01%) (init= 13)
-        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
         period:   5.48507044 +/- 0.026664 (0.49%) (init= 2)
         shift:    0.16203677 +/- 0.014056 (8.67%) (init= 0)
+        decay:    0.03264538 +/- 0.000380 (1.16%) (init= 0.02)
     [[Correlations]] (unreported correlations are <  0.100)
         C(period, shift)             =  0.797
         C(amp, decay)                =  0.582
+        C(amp, shift)                = -0.297
+        C(amp, period)               = -0.243
+        C(shift, decay)              = -0.182
+        C(period, decay)             = -0.150
diff --git a/doc/index.rst b/doc/index.rst
index 8e6a217..a8472d2 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -7,28 +7,37 @@ Non-Linear Least-Square Minimization and Curve-Fitting for Python
 .. _MINPACK-1:               http://en.wikipedia.org/wiki/MINPACK
 
 
-Lmfit provides a high-level interface to non-linear optimization and
-curve fitting problems for Python. Lmfit builds on
-`Levenberg-Marquardt`_ algorithm of :func:`scipy.optimize.leastsq`, but
-also supports most of the optimization methods from :mod:`scipy.optimize`.
-It has a number of useful enhancements, including:
+.. warning::
+
+  Upgrading scripts from version 0.8.3 to 0.9.0?  See  :ref:`whatsnew_090_label`
+
+
+Lmfit provides a high-level interface to non-linear optimization and curve
+fitting problems for Python. Lmfit builds on and extends many of the
+optimization algorithms of :mod:`scipy.optimize`, especially the
+`Levenberg-Marquardt`_ method from :func:`scipy.optimize.leastsq`.
+
+Lmfit provides a number of useful enhancements to optimization and data
+fitting problems, including:
 
   * Using :class:`Parameter` objects instead of plain floats as variables.
-    A :class:`Parameter` has a value that can be varied in the fit, fixed,
-    have upper and/or lower bounds.  It can even have a value that is
-    constrained by an algebraic expression of other Parameter values.
+    A :class:`Parameter` has a value that can be varied in the fit, have a
+    fixed value, or have upper and/or lower bounds.  A Parameter can even
+    have a value that is constrained by an algebraic expression of other
+    Parameter values.
 
   * Ease of changing fitting algorithms.  Once a fitting model is set up,
-    one can change the fitting algorithm without changing the objective
-    function.
+    one can change the fitting algorithm used to find the optimal solution
+    without changing the objective function.
 
   * Improved estimation of confidence intervals.  While
     :func:`scipy.optimize.leastsq` will automatically calculate
-    uncertainties and correlations from the covariance matrix, lmfit also
+    uncertainties and correlations from the covariance matrix, the accuracy
+    of these estimates are often questionable.  To help address this, lmfit
     has functions to explicitly explore parameter space to determine
     confidence levels even for the most difficult cases.
 
-  * Improved curve-fitting with the :class:`Model` class.  This which
+  * Improved curve-fitting with the :class:`Model` class.  This
     extends the capabilities of :func:`scipy.optimize.curve_fit`, allowing
     you to turn a function that models for your data into a python class
     that helps you parametrize and fit data with that model.
diff --git a/doc/installation.rst b/doc/installation.rst
index dcdeaac..f9adadd 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -12,12 +12,12 @@ Prerequisites
 
 The lmfit package requires Python, Numpy, and Scipy.  Scipy version 0.13 or
 higher is recommended, but extensive testing on compatibility with various
-versions of scipy has not been done.  Lmfit does work with Python 2.7, and
-3.2 and 3.3.  No testing has been done with Python 3.4, but as the package
-is pure Python, relying only on scipy and numpy, no significant troubles
-are expected.  The `nose`_ framework is required for running the test
-suite, and IPython and matplotib are recommended.  If Pandas is available,
-it will be used in portions of lmfit.
+versions of scipy has not been done.  Lmfit works with Python 2.7, 3.3 and
+3.4.  No testing has been done with Python 3.5, but as the package is pure
+Python, relying only on scipy and numpy, no significant troubles are
+expected.  The `nose`_ framework is required for running the test suite,
+and IPython and matplotlib are recommended.  If Pandas is available, it will
+be used in portions of lmfit.
 
 
 Downloads
diff --git a/doc/intro.rst b/doc/intro.rst
index c8e4acd..209df4b 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -4,7 +4,7 @@
 Getting started with Non-Linear Least-Squares Fitting
 ===========================================================
 
-The lmfit package is designed to provide simple tools to help you build of
+The lmfit package is designed to provide simple tools to help you build
 complex fitting models for non-linear least-squares problems and apply
 these models to real data.  This section gives an overview of the concepts
 and describes how to set up and perform simple fits.  Some basic knowledge
@@ -79,13 +79,13 @@ including:
 
 These shortcomings are really do solely to the use of traditional arrays of
 variables, as matches closely the implementation of the Fortran code.  The
-lmfit module overcomes these shortcomings by using a core reason for using
-Python -- objects.  The key concept for lmfit is to use :class:`Parameter`
+lmfit module overcomes these shortcomings by using objects -- a core reason for working with
+Python.  The key concept for lmfit is to use :class:`Parameter`
 objects instead of plain floating point numbers as the variables for the
 fit.  By using :class:`Parameter` objects (or the closely related
 :class:`Parameters` -- a dictionary of :class:`Parameter` objects), one can
 
-   a) not care about the order of variables, but refer to Parameters
+   a) forget about the order of variables and refer to Parameters
       by meaningful names.
    b) place bounds on Parameters as attributes, without worrying about order.
    c) fix Parameters, without having to rewrite the objective function.
@@ -117,9 +117,9 @@ as::
 
 At first look, we simply replaced a list of values with a dictionary,
 accessed by name -- not a huge improvement.  But each of the named
-:class:`Parameter` in the :class:`Parameters` object hold additional
+:class:`Parameter` in the :class:`Parameters` object holds additional
 attributes to modify the value during the fit.  For example, Parameters can
-be fixed or bounded.  This can be done when being defined::
+be fixed or bounded.  This can be done during definition::
 
     params = Parameters()
     params.add('amp', value=10, vary=False)
@@ -127,24 +127,24 @@ be fixed or bounded.  This can be done when being defined::
     params.add('phase', value=0.2)
     params.add('frequency', value=3.0, max=10)
 
-(where ``vary=False`` will prevent the value from changing in the fit, and
-``min=-0.0`` will set a lower bound on that parameters value) or after
-being defined by setting the corresponding attributes after they have been
+where ``vary=False`` will prevent the value from changing in the fit, and
+``min=0.0`` will set a lower bound on that parameter's value. It can also be done
+later by setting the corresponding attributes after they have been
 created::
 
     params['amp'].vary = False
     params['decay'].min = 0.10
 
-Importantly, our function to be minimized remains unchanged.
+Importantly, our objective function remains unchanged.
 
 The `params` object can be copied and modified to make many user-level
 changes to the model and fitting process.  Of course, most of the
 information about how your data is modeled goes into the objective
-function, but the approach here allows some external control, that is by
-the **user** of the objective function instead of just by the author of the
+function, but the approach here allows some external control; that is, control by
+the **user** performing the fit, instead of by the author of the
 objective function.
 
 Finally, in addition to the :class:`Parameters` approach to fitting data,
-lmfit allows you to easily switch optimization methods without rewriting
-your objective function, and provides tools for writing fitting reports and
-for better determining the confidence levels for Parameters.
+lmfit allows switching optimization methods without changing
+the objective function, provides tools for writing fitting reports, and
+provides better determination of Parameters confidence levels.
diff --git a/doc/model.rst b/doc/model.rst
index dc2b87f..566090e 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -39,7 +39,7 @@ Example: Fit data to Gaussian profile
 Let's start with a simple and common example of fitting data to a Gaussian
 peak.  As we will see, there is a buit-in :class:`GaussianModel` class that
 provides a model function for a Gaussian profile, but here we'll build our
-own.  We start with a simple definition the model function:
+own.  We start with a simple definition of the model function:
 
     >>> from numpy import sqrt, pi, exp, linspace
     >>>
@@ -47,20 +47,20 @@ own.  We start with a simple definition the model function:
     ...    return amp * exp(-(x-cen)**2 /wid)
     ...
 
-that we want to use to fit to some data :math:`y(x)` represented by the
-arrays ``y`` and ``x``.  Using :func:`scipy.optimize.curve_fit` makes this
-easy to do, allowing us to do something like::
+We want to fit this objective function to data :math:`y(x)` represented by the
+arrays ``y`` and ``x``.  This can be done easily with :func:`scipy.optimize.curve_fit`::
 
     >>> from scipy.optimize import curve_fit
     >>>
-    >>> x, y = read_data_from_somewhere(....)
+    >>> x = linspace(-10,10)
+    >>> y = gaussian(x, 2.33, 0.21, 1.51) + np.random.normal(0, 0.2, len(x))
     >>>
-    >>> init_vals = [5, 5, 1]     # for [amp, cen, wid]
+    >>> init_vals = [1, 0, 1]     # for [amp, cen, wid]
     >>> best_vals, covar = curve_fit(gaussian, x, y, p0=init_vals)
     >>> print best_vals
 
 
-That is, we read in data from somewhere, make an initial guess of the model
+We sample random data points, make an initial guess of the model
 values, and run :func:`scipy.optimize.curve_fit` with the model function,
 data arrays, and initial guesses.  The results returned are the optimal
 values for the parameters and the covariance matrix.  It's simple and very
@@ -72,12 +72,12 @@ such a function would be fairly simple (essentially, ``data - model``,
 possibly with some weighting), and we would need to define and use
 appropriately named parameters.  Though convenient, it is somewhat of a
 burden to keep the named parameter straight (on the other hand, with
-func:`scipy.optimize.curve_fit` you are required to remember the parameter
+:func:`scipy.optimize.curve_fit` you are required to remember the parameter
 order).  After doing this a few times it appears as a recurring pattern,
 and we can imagine automating this process.  That's where the
 :class:`Model` class comes in.
 
-The :class:`Model` allows us to easily wrap a model function such as the
+:class:`Model` allows us to easily wrap a model function such as the
 ``gaussian`` function.  This automatically generate the appropriate
 residual function, and determines the corresponding parameter names from
 the function signature itself::
@@ -97,9 +97,9 @@ see below) are used for Parameter names.  Thus, for the ``gaussian``
 function above, the parameters are named ``amp``, ``cen``, and ``wid``, and
 ``x`` is the independent variable -- all taken directly from the signature
 of the model function. As we will see below, you can specify what the
-independent variable is, and you can add or alter parameters too.
+independent variable is, and you can add or alter parameters, too.
 
-On creation of the model, parameters are *not* created.  The model knows
+The parameters are *not* created when the model is created. The model knows
 what the parameters should be named, but not anything about the scale and
 range of your data.  You will normally have to make these parameters and
 assign initial values and other attributes.  To help you do this, each
@@ -111,7 +111,7 @@ the expected names:
 This creates the :class:`Parameters` but doesn't necessarily give them
 initial values -- again, the model has no idea what the scale should be.
 You can set initial values for parameters with keyword arguments to
-:meth:`make_params`, as with:
+:meth:`make_params`:
 
 
     >>> params = gmod.make_params(cen=5, amp=200, wid=1)
@@ -133,7 +133,7 @@ Admittedly, this a slightly long-winded way to calculate a Gaussian
 function.   But now that the model is set up, we can also use its
 :meth:`fit` method to fit this model to data, as with::
 
-    result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+    >>> result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
 
 Putting everything together, the script to do such a fit (included in the
 ``examples`` folder with the source code) is:
@@ -171,22 +171,21 @@ parameter values.  These can be used to generate the following plot:
 which shows the data in blue dots, the best fit as a solid red line, and
 the initial fit as a dashed black line.
 
-We emphasize here that the fit to this model function was really performed
-with 2 lines of code::
+Note that the model fitting was really performed with 2 lines of code::
 
     gmod = Model(gaussian)
     result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
 
 These lines clearly express that we want to turn the ``gaussian`` function
 into a fitting model, and then fit the :math:`y(x)` data to this model,
-starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``, and
-compare well to :func:`scipy.optimize.curve_fit`::
+starting with values of 5 for ``amp``, 5 for ``cen`` and 1 for ``wid``.
+This is much more expressive than :func:`scipy.optimize.curve_fit`::
 
     best_vals, covar = curve_fit(gaussian, x, y, p0=[5, 5, 1])
 
-except that all the other features of lmfit are included such as that the
+In addition, all the other features of lmfit are included:
 :class:`Parameters` can have bounds and constraints and the result is a
-richer object that can be reused to explore the fit in more detail.
+rich object that can be reused to explore the model fit in detail.
 
 
 The :class:`Model` class
@@ -252,23 +251,25 @@ specifying one or more independent variables.
    :type data: ndarray-like
    :param params: parameters to use for fit.
    :type params: ``None`` (default) or Parameters
-   :param weights: weights to use fit.
+   :param weights: weights to use for residual calculation in fit.
    :type weights: ``None`` (default) or ndarray-like.
    :param method:  name of fitting method to use. See  :ref:`fit-methods-label` for details
    :type  method:  string (default ``leastsq``)
    :param scale_covar:  whether to automatically scale covariance matrix (``leastsq`` only)
    :type  scale_covar:  bool (default ``True``)
-   :param iter_cb:  function to be called at each fit iteration
+   :param iter_cb:  function to be called at each fit iteration. See :ref:`fit-itercb-label` for details.
    :type  iter_cb:  callable or ``None``
    :param verbose:  print a message when a new parameter is created due to a *hint*
    :type  verbose:  bool (default ``True``)
    :param kws:      additional keyword arguments to pass to model function.
-   :return:         :class:`ModeFitResult` object.
+   :return:         :class:`ModelFit` object.
 
    If ``params`` is ``None``, the internal ``params`` will be used. If it
-   is supplied, these will replace the internal ones.  If supplied,
-   ``weights`` must is an ndarray-like object of same size and shape as
-   ``data``.
+   is supplied, these will replace the internal ones.   If supplied,
+   ``weights`` will be used to weight the calculated residual so that the
+   quantity minimized in the least-squares sense is ``weights*(data -
+   fit)``.  ``weights`` must be an ndarray-like object of same size and
+   shape as ``data``.
 
    Note that other arguments for the model function (including all the
    independent variables!) will need to be passed in using keyword
@@ -322,6 +323,7 @@ specifying one or more independent variables.
 
    See :ref:`model_param_hints_section`.
 
+
 :class:`Model` class Attributes
 ---------------------------------
 
@@ -640,8 +642,11 @@ model while the :class:`ModelFit` is the messier, more complex (but perhaps
 more useful) object that represents a fit with a set of parameters to data
 with a model.
 
+
 A :class:`ModelFit` has several attributes holding values for fit results,
-and several methods for working with fits.
+and several methods for working with fits.  These include statistics
+inherited from :class:`Minimizer` useful for comparing different models,
+including `chisqr`, `redchi`, `aic`, and `bic`.
 
 .. class:: ModelFit()
 
@@ -688,11 +693,126 @@ These methods are all inherited from :class:`Minimize` or from
    :param min_correl:   smallest correlation absolute value to show [0.1]
 
 
+.. method:: ModelFit.plot(datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None, fig=None, data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None, ax_fit_kws=None, fig_kws=None)
+
+   Plot the fit results and residuals using matplotlib, if available.  The
+   plot will include two panels, one showing the fit residual, and the
+   other with the data points, the initial fit curve, and the best-fit
+   curve. If the fit model included weights or if ``yerr`` is specified,
+   errorbars will also be plotted.
+
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param fitfmt:  matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string.
+   :param initfmt:  matplotlib format string for initial curve.
+   :type intfmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param numpoints:  number of points to display
+   :type numpoints: ``None`` or integer
+   :param fig: matplotlib Figure to plot on.
+   :type fig:  ``None`` or matplotlib.figure.Figure
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param init_kws:  keyword arguments passed to plot for initial curve.
+   :type init_kws: ``None`` or dictionary
+   :param ax_res_kws:  keyword arguments passed to creation of matplotlib axes for the residual plot.
+   :type ax_res_kws: ``None`` or dictionary
+   :param ax_fit_kws:  keyword arguments passed to creation of matplotlib axes for the fit plot.
+   :type ax_fit_kws: ``None`` or dictionary
+   :param fig_kws:  keyword arguments passed to creation of matplotlib figure.
+   :type fig_kws: ``None`` or dictionary
+   :returns:     matplotlib.figure.Figure
+
+   This combines :meth:`ModelFit.plot_fit` and :meth:`ModelFit.plot_residual`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``
+
+   If ``fig`` is None then ``matplotlib.pyplot.figure(**fig_kws)`` is called.
+
+.. method:: ModelFit.plot_fit(ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None, numpoints=None,  data_kws=None, fit_kws=None, init_kws=None, ax_kws=None)
+
+   Plot the fit results using matplotlib, if available.  The plot will include
+   the data points, the initial fit curve, and the best-fit curve. If the fit
+   model included weights or if ``yerr`` is specified, errorbars will also
+   be plotted.
+
+   :param ax: matplotlib axes to plot on.
+   :type ax:  ``None`` or matplotlib.axes.Axes.
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param fitfmt:  matplotlib format string for best-fit curve.
+   :type fitfmt: ``None`` or string.
+   :param initfmt:  matplotlib format string for initial curve.
+   :type intfmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param numpoints:  number of points to display
+   :type numpoints: ``None`` or integer
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param init_kws:  keyword arguments passed to plot for initial curve.
+   :type init_kws: ``None`` or dictionary
+   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
+   :type ax_kws: ``None`` or dictionary
+   :returns:     matplotlib.axes.Axes
+
+   For details about plot format strings and keyword arguments see
+   documentation of :func:`matplotlib.axes.Axes.plot`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``
+
+   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
+
+.. method:: ModelFit.plot_residuals(ax=None, datafmt='o', yerr=None, data_kws=None, fit_kws=None, ax_kws=None)
+
+  Plot the fit residuals (data - fit) using matplotlib.  If ``yerr`` is
+  supplied or if the model included weights, errorbars will also be plotted.
+
+   :param ax: matplotlib axes to plot on.
+   :type ax:  ``None`` or matplotlib.axes.Axes.
+   :param datafmt: matplotlib format string for data curve.
+   :type  datafmt: ``None`` or string.
+   :param yerr:  array of uncertainties for data array.
+   :type  yerr: ``None`` or ndarray.
+   :param numpoints:  number of points to display
+   :type numpoints: ``None`` or integer
+   :param data_kws:  keyword arguments passed to plot for data curve.
+   :type data_kws: ``None`` or dictionary
+   :param fit_kws:  keyword arguments passed to plot for best-fit curve.
+   :type fit_kws: ``None`` or dictionary
+   :param ax_kws:  keyword arguments passed to creation of matplotlib axes.
+   :type ax_kws: ``None`` or dictionary
+   :returns:     matplotlib.axes.Axes
+
+   For details about plot format strings and keyword arguments see
+   documentation of :func:`matplotlib.axes.Axes.plot`.
+
+   If ``yerr`` is specified or if the fit model included weights, then
+   matplotlib.axes.Axes.errorbar is used to plot the data.  If ``yerr`` is
+   not specified and the fit includes weights, ``yerr`` is set to ``1/self.weights``
+
+   If ``ax`` is None then ``matplotlib.pyplot.gca(**ax_kws)`` is called.
+
+
 
 
 :class:`ModelFit` attributes
 ---------------------------------
 
+.. attribute:: aic
+
+   floating point best-fit Akaike Information Criterion statistic (see :ref:`fit-results-label`).
+
 .. attribute:: best_fit
 
    ndarray result of model function, evaluated at provided
@@ -702,9 +822,13 @@ These methods are all inherited from :class:`Minimize` or from
 
    dictionary with  parameter names as keys, and best-fit values as values.
 
+.. attribute:: bic
+
+   floating point best-fit Bayesian Information Criterion statistic (see :ref:`fit-results-label`).
+
 .. attribute:: chisqr
 
-   floating point best-fit chi-square statistic.
+   floating point best-fit chi-square statistic (see :ref:`fit-results-label`).
 
 .. attribute:: covar
 
@@ -741,7 +865,7 @@ These methods are all inherited from :class:`Minimize` or from
    must take take arguments of ``params, iter, resid, *args, **kws``, where
    ``params`` will have the current parameter values, ``iter`` the
    iteration, ``resid`` the current residual array, and ``*args`` and
-   ``**kws`` as passed to the objective function.
+   ``**kws`` as passed to the objective function.  See :ref:`fit-itercb-label`.
 
 .. attribute:: jacfcn
 
@@ -785,7 +909,7 @@ These methods are all inherited from :class:`Minimize` or from
 
 .. attribute::  redchi
 
-    floating point reduced chi-square statistic
+    floating point reduced chi-square statistic (see :ref:`fit-results-label`).
 
 .. attribute::  residual
 
@@ -801,8 +925,10 @@ These methods are all inherited from :class:`Minimize` or from
 
 .. attribute:: weights
 
-   ndarray (or ``None``) of weighting values used in fit.
-
+   ndarray (or ``None``) of weighting values to be used in fit.  If not
+   ``None``, it will be used as a multiplicative factor of the residual
+   array, so that ``weights*(data - fit)`` is minimized in the
+   least-squares sense.
 
 .. index:: Composite models
 
diff --git a/doc/parameters.rst b/doc/parameters.rst
index 1729def..e0c4601 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -5,13 +5,15 @@
 ================================================
 
 This chapter describes :class:`Parameter` objects which is the key concept
-of lmfit.  A :class:`Parameter` is the quantity to be optimized in all
-minimization problems, replacing the plain floating point number used in
-the optimization routines from :mod:`scipy.optimize`.  A :class:`Parameter`
-has a value that can be varied in the fit, fixed, have upper and/or lower
-bounds.  It can even have a value that is constrained by an algebraic
-expression of other Parameter values.  Since :class:`Parameters` live
-outside the core optimization routines, they can be used in **all**
+of lmfit.
+
+A :class:`Parameter` is the quantity to be optimized in all minimization
+problems, replacing the plain floating point number used in the
+optimization routines from :mod:`scipy.optimize`.  A :class:`Parameter` has
+a value that can be varied in the fit or have a fixed value, have upper
+and/or lower bounds.  It can even have a value that is constrained by an
+algebraic expression of other Parameter values.  Since :class:`Parameters`
+live outside the core optimization routines, they can be used in **all**
 optimization routines from :mod:`scipy.optimize`.  By using
 :class:`Parameter` objects instead of plain variables, the objective
 function does not have to be modified to reflect every change of what is
@@ -27,8 +29,8 @@ a name, this is replaced by a :class:`Parameters` class, which works as an
 ordered dictionary of :class:`Parameter` objects, with a few additional
 features and methods.  That is, while the concept of a :class:`Parameter`
 is central to lmfit, one normally creates and interacts with a
-:class:`Parameters` instance that contains many :class:`Parameter`
-objects.  The objective functions you write will take an instance of
+:class:`Parameters` instance that contains many :class:`Parameter` objects.
+The objective functions you write for lmfit will take an instance of
 :class:`Parameters` as its first argument.
 
 
@@ -52,8 +54,8 @@ The :class:`Parameter` class
 
 Each of these inputs is turned into an attribute of the same name.
 
-After a fit, a Parameter for a fitted variable (that is with vary =
-``True``) will have the :attr:`value` attribute holding the best-fit value.
+After a fit, a Parameter for a fitted variable (that is with ``vary =
+True``) may have its :attr:`value` attribute updated to hold the best-fit value.
 Depending on the success of the fit and fitting algorithm used, it may also
 have attributes :attr:`stderr` and :attr:`correl`.
 
@@ -89,10 +91,9 @@ feature.
    :param max:   upper bound for value
    :param expr:  mathematical expression to use to evaluate value during fit.
 
-   Each argument of :meth:`set` has a default value of ``None``, and will
-
-  be set only if the provided value is not ``None``.  You can use this to
-   update some Parameter attribute without affecting others, for example::
+Each argument of :meth:`set` has a default value of ``None``, and will
+be set only if the provided value is not ``None``.  You can use this to
+update some Parameter attribute without affecting others, for example::
 
        p1 = Parameter('a', value=2.0)
        p2 = Parameter('b', value=0.0)
@@ -130,11 +131,11 @@ The :class:`Parameters` class
 
 .. class:: Parameters()
 
-   create a Parameters object.  This is little more than a fancy
-   dictionary, with the restrictions that
+   create a Parameters object.  This is little more than a fancy ordered
+   dictionary, with the restrictions that:
 
-   1. keys must be valid Python symbol names (so that they can be used in
-      expressions of mathematical constraints).  This means the names must
+   1. keys must be valid Python symbol names, so that they can be used in
+      expressions of mathematical constraints.  This means the names must
       match ``[a-z_][a-z0-9_]*``  and cannot be a Python reserved word.
 
    2. values must be valid :class:`Parameter` objects.
@@ -173,15 +174,20 @@ The :class:`Parameters` class
                 ('wid2',  None, False, None, None, '2*wid1/3'))
 
 
-.. method:: valuesdict(self)
+.. method:: pretty_print(oneline=False)
+
+   prints a clean representation of the Parameters. If `oneline` is
+   `True`, the result will be printed to a single (long) line.
+
+.. method:: valuesdict()
 
-   return an ordered dictionary of name:value pairs containing the
-   :attr:`name` and :attr:`value` of a Parameter.
+   return an ordered dictionary of name:value pairs with the
+   Parameter name as the key and Parameter value as value.
 
    This is distinct from the :class:`Parameters` itself, as the dictionary
    values are not :class:`Parameter` objects, just the :attr:`value`.
-   This can be a very convenient way to get updated values in a objective
-   function.
+   Using :meth:`valuesdict` can be a very convenient way to get updated
+   values in an objective function.
 
 .. method:: dumps(**kws):
 
diff --git a/doc/sphinx/theme/lmfitdoc/layout.html b/doc/sphinx/theme/lmfitdoc/layout.html
index 0681d42..6a31b9d 100644
--- a/doc/sphinx/theme/lmfitdoc/layout.html
+++ b/doc/sphinx/theme/lmfitdoc/layout.html
@@ -33,33 +33,28 @@
 {% block relbar1 %}
 <div>
 <table border=0>
-  <tr><td></td><td width=75% padding=5 align=left>
+  <tr><td></td><td width=85% padding=5 align=left>
        <a href="index.html" style="color: #157"> <font size=+3>LMFIT</font></a>
-     </td><td></td>
-     <td width=8% align=left>
+     </td>
+     <td width=7% align=left>
          <a href="contents.html" style="color: #882222">
          <font size+=1>Contents</font></a> </td>
-     <td width=8% align=left>
+     <td width=7% align=left>
           <a href="installation.html" style="color: #882222">
           <font size+=1>Download</font></a></td>
-     <td width=8% align=left>
-        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
-         <font size+=1>Develop</font></a></td>
+     <td></td>
   </tr>
   <tr><td></td><td width=75% padding=5 align=left>
         <a href="index.html" style="color: #157"> <font size=+2>
 	Non-Linear Least-Squares Minimization and Curve-Fitting for Python</font></a>
-     </td><td></td>
-     <td width=8% align=left>
-         <a href="intro.html" style="color: #882222">
-         <font size+=1>Introduction</font></a> </td>
-     <td width=8% align=left>
-         <a href="parameters.html" style="color: #882222">
-         <font size+=1>Parameters</font></a> </td>
-     <td width=8% align=left>
-         <a href="model.html" style="color: #882222">
-         <font size+=1>Models</font></a> </td>
-
+     </td>
+     <td width=7% align=left>
+         <a href="faq.html" style="color: #882222">
+         <font size+=1>FAQ</font></a> </td>
+     <td width=7% align=left>
+        <a href="https://github.com/lmfit/lmfit-py/" style="color: #882222">
+         <font size+=1>Develop</font></a></td>
+     <td></td>
   </tr>
 </table>
 </div>
diff --git a/doc/support.rst b/doc/support.rst
index deda01f..4fe2c3e 100644
--- a/doc/support.rst
+++ b/doc/support.rst
@@ -1,3 +1,5 @@
+.. _support_chapter:
+
 ===========================
 Getting Help
 ===========================
diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst
new file mode 100644
index 0000000..6ce0abe
--- /dev/null
+++ b/doc/whatsnew.rst
@@ -0,0 +1,97 @@
+.. _whatsnew_chapter:
+
+=====================
+Release Notes
+=====================
+
+.. _lmfit github repository:   http://github.com/lmfit/lmfit-py
+
+This section discusses changes between versions, especially significant
+changes to the use and behavior of the library.  This is not meant to be a
+comprehensive list of changes.  For such a complete record, consult the
+`lmfit github repository`_.
+
+.. _whatsnew_090_label:
+
+Version 0.9.0 Release Notes
+==========================================
+
+This upgrade makes an important, non-backward-compatible change to the way
+many fitting scripts and programs will work.  Scripts that work with
+version 0.8.3 will not work with version 0.9.0 and vice versa.  The change
+was not made lightly or without ample discussion, and is really an
+improvement.  Modifying scripts that did work with 0.8.3 to work with 0.9.0
+is easy, but needs to be done.
+
+
+
+Summary
+~~~~~~~~~~~~
+
+The upgrade from 0.8.3 to 0.9.0 introduced the :class:`MinimizerResult`
+class (see :ref:`fit-results-label`) which is now used to hold the return
+value from :func:`minimize` and :meth:`Minimizer.minimize`.  This returned
+object contains many goodness of fit statistics, and holds the optimized
+parameters from the fit.  Importantly, the parameters passed into
+:func:`minimize` and :meth:`Minimizer.minimize` are no longer modified by
+the fit. Instead, a copy of the passed-in parameters is made which is
+changed and returns as the :attr:`params` attribute of the returned
+:class:`MinimizerResult`.
+
+
+Impact
+~~~~~~~~~~~~~
+
+This upgrade means that a script that does::
+
+    my_pars = Parameters()
+    my_pars.add('amp',    value=300.0, min=0)
+    my_pars.add('center', value=  5.0, min=0, max=10)
+    my_pars.add('decay',  value=  1.0, vary=False)
+
+    result = minimize(objfunc, my_pars)
+
+will still work, but that ``my_pars`` will **NOT** be changed by the fit.
+Instead, ``my_pars`` is copied to an internal set of parameters that is
+changed in the fit, and this copy is then put in ``result.params``.  To
+look at fit results, use ``result.params``, not ``my_pars``.
+
+This has the effect that ``my_pars`` will still hold the starting parameter
+values, while all of the results from the fit are held in the ``result``
+object returned by :func:`minimize`.
+
+If you want to do an initial fit, then refine that fit -- for example, do
+a pre-fit, then refine that result with a different fitting method -- such as::
+
+    result1 = minimize(objfunc, my_pars, method='nelder')
+    result1.params['decay'].vary = True
+    result2 = minimize(objfunc, result1.params, method='leastsq')
+
+and have access to all of the starting parameters ``my_pars``, the result of the
+first fit ``result1``, and the result of the final fit ``result2``.
+
+
+
+Discussion
+~~~~~~~~~~~~~~
+
+The main goals for making this change were to
+
+   1. give a better return value to :func:`minimize` and
+      :meth:`Minimizer.minimize` that can hold all of the information
+      about a fit.  By having the return value be an instance of the
+      :class:`MinimizerResult` class, it can hold an arbitrary amount of
+      information that is easily accessed by attribute name, and even
+      be given methods.  Using objects is good!
+
+   2. To limit or even eliminate the amount of "state information" a
+      :class:`Minimizer` holds.  By state information, we mean how much of
+      the previous fit is remembered after a fit is done.  Keeping (and
+      especially using) such information about a previous fit means that
+      a :class:`Minimizer` might give different results even for the same
+      problem if run a second time.  While it's desirable to be able to
+      adjust a set of :class:`Parameters` and re-run a fit to get an improved
+      result, doing this by changing an *internal* attribute
+      (:attr:`Minimizer.params`) has the undesirable side-effect of not
+      being able to "go back", and makes it somewhat cumbersome to keep
+      track of changes made while adjusting parameters and re-running fits.
diff --git a/tests/NISTModels.py b/examples/NISTModels.py
similarity index 99%
copy from tests/NISTModels.py
copy to examples/NISTModels.py
index 0e07a2d..197856f 100644
--- a/tests/NISTModels.py
+++ b/examples/NISTModels.py
@@ -3,7 +3,7 @@ import sys
 from numpy import exp, log, log10, sin, cos, arctan, array
 from lmfit import Parameters
 thisdir, thisfile = os.path.split(__file__)
-NIST_DIR = os.path.join(thisdir, 'NIST_STRD')
+NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
 
 def read_params(params):
     if isinstance(params, Parameters):
diff --git a/examples/confidence_interval.py b/examples/confidence_interval.py
index 8899b8e..9ece8e3 100644
--- a/examples/confidence_interval.py
+++ b/examples/confidence_interval.py
@@ -5,7 +5,7 @@ Created on Sun Apr 15 19:47:45 2012
 @author: Tillsten
 """
 import numpy as np
-from lmfit import Parameters, minimize, conf_interval, report_fit, report_ci
+from lmfit import Parameters, Minimizer, conf_interval, report_fit, report_ci
 
 from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
 from scipy.optimize import leastsq
@@ -50,20 +50,23 @@ fit_params.add('period', value=2)
 fit_params.add('shift', value=0.0)
 fit_params.add('decay', value=0.02)
 
-out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+mini = Minimizer(residual, fit_params, fcn_args=(x,),
+                 fcn_kws={'data':data})
+out = mini.leastsq()
 
-fit = residual(fit_params, x)
+fit = residual(out.params, x)
 
 print( ' N fev = ', out.nfev)
 print( out.chisqr, out.redchi, out.nfree)
 
-report_fit(fit_params)
+report_fit(out.params)
 #ci=calc_ci(out)
-ci, tr = conf_interval(out, trace=True)
+
+ci, tr = conf_interval(mini, out, trace=True)
 report_ci(ci)
     
 if HASPYLAB:
-    names=fit_params.keys()
+    names=out.params.keys()
     i=0  
     gs=pylab.GridSpec(4,4)
     sx={}
diff --git a/examples/confidence_interval2.py b/examples/confidence_interval2.py
index 4d2f2ab..62a1f3d 100644
--- a/examples/confidence_interval2.py
+++ b/examples/confidence_interval2.py
@@ -44,23 +44,25 @@ fit_params.add('decay', value=0.010)
 fit_params.add('amp2', value=-10.0)
 fit_params.add('decay2', value=0.050)
 
-out = minimize(residual, fit_params, args=(x,), kws={'data':data})
-out.leastsq()
-ci, trace = conf_interval(out, trace=True)
+mini = Minimizer(residual, fit_params,
+                 fcn_args=(x,), fcn_kws={'data':data})
 
+out = mini.leastsq()
 
-names=fit_params.keys()
+ci, trace = conf_interval(mini, out, trace=True)
+
+names = out.params.keys()
 
 if HASPYLAB:
     pylab.rcParams['font.size']=8
-    pylab.plot(x,data)
+    pylab.plot(x, data)
     pylab.figure()
     cm=pylab.cm.coolwarm
     for i in range(4):
         for j in range(4):
             pylab.subplot(4,4,16-j*4-i)
             if i!=j:
-                x,y,m = conf_interval2d(out,names[i],names[j],20,20)
+                x,y,m = conf_interval2d(mini, out, names[i],names[j],20,20)
                 pylab.contourf(x,y,m,np.linspace(0,1,10),cmap=cm)
                 pylab.xlabel(names[i])
                 pylab.ylabel(names[j])
diff --git a/examples/doc_basic.py b/examples/doc_basic.py
index af92a55..cb09cc6 100644
--- a/examples/doc_basic.py
+++ b/examples/doc_basic.py
@@ -34,7 +34,7 @@ result = minimize(fcn2min, params, args=(x, data))
 final = data + result.residual
 
 # write error report
-report_fit(params)
+report_fit(result.params)
 
 # try to plot results
 try:
diff --git a/examples/doc_basic_valuesdict.py b/examples/doc_basic_valuesdict.py
index 8073106..9e16858 100644
--- a/examples/doc_basic_valuesdict.py
+++ b/examples/doc_basic_valuesdict.py
@@ -31,7 +31,7 @@ result = minimize(fcn2min, params, args=(x, data))
 final = data + result.residual
 
 # write error report
-report_fit(params)
+report_fit(result.params)
 
 # try to plot results
 try:
diff --git a/examples/doc_confidence1.py b/examples/doc_confidence1.py
index 81e415e..e8cca87 100644
--- a/examples/doc_confidence1.py
+++ b/examples/doc_confidence1.py
@@ -15,10 +15,9 @@ def residual(p):
 
    return 1/(a*x)+b-y
 
-mi = lmfit.minimize(residual, p)
-lmfit.printfuncs.report_fit(mi.params)
+minimizer = lmfit.Minimizer(residual, p)
+out = minimizer.leastsq()
+lmfit.printfuncs.report_fit(out.params)
 
-ci = lmfit.conf_interval(mi)
+ci = lmfit.conf_interval(minimizer, out)
 lmfit.printfuncs.report_ci(ci)
-
-
diff --git a/examples/doc_confidence2.py b/examples/doc_confidence2.py
index 489df88..38240d3 100644
--- a/examples/doc_confidence2.py
+++ b/examples/doc_confidence2.py
@@ -1,11 +1,10 @@
 import lmfit
 import numpy as np
 import matplotlib
-matplotlib.use('WXAgg')
+# matplotlib.use('WXAgg')
 
 import matplotlib.pyplot as plt
 
-
 x = np.linspace(1, 10, 250)
 np.random.seed(0)
 y = 3.0*np.exp(-x/2) -5.0*np.exp(-(x-0.1)/10.) + 0.1*np.random.randn(len(x))
@@ -17,36 +16,42 @@ def residual(p):
    v = p.valuesdict()
    return v['a1']*np.exp(-x/v['t1']) + v['a2']*np.exp(-(x-0.1)/v['t2'])-y
 
+# create Minimizer
+mini = lmfit.Minimizer(residual, p)
+
 # first solve with Nelder-Mead
-mi = lmfit.minimize(residual, p, method='Nelder')
+out1 = mini.minimize(method='Nelder')
 
-mi = lmfit.minimize(residual, p)
+# then solve with Levenberg-Marquardt using the
+# Nelder-Mead solution as a starting point
+out2 = mini.minimize(method='leastsq', params=out1.params)
 
-lmfit.printfuncs.report_fit(mi.params, min_correl=0.5)
+lmfit.report_fit(out2.params, min_correl=0.5)
 
-ci, trace = lmfit.conf_interval(mi, sigmas=[0.68,0.95], trace=True, verbose=False)
+ci, trace = lmfit.conf_interval(mini, out2, sigmas=[0.68,0.95],
+                                trace=True, verbose=False)
 lmfit.printfuncs.report_ci(ci)
 
-plot_type = 3
+plot_type = 2
 if plot_type == 0:
     plt.plot(x, y)
-    plt.plot(x, residual(p)+y )
+    plt.plot(x, residual(out2.params)+y )
 
 elif plot_type == 1:
-    cx, cy, grid = lmfit.conf_interval2d(mi,'a2','t2',30,30)
+    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a2','t2',30,30)
     plt.contourf(cx, cy, grid, np.linspace(0,1,11))
     plt.xlabel('a2')
     plt.colorbar()
     plt.ylabel('t2')
 
 elif plot_type == 2:
-    cx, cy, grid = lmfit.conf_interval2d(mi,'a1','t2',30,30)
+    cx, cy, grid = lmfit.conf_interval2d(mini, out2, 'a1','t2',30,30)
     plt.contourf(cx, cy, grid, np.linspace(0,1,11))
     plt.xlabel('a1')
     plt.colorbar()
     plt.ylabel('t2')
 
-    
+
 elif plot_type == 3:
     cx1, cy1, prob = trace['a1']['a1'], trace['a1']['t2'],trace['a1']['prob']
     cx2, cy2, prob2 = trace['t2']['t2'], trace['t2']['a1'],trace['t2']['prob']
diff --git a/examples/doc_model1.py b/examples/doc_model1.py
index ced7da3..6561f1a 100644
--- a/examples/doc_model1.py
+++ b/examples/doc_model1.py
@@ -1,25 +1,25 @@
-#!/usr/bin/env python
-#<examples/doc_model1.py>
-from numpy import sqrt, pi, exp, linspace, loadtxt
-from lmfit import  Model
-
-import matplotlib.pyplot as plt
-
-data = loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1]
-
-def gaussian(x, amp, cen, wid):
-    "1-d gaussian: gaussian(x, amp, cen, wid)"
-    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
-
-gmod = Model(gaussian)
-result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
-
-print(result.fit_report())
-
-plt.plot(x, y,         'bo')
-plt.plot(x, result.init_fit, 'k--')
-plt.plot(x, result.best_fit, 'r-')
-plt.show()
-#<end examples/doc_model1.py>
+#!/usr/bin/env python
+#<examples/doc_model1.py>
+from numpy import sqrt, pi, exp, linspace, loadtxt
+from lmfit import  Model
+
+import matplotlib.pyplot as plt
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+def gaussian(x, amp, cen, wid):
+    "1-d gaussian: gaussian(x, amp, cen, wid)"
+    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+
+gmod = Model(gaussian)
+result = gmod.fit(y, x=x, amp=5, cen=5, wid=1)
+
+print(result.fit_report())
+
+plt.plot(x, y,         'bo')
+plt.plot(x, result.init_fit, 'k--')
+plt.plot(x, result.best_fit, 'r-')
+plt.show()
+#<end examples/doc_model1.py>
diff --git a/examples/doc_model2.py b/examples/doc_model2.py
index afb41c0..d19861c 100644
--- a/examples/doc_model2.py
+++ b/examples/doc_model2.py
@@ -1,31 +1,31 @@
-#!/usr/bin/env python
-#<examples/model_doc2.py>
-from numpy import sqrt, pi, exp, linspace, loadtxt
-from lmfit import Model
-
-import matplotlib.pyplot as plt
-
-data = loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1] + 0.25*x - 1.0
-
-def gaussian(x, amp, cen, wid):
-    "1-d gaussian: gaussian(x, amp, cen, wid)"
-    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
-
-def line(x, slope, intercept):
-    "line"
-    return slope * x + intercept
-
-mod = Model(gaussian) + Model(line)
-pars  = mod.make_params( amp=5, cen=5, wid=1, slope=0, intercept=1)
-
-result = mod.fit(y, pars, x=x)
-
-print(result.fit_report())
-
-plt.plot(x, y,         'bo')
-plt.plot(x, result.init_fit, 'k--')
-plt.plot(x, result.best_fit, 'r-')
-plt.show()
-#<end examples/model_doc2.py>
+#!/usr/bin/env python
+#<examples/model_doc2.py>
+from numpy import sqrt, pi, exp, linspace, loadtxt
+from lmfit import Model
+
+import matplotlib.pyplot as plt
+
+data = loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1] + 0.25*x - 1.0
+
+def gaussian(x, amp, cen, wid):
+    "1-d gaussian: gaussian(x, amp, cen, wid)"
+    return (amp/(sqrt(2*pi)*wid)) * exp(-(x-cen)**2 /(2*wid**2))
+
+def line(x, slope, intercept):
+    "line"
+    return slope * x + intercept
+
+mod = Model(gaussian) + Model(line)
+pars  = mod.make_params( amp=5, cen=5, wid=1, slope=0, intercept=1)
+
+result = mod.fit(y, pars, x=x)
+
+print(result.fit_report())
+
+plt.plot(x, y,         'bo')
+plt.plot(x, result.init_fit, 'k--')
+plt.plot(x, result.best_fit, 'r-')
+plt.show()
+#<end examples/model_doc2.py>
diff --git a/examples/doc_withreport.py b/examples/doc_withreport.py
index 6163719..a2f30ee 100644
--- a/examples/doc_withreport.py
+++ b/examples/doc_withreport.py
@@ -18,7 +18,7 @@ def residual(pars, x, data=None):
     per =  vals['period']
     shift = vals['shift']
     decay = vals['decay']
-    
+
     if abs(shift) > pi/2:
         shift = shift - sign(shift)*pi
     model = amp * sin(shift + x/per) * exp(-x*x*decay*decay)
@@ -44,7 +44,7 @@ fit_params.add('decay', value=0.02)
 
 out = minimize(residual, fit_params, args=(x,), kws={'data':data})
 
-fit = residual(fit_params, x)
-print(fit_report(fit_params))
+print(fit_report(out))
+
 
 #<end of examples/doc_withreport.py>
diff --git a/examples/example_covar.py b/examples/example_covar.py
index 4eea1ea..32cfc03 100644
--- a/examples/example_covar.py
+++ b/examples/example_covar.py
@@ -73,12 +73,12 @@ for scale_covar in (True, False):
         p_fit['line_slope'].value =0.0
         p_fit['line_off'].value   =0.0
 
-        myfit.leastsq()
+        out = myfit.leastsq()
         print '  sigma          = ', sigma
-        print '  chisqr         = ', myfit.chisqr
-        print '  reduced_chisqr = ', myfit.redchi
+        print '  chisqr         = ', out.chisqr
+        print '  reduced_chisqr = ', out.redchi
 
-        report_fit(p_fit, modelpars=p_true, show_correl=False)
+        report_fit(out.params, modelpars=p_true, show_correl=False)
         print '  =============================='
 
 
diff --git a/examples/example_derivfunc.py b/examples/example_derivfunc.py
index 32b87e7..555e90e 100644
--- a/examples/example_derivfunc.py
+++ b/examples/example_derivfunc.py
@@ -46,13 +46,13 @@ data = y + 0.15*np.random.normal(size=len(x))
 
 # fit without analytic derivative
 min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
-min1.leastsq()
-fit1 = func(params1, x)
+out1 = min1.leastsq()
+fit1 = func(out1.params, x)
 
 # fit with analytic derivative
 min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
-min2.leastsq(Dfun=dfunc, col_deriv=1)
-fit2 = func(params2, x)
+out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
+fit2 = func(out2.params, x)
 
 print '''Comparison of fit to exponential decay
 with and without analytic derivatives, to
@@ -68,11 +68,11 @@ Chi-square         |   %.4f    |   %.4f  |
    c               |   %.4f    |   %.4f  |
 ----------------------------------------------
 ''' %  (a, b, c,
-        min1.nfev,   min2.nfev,
-        min1.chisqr, min2.chisqr,
-        params1['a'].value, params2['a'].value,
-        params1['b'].value, params2['b'].value,
-        params1['c'].value, params2['c'].value )
+        out1.nfev,   out2.nfev,
+        out1.chisqr, out2.chisqr,
+        out1.params['a'].value, out2.params['a'].value,
+        out1.params['b'].value, out2.params['b'].value,
+        out1.params['c'].value, out2.params['c'].value )
 
 
 if HASPYLAB:
diff --git a/examples/example_lbfgsb.py b/examples/example_lbfgsb.py
index 772dea1..02bf43f 100644
--- a/examples/example_lbfgsb.py
+++ b/examples/example_lbfgsb.py
@@ -44,11 +44,11 @@ init = residual(fit_params, x)
 
 out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data':data})
 
-fit = residual(fit_params, x)
+fit = residual(out.params, x)
 
-for name, par in fit_params.items():
+for name, par in out.params.items():
     nout = "%s:%s" % (name, ' '*(20-len(name)))
-    print "%s: %s (%s) " % (nout, par.value, p_true[name].value)
+    print "%s: %s (true=%s) " % (nout, par.value, p_true[name].value)
 
 #print out.chisqr, out.redchi, out.nfree
 #
@@ -59,8 +59,3 @@ if HASPYLAB:
     pylab.plot(x, init, 'k')
     pylab.plot(x, fit, 'b')
     pylab.show()
-
-
-
-
-
diff --git a/examples/fit_NIST_leastsq.py b/examples/fit_NIST_leastsq.py
deleted file mode 100644
index 525f562..0000000
--- a/examples/fit_NIST_leastsq.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# from __future__ import print_function
-import sys
-import math
-
-try:
-    import matplotlib
-    matplotlib.use('WXAgg')
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-from scipy.optimize import leastsq, curve_fit
-
-from NISTModels import Models, ReadNistData
-
-
-def ndig(a, b):
-    return int(0.5-math.log10(abs(abs(a)-abs(b))/abs(b)))
-
-def Compare_NIST_Results(DataSet, params, NISTdata):
-    print(' ======================================')
-    print(' %s: ' % DataSet)
-    print(' | Parameter Name |  Value Found   |  Certified Value | # Matching Digits |')
-    print( ' |----------------+----------------+------------------+-------------------|')
-
-    val_dig_min = 1000
-    err_dig_min = 1000
-    for i in range(NISTdata['nparams']):
-        parname = 'b%i' % (i+1)
-        par = params[parname]
-        thisval = par.value
-        certval = NISTdata['cert_values'][i]
-
-        thiserr = par.stderr
-        certerr = NISTdata['cert_stderr'][i]
-        vdig   = ndig(thisval, certval)
-        edig   = ndig(thiserr, certerr)
-
-        pname = (parname + ' value ' + ' '*14)[:14]
-        ename = (parname + ' stderr' + ' '*14)[:14]
-        print(' | %s | % -.7e | % -.7e   | %2i                |' % (pname, thisval, certval, vdig))
-        print(' | %s | % -.7e | % -.7e   | %2i                |' % (ename, thiserr, certerr, edig))
-
-        val_dig_min = min(val_dig_min, vdig)
-        err_dig_min = min(err_dig_min, edig)
-
-    print(' |----------------+----------------+------------------+-------------------|')
-    sumsq = NISTdata['sum_squares']
-    chi2 = 'xx' # myfit.chisqr
-    print(' | Sum of Squares | %.7e  | %.7e    |  %2i               |' % (chi2, sumsq,
-                                                                          ndig(chi2, sumsq)))
-    print(' |----------------+----------------+------------------+-------------------|')
-
-    print(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min))
-
-    return val_dig_min
-
-def NIST_Test(DataSet, start='start2', plot=True):
-
-    NISTdata = ReadNistData(DataSet)
-    resid, npar, dimx = Models[DataSet]
-    y = NISTdata['y']
-    x = NISTdata['x']
-
-    params = []
-    param_names = []
-    for i in range(npar):
-        pname = 'b%i' % (i+1)
-        cval  = NISTdata['cert_values'][i]
-        cerr  = NISTdata['cert_stderr'][i]
-        pval1 = NISTdata[start][i]
-        params.append(pval1)
-        param_names.append(pname)
-
-
-        #     myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y':y},
-        #                       scale_covar=True)
-        #
-    print 'lsout ', params
-    lsout = leastsq(resid, params, args=(x, y), full_output=True)
-
-    print 'lsout ', lsout
-    print params   , len(x), len(y)
-
-    digs = Compare_NIST_Results(DataSet,  params, NISTdata)
-
-    if plot and HASPYLAB:
-        fit = -resid(params, x, )
-        pylab.plot(x, y, 'r+-')
-        pylab.plot(x, fit, 'ko--')
-        pylab.show()
-
-    return digs > 2
-
-msg1 = """
------ NIST StRD Models -----
-Select one of the Models listed below:
-and a starting point of 'start1' or 'start2'
-"""
-
-msg2 = """
-That is, use
-    python fit_NIST.py Bennet5 start1
-or go through all models and starting points with:
-    python fit_NIST.py all
-"""
-
-if __name__  == '__main__':
-    dset = 'Bennett5'
-    start = 'start2'
-    if len(sys.argv) < 2:
-        print(msg1)
-        out = ''
-        for d in sorted(Models.keys()):
-            out = out + ' %s ' % d
-            if len(out) > 55:
-                print( out)
-                out = ''
-        print(out)
-        print(msg2)
-
-        sys.exit()
-
-    if len(sys.argv) > 1:
-        dset = sys.argv[1]
-    if len(sys.argv) > 2:
-        start = sys.argv[2]
-    if dset.lower() == 'all':
-        tpass = 0
-        tfail = 0
-        failures = []
-        dsets = sorted(Models.keys())
-        for dset in dsets:
-            for start in ('start1', 'start2'):
-                if NIST_Test(dset, start=start, plot=False):
-                    tpass += 1
-                else:
-                    tfail += 1
-                    failures.append("   %s (starting at '%s')" % (dset, start))
-
-        print('--------------------------------------')
-        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
-        print(' Tests Failed for:\n %s' % '\n '.join(failures))
-        print('--------------------------------------')
-    else:
-        NIST_Test(dset, start=start, plot=True)
-
diff --git a/examples/test_NIST_Strd.py b/examples/fit_NIST_lmfit.py
similarity index 96%
rename from examples/test_NIST_Strd.py
rename to examples/fit_NIST_lmfit.py
index d053bbf..d05dc5e 100644
--- a/examples/test_NIST_Strd.py
+++ b/examples/fit_NIST_lmfit.py
@@ -9,29 +9,25 @@ try:
     matplotlib.use('WXAgg')
     import pylab
     HASPYLAB = True
-
 except ImportError:
     HASPYLAB = False
 
-if 'nose' in arg:
-    HASPYLAB = False
 
 from lmfit import Parameters, minimize
 
 from NISTModels import Models, ReadNistData
 
-
 def ndig(a, b):
     "precision for NIST values"
     return round(-math.log10((abs(abs(a)-abs(b)) +1.e-15)/ abs(b)))
 
 
-def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
+def Compare_NIST_Results(DataSet, myfit, NISTdata):
     print(' ======================================')
     print(' %s: ' % DataSet)
     print(' | Parameter Name |  Value Found   |  Certified Value | # Matching Digits |')
     print(' |----------------+----------------+------------------+-------------------|')
-
+    params = myfit.params
     val_dig_min = 200
     err_dig_min = 200
     for i in range(NISTdata['nparams']):
@@ -87,10 +83,10 @@ def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
 
 
     myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
-    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)
+    digs = Compare_NIST_Results(DataSet, myfit, NISTdata)
 
     if plot and HASPYLAB:
-        fit = -resid(params, x, )
+        fit = -resid(myfit.params, x)
         pylab.plot(x, y, 'ro')
         pylab.plot(x, fit, 'k+-')
         pylab.show()
@@ -166,4 +162,3 @@ elif dset not in Models:
     print(usage)
 else:
     NIST_Test(dset, method=opts.method, start=start, plot=True)
-
diff --git a/examples/fit_multi_datasets.py b/examples/fit_multi_datasets.py
index 13b22b9..18aea2d 100644
--- a/examples/fit_multi_datasets.py
+++ b/examples/fit_multi_datasets.py
@@ -56,13 +56,13 @@ for iy in (2, 3, 4, 5):
     fit_params['sig_%i' % iy].expr='sig_1'
 
 # run the global fit to all the data sets
-minimize(objective, fit_params, args=(x, data))
-report_fit(fit_params)
+out = minimize(objective, fit_params, args=(x, data))
+report_fit(out.params)
 
 # plot the data sets and fits
 plt.figure()
 for i in range(5):
-    y_fit = gauss_dataset(fit_params, i, x)
+    y_fit = gauss_dataset(out.params, i, x)
     plt.plot(x, data[i, :], 'o', x, y_fit, '-')
 
 plt.show()
diff --git a/examples/fit_with_algebraic_constraint.py b/examples/fit_with_algebraic_constraint.py
index 5e6447f..7ec02bb 100644
--- a/examples/fit_with_algebraic_constraint.py
+++ b/examples/fit_with_algebraic_constraint.py
@@ -32,6 +32,7 @@ def residual(pars, x, sigma=None, data=None):
 n = 601
 xmin = 0.
 xmax = 20.0
+random.seed(0)
 x = linspace(xmin, xmax, n)
 
 data = (gaussian(x, 21, 8.1, 1.2) +
@@ -79,7 +80,3 @@ fit = residual(myfit.params, x)
 if HASPYLAB:
     pylab.plot(x, fit, 'k-')
     pylab.show()
-
-
-
-
diff --git a/examples/fit_with_bounds.py b/examples/fit_with_bounds.py
index 2e46e25..9408e1b 100644
--- a/examples/fit_with_bounds.py
+++ b/examples/fit_with_bounds.py
@@ -45,12 +45,12 @@ fit_params.add('decay', value=0.02, max=0.10, min=0.00)
 
 out = minimize(residual, fit_params, args=(x,), kws={'data':data})
 
-fit = residual(fit_params, x)
+fit = residual(out.params, x)
 
 print '# N_func_evals, N_free = ', out.nfev, out.nfree
 print '# chi-square, reduced chi-square = % .7g, % .7g' % (out.chisqr, out.redchi)
 
-report_fit(fit_params, show_correl=True, modelpars=p_true)
+report_fit(out.params, show_correl=True, modelpars=p_true)
 
 print 'Raw (unordered, unscaled) Covariance Matrix:'
 print out.covar
diff --git a/examples/fit_with_inequality.py b/examples/fit_with_inequality.py
new file mode 100644
index 0000000..67f579f
--- /dev/null
+++ b/examples/fit_with_inequality.py
@@ -0,0 +1,45 @@
+from numpy import linspace, random
+import matplotlib.pyplot as plt
+
+from lmfit import Parameters, Parameter, Minimizer, report_fit
+from lmfit.lineshapes import gaussian, lorentzian
+
+
+def residual(pars, x, data):
+    model =  gaussian(x,
+                      pars['amp_g'].value,
+                      pars['cen_g'].value,
+                      pars['wid_g'].value)
+    model += lorentzian(x,
+                        pars['amp_l'].value,
+                        pars['cen_l'].value,
+                        pars['wid_l'].value)
+    return (model - data)
+
+
+n = 601
+random.seed(0)
+x = linspace(0, 20.0, n)
+
+data = (gaussian(x,   21, 6.1, 1.2) +
+        lorentzian(x, 10, 9.6, 1.3) +
+        random.normal(scale=0.1,  size=n))
+
+pfit = Parameters()
+pfit.add(name='amp_g',  value=10)
+pfit.add(name='amp_l',  value=10)
+pfit.add(name='cen_g',  value=5)
+pfit.add(name='peak_split',  value=2.5, min=0, max=5, vary=True)
+pfit.add(name='cen_l',  expr='peak_split+cen_g')
+pfit.add(name='wid_g',  value=1)
+pfit.add(name='wid_l',  expr='wid_g')
+
+mini = Minimizer(residual, pfit, fcn_args=(x, data))
+out  = mini.leastsq()
+
+report_fit(out.params)
+
+best_fit = data + out.residual
+plt.plot(x, data, 'bo')
+plt.plot(x, best_fit, 'r--')
+plt.show()
diff --git a/examples/lmfit_and_emcee.py b/examples/lmfit_and_emcee.py
index 0d6d163..eb232c2 100644
--- a/examples/lmfit_and_emcee.py
+++ b/examples/lmfit_and_emcee.py
@@ -47,7 +47,7 @@ def starting_guess(mini, estimate_sigma=True):
     all points have the same uncertainty which will
     be also part of the sampled parameters.
     """
-    vals = [i.value for i in params.values() if i.vary]
+    vals = [i.value for i in mini.params.values() if i.vary]
     if estimate_sigma:
         vals.append(mini.residual.std())
     return vals
@@ -61,7 +61,7 @@ def create_all(mini, sigma=None):
     is added to the sampled parameters.
     """
     sigma_given = not sigma is None
-    lnprior = create_prior(params)
+    lnprior = create_prior(mini.params)
     lnprob = create_lnliklihood(mini, sigma=sigma)
     guess = starting_guess(mini, not sigma_given)
     if sigma_given:
@@ -121,4 +121,4 @@ if HASPYLAB:
         triangle.corner(samples, labels=params.keys()+['sigma'],
                       truths=[a, b, t1, t2, sigma])
     except ImportError:
-        print("Please install triangle_plot for a nice overview graphic")
\ No newline at end of file
+        print("Please install triangle_plot for a nice overview graphic")
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index b325000..30d0d6b 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -46,7 +46,6 @@ from . import models
 from . import uncertainties
 from .uncertainties import ufloat, correlated_values
 
-from .ui import Fitter
 
 ## versioneer code
 from ._version import get_versions
diff --git a/lmfit/_version.py b/lmfit/_version.py
index ceec228..1a8da90 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -1,183 +1,183 @@
-
-# This file helps to compute a version number in source trees obtained from
-# git-archive tarball (such as those provided by githubs download-from-tag
-# feature). Distribution tarballs (built by setup.py sdist) and build
-# directories (produced by setup.py build) will contain a much shorter file
-# that just contains the computed version number.
-
-# This file is released into the public domain. Generated by
-# versioneer-0.12 (https://github.com/warner/python-versioneer)
-
-# these strings will be replaced by git during git-archive
-git_refnames = " (tag: 0.8.3)"
-git_full = "3d12eecc94cb6bbaf36e736082355c3006942608"
-
-# these strings are filled in when 'setup.py versioneer' creates _version.py
-tag_prefix = ""
-parentdir_prefix = "lmfit-"
-versionfile_source = "lmfit/_version.py"
-
-import os, sys, re, subprocess, errno
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
-    assert isinstance(commands, list)
-    p = None
-    for c in commands:
-        try:
-            # remember shell=False, so use git.cmd on windows, not just git
-            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
-                                 stderr=(subprocess.PIPE if hide_stderr
-                                         else None))
-            break
-        except EnvironmentError:
-            e = sys.exc_info()[1]
-            if e.errno == errno.ENOENT:
-                continue
-            if verbose:
-                print("unable to run %s" % args[0])
-                print(e)
-            return None
-    else:
-        if verbose:
-            print("unable to find command, tried %s" % (commands,))
-        return None
-    stdout = p.communicate()[0].strip()
-    if sys.version >= '3':
-        stdout = stdout.decode()
-    if p.returncode != 0:
-        if verbose:
-            print("unable to run %s (error)" % args[0])
-        return None
-    return stdout
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose=False):
-    # Source tarballs conventionally unpack into a directory that includes
-    # both the project name and a version string.
-    dirname = os.path.basename(root)
-    if not dirname.startswith(parentdir_prefix):
-        if verbose:
-            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
-                  (root, dirname, parentdir_prefix))
-        return None
-    return {"version": dirname[len(parentdir_prefix):], "full": ""}
-
-def git_get_keywords(versionfile_abs):
-    # the code embedded in _version.py can just fetch the value of these
-    # keywords. When used from setup.py, we don't want to import _version.py,
-    # so we do it with a regexp instead. This function is not used from
-    # _version.py.
-    keywords = {}
-    try:
-        f = open(versionfile_abs,"r")
-        for line in f.readlines():
-            if line.strip().startswith("git_refnames ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["refnames"] = mo.group(1)
-            if line.strip().startswith("git_full ="):
-                mo = re.search(r'=\s*"(.*)"', line)
-                if mo:
-                    keywords["full"] = mo.group(1)
-        f.close()
-    except EnvironmentError:
-        pass
-    return keywords
-
-def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
-    if not keywords:
-        return {} # keyword-finding function failed to find keywords
-    refnames = keywords["refnames"].strip()
-    if refnames.startswith("$Format"):
-        if verbose:
-            print("keywords are unexpanded, not using")
-        return {} # unexpanded, so not in an unpacked git-archive tarball
-    refs = set([r.strip() for r in refnames.strip("()").split(",")])
-    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
-    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
-    TAG = "tag: "
-    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
-    if not tags:
-        # Either we're using git < 1.8.3, or there really are no tags. We use
-        # a heuristic: assume all version tags have a digit. The old git %d
-        # expansion behaves like git log --decorate=short and strips out the
-        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
-        # between branches and tags. By ignoring refnames without digits, we
-        # filter out many common branch names like "release" and
-        # "stabilization", as well as "HEAD" and "master".
-        tags = set([r for r in refs if re.search(r'\d', r)])
-        if verbose:
-            print("discarding '%s', no digits" % ",".join(refs-tags))
-    if verbose:
-        print("likely tags: %s" % ",".join(sorted(tags)))
-    for ref in sorted(tags):
-        # sorting will prefer e.g. "2.0" over "2.0rc1"
-        if ref.startswith(tag_prefix):
-            r = ref[len(tag_prefix):]
-            if verbose:
-                print("picking %s" % r)
-            return { "version": r,
-                     "full": keywords["full"].strip() }
-    # no suitable tags, so we use the full revision id
-    if verbose:
-        print("no suitable tags, using full revision id")
-    return { "version": keywords["full"].strip(),
-             "full": keywords["full"].strip() }
-
-
-def git_versions_from_vcs(tag_prefix, root, verbose=False):
-    # this runs 'git' from the root of the source tree. This only gets called
-    # if the git-archive 'subst' keywords were *not* expanded, and
-    # _version.py hasn't already been rewritten with a short version string,
-    # meaning we're inside a checked out source tree.
-
-    if not os.path.exists(os.path.join(root, ".git")):
-        if verbose:
-            print("no .git in %s" % root)
-        return {}
-
-    GITS = ["git"]
-    if sys.platform == "win32":
-        GITS = ["git.cmd", "git.exe"]
-    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
-                         cwd=root)
-    if stdout is None:
-        return {}
-    if not stdout.startswith(tag_prefix):
-        if verbose:
-            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
-        return {}
-    tag = stdout[len(tag_prefix):]
-    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
-    if stdout is None:
-        return {}
-    full = stdout.strip()
-    if tag.endswith("-dirty"):
-        full += "-dirty"
-    return {"version": tag, "full": full}
-
-
-def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
-    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
-    # __file__, we can work backwards from there to the root. Some
-    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
-    # case we can only use expanded keywords.
-
-    keywords = { "refnames": git_refnames, "full": git_full }
-    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
-    if ver:
-        return ver
-
-    try:
-        root = os.path.abspath(__file__)
-        # versionfile_source is the relative path from the top of the source
-        # tree (where the .git directory might live) to this file. Invert
-        # this to find the root from __file__.
-        for i in range(len(versionfile_source.split(os.sep))):
-            root = os.path.dirname(root)
-    except NameError:
-        return default
-
-    return (git_versions_from_vcs(tag_prefix, root, verbose)
-            or versions_from_parentdir(parentdir_prefix, root, verbose)
-            or default)
+
+# This file helps to compute a version number in source trees obtained from
+# git-archive tarball (such as those provided by githubs download-from-tag
+# feature). Distribution tarballs (built by setup.py sdist) and build
+# directories (produced by setup.py build) will contain a much shorter file
+# that just contains the computed version number.
+
+# This file is released into the public domain. Generated by
+# versioneer-0.12 (https://github.com/warner/python-versioneer)
+
+# these strings will be replaced by git during git-archive
+git_refnames = " (tag: 0.9.1)"
+git_full = "db4f4e16b281eaa394f2ab24ab419b581557acd4"
+
+# these strings are filled in when 'setup.py versioneer' creates _version.py
+tag_prefix = ""
+parentdir_prefix = "lmfit-"
+versionfile_source = "lmfit/_version.py"
+
+import os, sys, re, subprocess, errno
+
+def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
+    assert isinstance(commands, list)
+    p = None
+    for c in commands:
+        try:
+            # remember shell=False, so use git.cmd on windows, not just git
+            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
+                                 stderr=(subprocess.PIPE if hide_stderr
+                                         else None))
+            break
+        except EnvironmentError:
+            e = sys.exc_info()[1]
+            if e.errno == errno.ENOENT:
+                continue
+            if verbose:
+                print("unable to run %s" % args[0])
+                print(e)
+            return None
+    else:
+        if verbose:
+            print("unable to find command, tried %s" % (commands,))
+        return None
+    stdout = p.communicate()[0].strip()
+    if sys.version >= '3':
+        stdout = stdout.decode()
+    if p.returncode != 0:
+        if verbose:
+            print("unable to run %s (error)" % args[0])
+        return None
+    return stdout
+
+
+def versions_from_parentdir(parentdir_prefix, root, verbose=False):
+    # Source tarballs conventionally unpack into a directory that includes
+    # both the project name and a version string.
+    dirname = os.path.basename(root)
+    if not dirname.startswith(parentdir_prefix):
+        if verbose:
+            print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
+                  (root, dirname, parentdir_prefix))
+        return None
+    return {"version": dirname[len(parentdir_prefix):], "full": ""}
+
+def git_get_keywords(versionfile_abs):
+    # the code embedded in _version.py can just fetch the value of these
+    # keywords. When used from setup.py, we don't want to import _version.py,
+    # so we do it with a regexp instead. This function is not used from
+    # _version.py.
+    keywords = {}
+    try:
+        f = open(versionfile_abs,"r")
+        for line in f.readlines():
+            if line.strip().startswith("git_refnames ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    keywords["refnames"] = mo.group(1)
+            if line.strip().startswith("git_full ="):
+                mo = re.search(r'=\s*"(.*)"', line)
+                if mo:
+                    keywords["full"] = mo.group(1)
+        f.close()
+    except EnvironmentError:
+        pass
+    return keywords
+
+def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
+    if not keywords:
+        return {} # keyword-finding function failed to find keywords
+    refnames = keywords["refnames"].strip()
+    if refnames.startswith("$Format"):
+        if verbose:
+            print("keywords are unexpanded, not using")
+        return {} # unexpanded, so not in an unpacked git-archive tarball
+    refs = set([r.strip() for r in refnames.strip("()").split(",")])
+    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
+    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
+    TAG = "tag: "
+    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
+    if not tags:
+        # Either we're using git < 1.8.3, or there really are no tags. We use
+        # a heuristic: assume all version tags have a digit. The old git %d
+        # expansion behaves like git log --decorate=short and strips out the
+        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
+        # between branches and tags. By ignoring refnames without digits, we
+        # filter out many common branch names like "release" and
+        # "stabilization", as well as "HEAD" and "master".
+        tags = set([r for r in refs if re.search(r'\d', r)])
+        if verbose:
+            print("discarding '%s', no digits" % ",".join(refs-tags))
+    if verbose:
+        print("likely tags: %s" % ",".join(sorted(tags)))
+    for ref in sorted(tags):
+        # sorting will prefer e.g. "2.0" over "2.0rc1"
+        if ref.startswith(tag_prefix):
+            r = ref[len(tag_prefix):]
+            if verbose:
+                print("picking %s" % r)
+            return { "version": r,
+                     "full": keywords["full"].strip() }
+    # no suitable tags, so we use the full revision id
+    if verbose:
+        print("no suitable tags, using full revision id")
+    return { "version": keywords["full"].strip(),
+             "full": keywords["full"].strip() }
+
+
+def git_versions_from_vcs(tag_prefix, root, verbose=False):
+    # this runs 'git' from the root of the source tree. This only gets called
+    # if the git-archive 'subst' keywords were *not* expanded, and
+    # _version.py hasn't already been rewritten with a short version string,
+    # meaning we're inside a checked out source tree.
+
+    if not os.path.exists(os.path.join(root, ".git")):
+        if verbose:
+            print("no .git in %s" % root)
+        return {}
+
+    GITS = ["git"]
+    if sys.platform == "win32":
+        GITS = ["git.cmd", "git.exe"]
+    stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
+                         cwd=root)
+    if stdout is None:
+        return {}
+    if not stdout.startswith(tag_prefix):
+        if verbose:
+            print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
+        return {}
+    tag = stdout[len(tag_prefix):]
+    stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
+    if stdout is None:
+        return {}
+    full = stdout.strip()
+    if tag.endswith("-dirty"):
+        full += "-dirty"
+    return {"version": tag, "full": full}
+
+
+def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
+    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
+    # __file__, we can work backwards from there to the root. Some
+    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
+    # case we can only use expanded keywords.
+
+    keywords = { "refnames": git_refnames, "full": git_full }
+    ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
+    if ver:
+        return ver
+
+    try:
+        root = os.path.abspath(__file__)
+        # versionfile_source is the relative path from the top of the source
+        # tree (where the .git directory might live) to this file. Invert
+        # this to find the root from __file__.
+        for i in range(len(versionfile_source.split(os.sep))):
+            root = os.path.dirname(root)
+    except NameError:
+        return default
+
+    return (git_versions_from_vcs(tag_prefix, root, verbose)
+            or versions_from_parentdir(parentdir_prefix, root, verbose)
+            or default)
diff --git a/lmfit/asteval.py b/lmfit/asteval.py
index cd6ca7f..70fa16c 100644
--- a/lmfit/asteval.py
+++ b/lmfit/asteval.py
@@ -110,6 +110,11 @@ class Interpreter:
         self.node_handlers['tryexcept'] = self.node_handlers['try']
         self.node_handlers['tryfinally'] = self.node_handlers['try']
 
+        self.no_deepcopy = []
+        for key, val in symtable.items():
+            if (callable(val) or 'numpy.lib.index_tricks' in repr(val)):
+                self.no_deepcopy.append(key)
+
 
     def unimplemented(self, node):
         "unimplemented nodes"
@@ -325,6 +330,9 @@ class Interpreter:
                 errmsg = "invalid symbol name (reserved word?) %s" % node.id
                 self.raise_exception(node, exc=NameError, msg=errmsg)
             sym = self.symtable[node.id] = val
+            if node.id in self.no_deepcopy:
+                self.no_deepcopy.pop(node.id)
+
         elif node.__class__ == ast.Attribute:
             if node.ctx.__class__ == ast.Load:
                 msg = "cannot assign to attribute %s" % node.attr
@@ -652,6 +660,8 @@ class Interpreter:
                                              args=args, kwargs=kwargs,
                                              vararg=node.args.vararg,
                                              varkws=node.args.kwarg)
+        if node.name in self.no_deepcopy:
+            self.no_deepcopy.pop(node.name)
 
 
 class Procedure(object):
diff --git a/lmfit/astutils.py b/lmfit/astutils.py
index 719bf27..9df7146 100644
--- a/lmfit/astutils.py
+++ b/lmfit/astutils.py
@@ -34,7 +34,7 @@ FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
            'Exception', 'False', 'FloatingPointError', 'GeneratorExit',
            'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
            'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
-           'MemoryError', 'NameError', 'None', 'NotImplemented',
+           'MemoryError', 'NameError', 'None',
            'NotImplementedError', 'OSError', 'OverflowError',
            'ReferenceError', 'RuntimeError', 'RuntimeWarning',
            'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 8d5d57e..97e40ce 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -41,10 +41,10 @@ def restore_vals(tmp_params, params):
         params[para_key].value, params[para_key].stderr = tmp_params[para_key]
 
 
-def conf_interval(minimizer, p_names=None, sigmas=(0.674, 0.95, 0.997),
+def conf_interval(minimizer, result, p_names=None, sigmas=(0.674, 0.95, 0.997),
                   trace=False, maxiter=200, verbose=False, prob_func=None):
     r"""Calculates the confidence interval for parameters
-    from the given minimizer.
+    from the given a MinimizerResult, output from minimize.
 
     The parameter for which the ci is calculated will be varied, while
     the remaining parameters are re-optimized for minimizing chi-square.
@@ -56,7 +56,9 @@ def conf_interval(minimizer, p_names=None, sigmas=(0.674, 0.95, 0.997),
     Parameters
     ----------
     minimizer : Minimizer
-        The minimizer to use, should be already fitted via leastsq.
+        The minimizer to use, holding objective function.
+    result : MinimizerResult
+        The result of running minimize().
     p_names : list, optional
         Names of the parameters for which the ci is calculated. If None,
         the ci is calculated for every parameter.
@@ -114,8 +116,8 @@ def conf_interval(minimizer, p_names=None, sigmas=(0.674, 0.95, 0.997),
 
     This makes it possible to plot the dependence between free and fixed.
     """
-    ci = ConfidenceInterval(minimizer, p_names, prob_func, sigmas, trace,
-                            verbose, maxiter)
+    ci = ConfidenceInterval(minimizer, result, p_names, prob_func, sigmas,
+                            trace, verbose, maxiter)
     output = ci.calc_all_ci()
     if trace:
         return output, ci.trace_dict
@@ -139,23 +141,24 @@ class ConfidenceInterval(object):
     """
     Class used to calculate the confidence interval.
     """
-    def __init__(self, minimizer, p_names=None, prob_func=None,
+    def __init__(self, minimizer, result, p_names=None, prob_func=None,
                  sigmas=(0.674, 0.95, 0.997), trace=False, verbose=False,
                  maxiter=50):
         """
 
         """
-        if p_names is None:
-            params = minimizer.params
-            self.p_names = [i for i in params if params[i].vary]
-        else:
-            self.p_names = p_names
+        self.verbose = verbose
+        self.minimizer = minimizer
+        self.result = result
+        self.params = result.params
+        self.org = copy_vals(self.params)
+        self.best_chi = result.chisqr
 
-        # used to detect that .leastsq() has run!
-        if not hasattr(minimizer, 'covar'):
-            minimizer.leastsq()
+        if p_names is None:
+            p_names = [i for i in self.params if self.params[i].vary]
 
-        self.fit_params = [minimizer.params[p] for p in self.p_names]
+        self.p_names = p_names
+        self.fit_params = [self.params[p] for p in self.p_names]
 
         # check that there are at least 2 true variables!
         # check that all stderrs are sensible (including not None or NaN)
@@ -176,9 +179,6 @@ class ConfidenceInterval(object):
         if trace:
             self.trace_dict = dict([(i, []) for i in self.p_names])
 
-        self.verbose = verbose
-        self.minimizer = minimizer
-        self.params = minimizer.params
         self.trace = trace
         self.maxiter = maxiter
         self.min_rel_change = 1e-5
@@ -186,22 +186,19 @@ class ConfidenceInterval(object):
         self.sigmas = list(sigmas)
         self.sigmas.sort()
 
-        # copy the best fit values.
-        self.org = copy_vals(minimizer.params)
-        self.best_chi = minimizer.chisqr
-
     def calc_all_ci(self):
         """
         Calculates all cis.
         """
         out = {}
+
         for p in self.p_names:
             out[p] = (self.calc_ci(p, -1)[::-1] +
                       [(0., self.params[p].value)] +
                       self.calc_ci(p, 1))
         if self.trace:
             self.trace_dict = map_trace_to_names(self.trace_dict,
-                                                 self.minimizer.params)
+                                                 self.params)
 
         return out
 
@@ -212,16 +209,15 @@ class ConfidenceInterval(object):
         """
 
         if isinstance(para, str):
-            para = self.minimizer.params[para]
+            para = self.params[para]
 
         #function used to calculate the pro
         calc_prob = lambda val, prob: self.calc_prob(para, val, prob)
         if self.trace:
-            x = [i.value for i in self.minimizer.params.values()]
+            x = [i.value for i in self.params.values()]
             self.trace_dict[para.name].append(x + [0])
 
         para.vary = False
-        self.minimizer.prepare_fit(self.params)
         limit, max_prob = self.find_limit(para, direction)
         start_val = para.value.copy()
         a_limit = start_val.copy()
@@ -254,7 +250,7 @@ class ConfidenceInterval(object):
         return ret
 
     def reset_vals(self):
-        restore_vals(self.org, self.minimizer.params)
+        restore_vals(self.org, self.params)
 
     def find_limit(self, para, direction):
         """
@@ -279,6 +275,7 @@ class ConfidenceInterval(object):
         while old_prob < max(self.sigmas):
             i = i + 1
             limit += step * direction
+
             new_prob = self.calc_prob(para, limit)
             rel_change = (new_prob - old_prob) / max(new_prob, old_prob, 1.e-12)
             old_prob = new_prob
@@ -303,25 +300,23 @@ class ConfidenceInterval(object):
     def calc_prob(self, para, val, offset=0., restore=False):
         """Returns the probability for given Value."""
         if restore:
-            restore_vals(self.org, self.minimizer.params)
+            restore_vals(self.org, self.params)
         para.value = val
-        save_para = self.minimizer.params[para.name]
-        self.minimizer.params[para.name] = para
-        self.minimizer.prepare_fit(para)
-        self.minimizer.leastsq()
-        out = self.minimizer
+        save_para = self.params[para.name]
+        self.params[para.name] = para
+        self.minimizer.prepare_fit(self.params)
+        out = self.minimizer.leastsq()
         prob = self.prob_func(out.ndata, out.ndata - out.nfree,
-            out.chisqr, self.best_chi)
+                              out.chisqr, self.best_chi)
 
         if self.trace:
             x = [i.value for i in out.params.values()]
             self.trace_dict[para.name].append(x + [prob])
-        self.minimizer.params[para.name] = save_para
+        self.params[para.name] = save_para
         return prob - offset
 
-
-def conf_interval2d(minimizer, x_name, y_name, nx=10, ny=10, limits=None,
-                    prob_func=None):
+def conf_interval2d(minimizer, result, x_name, y_name, nx=10, ny=10,
+                    limits=None, prob_func=None):
     r"""Calculates confidence regions for two fixed parameters.
 
     The method is explained in *conf_interval*: here we are fixing
@@ -329,8 +324,10 @@ def conf_interval2d(minimizer, x_name, y_name, nx=10, ny=10, limits=None,
 
     Parameters
     ----------
-    minimizer : minimizer
-        The minimizer to use, should be already fitted via leastsq.
+    minimizer : Minimizer
+        The minimizer to use, holding objective function.
+    result : MinimizerResult
+        The result of running minimize().
     x_name : string
         The name of the parameter which will be the x direction.
     y_name : string
@@ -353,10 +350,9 @@ def conf_interval2d(minimizer, x_name, y_name, nx=10, ny=10, limits=None,
     Examples
     --------
 
-    >>> mini = minimize(some_func, params)
-    >>> mini.leastsq()
-    True
-    >>> x,y,gr = conf_interval2d('para1','para2')
+    >>> mini = Minimizer(some_func, params)
+    >>> result = mini.leastsq()
+    >>> x, y, gr = conf_interval2d(mini, result, 'para1','para2')
     >>> plt.contour(x,y,gr)
 
     Other Parameters
@@ -366,17 +362,16 @@ def conf_interval2d(minimizer, x_name, y_name, nx=10, ny=10, limits=None,
         Default (``None``) uses built-in f_compare (F test).
     """
     # used to detect that .leastsq() has run!
-    if not hasattr(minimizer, 'covar'):
-        minimizer.leastsq()
+    params = result.params
 
-    best_chi = minimizer.chisqr
-    org = copy_vals(minimizer.params)
+    best_chi = result.chisqr
+    org = copy_vals(result.params)
 
     if prob_func is None or not hasattr(prob_func, '__call__'):
         prob_func = f_compare
 
-    x = minimizer.params[x_name]
-    y = minimizer.params[y_name]
+    x = params[x_name]
+    y = params[y_name]
 
     if limits is None:
         (x_upper, x_lower) = (x.value + 5 * x.stderr, x.value - 5
@@ -396,25 +391,24 @@ def conf_interval2d(minimizer, x_name, y_name, nx=10, ny=10, limits=None,
 
     def calc_prob(vals, restore=False):
         if restore:
-            restore_vals(org, minimizer.params)
+            restore_vals(org, result.params)
         x.value = vals[0]
         y.value = vals[1]
-        save_x = minimizer.params[x.name]
-        save_y = minimizer.params[y.name]
-        minimizer.params[x.name] = x
-        minimizer.params[y.name] = y
-        minimizer.prepare_fit([x, y])
-        minimizer.leastsq()
-        out = minimizer
+        save_x = result.params[x.name]
+        save_y = result.params[y.name]
+        result.params[x.name] = x
+        result.params[y.name] = y
+        minimizer.prepare_fit(params=result.params)
+        out = minimizer.leastsq()
         prob = prob_func(out.ndata, out.ndata - out.nfree, out.chisqr,
                          best_chi, nfix=2.)
-        minimizer.params[x.name] = save_x
-        minimizer.params[y.name] = save_y
+        result.params[x.name] = save_x
+        result.params[y.name] = save_y
         return prob
 
     out = x_points, y_points, np.apply_along_axis(calc_prob, -1, grid)
 
     x.vary, y.vary = True, True
-    restore_vals(org, minimizer.params)
-    minimizer.chisqr = best_chi
+    restore_vals(org, result.params)
+    result.chisqr = best_chi
     return out
diff --git a/lmfit/lineshapes.py b/lmfit/lineshapes.py
index d99c9be..b685376 100644
--- a/lmfit/lineshapes.py
+++ b/lmfit/lineshapes.py
@@ -2,6 +2,7 @@
 """
 basic model line shapes and distribution functions
 """
+from __future__ import division
 from numpy import (pi, log, exp, sqrt, arctan, cos, where)
 from numpy.testing import assert_allclose
 
@@ -44,10 +45,18 @@ def voigt(x, amplitude=1.0, center=0.0, sigma=1.0, gamma=None):
 def pvoigt(x, amplitude=1.0, center=0.0, sigma=1.0, fraction=0.5):
     """1 dimensional pseudo-voigt:
     pvoigt(x, amplitude, center, sigma, fraction)
-       = amplitude*(1-fraction)*gaussion(x, center,sigma) +
+       = amplitude*(1-fraction)*gaussion(x, center, sigma_g) +
          amplitude*fraction*lorentzian(x, center, sigma)
+
+    where sigma_g (the sigma for the Gaussian component) is
+
+        sigma_g = sigma / sqrt(2*log(2)) ~= sigma / 1.17741
+
+    so that the Gaussian and Lorentzian components have the
+    same FWHM of 2*sigma.
     """
-    return ((1-fraction)*gaussian(x, amplitude, center, sigma) +
+    sigma_g = sigma / sqrt(2*log2)
+    return ((1-fraction)*gaussian(x, amplitude, center, sigma_g) +
                fraction*lorentzian(x, amplitude, center, sigma))
 
 def pearson7(x, amplitude=1.0, center=0.0, sigma=1.0, expon=1.0):
@@ -113,7 +122,7 @@ def expgaussian(x, amplitude=1, center=0, sigma=1.0, gamma=1.0):
     """
     gss = gamma*sigma*sigma
     arg1 = gamma*(center +gss/2.0 - x)
-    arg2 = (center + gss - x)/s2
+    arg2 = (center + gss - x)/(s2*sigma)
     return amplitude*(gamma/2) * exp(arg1) * erfc(arg2)
 
 def donaich(x, amplitude=1.0, center=0, sigma=1.0, gamma=0.0):
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index a5191a7..02db740 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -37,7 +37,6 @@ except ImportError:
     pass
 
 from .asteval import Interpreter
-from .astutils import NameFinder
 from .parameter import Parameter, Parameters
 
 # use locally modified version of uncertainties package
@@ -54,32 +53,28 @@ def asteval_with_uncertainties(*vals, **kwargs):
     _obj = kwargs.get('_obj', None)
     _pars = kwargs.get('_pars', None)
     _names = kwargs.get('_names', None)
-    _asteval = kwargs.get('_asteval', None)
-    if (_obj is None or
-        _pars is None or
-        _names is None or
-        _asteval is None or
-        _obj.ast is None):
+    _asteval = _pars._asteval
+    if (_obj is None or  _pars is None or _names is None or
+        _asteval is None or _obj._expr_ast is None):
         return 0
     for val, name in zip(vals, _names):
         _asteval.symtable[name] = val
-    return _asteval.eval(_obj.ast)
+    return _asteval.eval(_obj._expr_ast)
 
 wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
 
-def eval_stderr(obj, uvars, _names, _pars, _asteval):
+def eval_stderr(obj, uvars, _names, _pars):
     """evaluate uncertainty and set .stderr for a parameter `obj`
     given the uncertain values `uvars` (a list of uncertainties.ufloats),
     a list of parameter names that matches uvars, and a dict of param
     objects, keyed by name.
 
-    This uses the uncertainties package wrapped function to evaluate
-    the uncertainty for an arbitrary expression (in obj.ast) of parameters.
+    This uses the uncertainties package wrapped function to evaluate the
+    uncertainty for an arbitrary expression (in obj._expr_ast) of parameters.
     """
-    if not isinstance(obj, Parameter) or not hasattr(obj, 'ast'):
+    if not isinstance(obj, Parameter) or getattr(obj, '_expr_ast', None) is None:
         return
-    uval = wrap_ueval(*uvars, _obj=obj, _names=_names,
-                      _pars=_pars, _asteval=_asteval)
+    uval = wrap_ueval(*uvars, _obj=obj, _names=_names, _pars=_pars)
     try:
         obj.stderr = uval.std_dev()
     except:
@@ -96,14 +91,6 @@ class MinimizerException(Exception):
         return "\n%s" % (self.msg)
 
 
-def check_ast_errors(error):
-    """check for errors derived from asteval, raise MinimizerException"""
-    if len(error) > 0:
-        for err in error:
-            msg = '%s: %s' % (err.get_error())
-            return msg
-    return None
-
 def _differential_evolution(func, x0, **kwds):
     """
     A wrapper for differential_evolution that can be used with scipy.minimize
@@ -124,8 +111,8 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
                   'cg': 'CG',
                   'bfgs': 'BFGS',
                   'newton': 'Newton-CG',
-                  'lbfgs': 'L-BFGS-B',
-                  'l-bfgs':'L-BFGS-B',
+                  'lbfgsb': 'L-BFGS-B',
+                  'l-bfgsb':'L-BFGS-B',
                   'tnc': 'TNC',
                   'cobyla': 'COBYLA',
                   'slsqp': 'SLSQP',
@@ -134,6 +121,30 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
                   'differential_evolution': 'differential_evolution'}
 
 
+class MinimizerResult(object):
+    """ The result of a minimization.
+
+    Attributes
+    ----------
+    params : Parameters
+        The best-fit parameters
+    success : bool
+        Whether the minimization was successful
+    status : int
+        Termination status of the optimizer. Its value depends on the
+        underlying solver. Refer to `message` for details.
+
+    Notes
+    -----
+    additional attributes not listed above depending of the
+    specific solver. Since this class is essentially a subclass of dict
+    with attribute accessors, one can see which attributes are available
+    using the `keys()` method.
+    """
+    def __init__(self, **kws):
+        for key, val in kws.items():
+            setattr(self, key, val)
+
 class Minimizer(object):
     """A general minimizer for curve fitting"""
     err_nonparam = ("params must be a minimizer.Parameters() instance or list "
@@ -202,8 +213,8 @@ class Minimizer(object):
         self.nfev = 0
         self.nfree = 0
         self.ndata = 0
-        self.nvarys = 0
         self.ier = 0
+        self._abort = False
         self.success = True
         self.errorbars = False
         self.message = None
@@ -212,16 +223,9 @@ class Minimizer(object):
         self.redchi = None
         self.covar = None
         self.residual = None
-        self.var_map = []
-        self.vars = []
-        self.params = []
-        self.updated = []
+
+        self.params = params
         self.jacfcn = None
-        self.asteval = Interpreter()
-        self.namefinder = NameFinder()
-        self.__prepared = False
-        self.__set_params(params)
-        # self.prepare_fit()
 
     @property
     def values(self):
@@ -232,42 +236,7 @@ class Minimizer(object):
             Parameter values in a simple dictionary.
         """
 
-        return dict([(name, p.value) for name, p in self.params.items()])
-
-    def __update_paramval(self, name):
-        """
-        update parameter value, including setting bounds.
-        For a constrained parameter (one with an expr defined),
-        this first updates (recursively) all parameters on which
-        the parameter depends (using the 'deps' field).
-       """
-        # Has this param already been updated?
-        # if this is called as an expression dependency,
-        # it may have been!
-        if self.updated[name]:
-            return
-        par = self.params[name]
-        if getattr(par, 'expr', None) is not None:
-            if getattr(par, 'ast', None) is None:
-                par.ast = self.asteval.parse(par.expr)
-            if par.deps is not None:
-                for dep in par.deps:
-                    self.__update_paramval(dep)
-            par.value = self.asteval.run(par.ast)
-            out = check_ast_errors(self.asteval.error)
-            if out is not None:
-                self.asteval.raise_exception(None)
-        self.asteval.symtable[name] = par.value
-        self.updated[name] = True
-
-    def update_constraints(self):
-        """
-        Update all constrained parameters, checking that dependencies are
-        evaluated as needed.
-        """
-        self.updated = dict([(name, False) for name in self.params])
-        for name in self.params:
-            self.__update_paramval(name)
+        return dict([(name, p.value) for name, p in self.result.params.items()])
 
     def __residual(self, fvars):
         """
@@ -278,58 +247,54 @@ class Minimizer(object):
         calculate the residual.
         """
         # set parameter values
-        for varname, val in zip(self.var_map, fvars):
-            # self.params[varname].value = val
-            par = self.params[varname]
-            par.value = par.from_internal(val)
-        self.nfev = self.nfev + 1
-
-        self.update_constraints()
-        out = self.userfcn(self.params, *self.userargs, **self.userkws)
+        if self._abort:
+            return None
+        params = self.result.params
+        for name, val in zip(self.result.var_names, fvars):
+            params[name].value = params[name].from_internal(val)
+        self.result.nfev = self.result.nfev + 1
+
+        out = self.userfcn(params, *self.userargs, **self.userkws)
         if callable(self.iter_cb):
-            self.iter_cb(self.params, self.nfev, out,
-                         *self.userargs, **self.userkws)
-        return out
+            abort = self.iter_cb(params, self.result.nfev, out,
+                                 *self.userargs, **self.userkws)
+            self._abort = self._abort or abort
+        if not self._abort:
+            return np.asarray(out).ravel()
 
     def __jacobian(self, fvars):
         """
         analytical jacobian to be used with the Levenberg-Marquardt
 
         modified 02-01-2012 by Glenn Jones, Aberystwyth University
+        modified 06-29-2015 M Newville to apply gradient scaling
+               for bounded variables (thanks to JJ Helmus, N Mayorov)
         """
-        for varname, val in zip(self.var_map, fvars):
-            # self.params[varname].value = val
-            self.params[varname].value = self.params[varname].from_internal(val)
-
-        self.nfev = self.nfev + 1
-        self.update_constraints()
-        # computing the jacobian
-        return self.jacfcn(self.params, *self.userargs, **self.userkws)
-
-    def __set_params(self, params):
-        """ set internal self.params from a Parameters object or
-        a list/tuple of Parameters"""
-        if params is None or isinstance(params, Parameters):
-            self.params = params
-        elif isinstance(params, (list, tuple)):
-            _params = Parameters()
-            for _par in params:
-                if not isinstance(_par, Parameter):
-                    raise MinimizerException(self.err_nonparam)
-                else:
-                    _params[_par.name] = _par
-            self.params = _params
+        pars  = self.result.params
+        grad_scale = ones_like(fvars)
+        for ivar, name in enumerate(self.result.var_names):
+            val = fvars[ivar]
+            pars[name].value = pars[name].from_internal(val)
+            grad_scale[ivar] = pars[name].scale_gradient(val)
+
+        self.result.nfev = self.result.nfev + 1
+        pars.update_constraints()
+        # compute the jacobian for "internal" unbounded variables,
+        # the rescale for bounded "external" variables.
+        jac = self.jacfcn(pars, *self.userargs, **self.userkws)
+        if self.col_deriv:
+            jac = (jac.transpose()*grad_scale).transpose()
         else:
-            raise MinimizerException(self.err_nonparam)
+            jac = jac*grad_scale
+        return jac
 
-    def penalty(self, params):
+    def penalty(self, fvars):
         """
         Penalty function for scalar minimizers:
 
         Parameters
         ----------
-        params : lmfit.parameter.Parameters object
-            The model parameters
+        fvars : array of values for the variable parameters
 
         Returns
         -------
@@ -337,56 +302,56 @@ class Minimizer(object):
             The user evaluated user-supplied objective function. If the
             objective function is an array, return the array sum-of-squares
         """
-        r = self.__residual(params)
+        r = self.__residual(fvars)
         if isinstance(r, ndarray):
             r = (r*r).sum()
         return r
 
     def prepare_fit(self, params=None):
         """
-        Prepares parameters for fitting
+        Prepares parameters for fitting,
+        return array of initial values
         """
         # determine which parameters are actually variables
         # and which are defined expressions.
-        if params is None and self.params is not None and self.__prepared:
-            return
-        if params is not None and self.params is None:
-            self.__set_params(params)
-        self.nfev = 0
-        self.var_map = []
-        self.vars = []
-        self.errorbars = False
-        for name, par in self.params.items():
+        result = self.result = MinimizerResult()
+        if params is not None:
+            self.params = params
+        if isinstance(self.params, Parameters):
+            result.params = deepcopy(self.params)
+        elif isinstance(self.params, (list, tuple)):
+            result.params = Parameters()
+            for par in self.params:
+                if not isinstance(par, Parameter):
+                    raise MinimizerException(self.err_nonparam)
+                else:
+                    result.params[par.name] = par
+        elif self.params is None:
+            raise MinimizerException(self.err_nonparam)
+
+        # determine which parameters are actually variables
+        # and which are defined expressions.
+
+        result.var_names = [] # note that this *does* belong to self...
+        result.init_vals = []
+        result.params.update_constraints()
+        result.nfev = 0
+        result.errorbars = False
+        result.aborted = False
+        for name, par in self.result.params.items():
             par.stderr = None
             par.correl = None
-
             if par.expr is not None:
-                par.ast = self.asteval.parse(par.expr)
-                check_ast_errors(self.asteval.error)
                 par.vary = False
-                par.deps = []
-                self.namefinder.names = []
-                self.namefinder.generic_visit(par.ast)
-                for symname in self.namefinder.names:
-                    if (symname in self.params and
-                        symname not in par.deps):
-                        par.deps.append(symname)
-            elif par.vary:
-                self.var_map.append(name)
-                self.vars.append(par.setup_bounds())
-
-            self.asteval.symtable[name] = par.value
+            if par.vary:
+                result.var_names.append(name)
+                result.init_vals.append(par.setup_bounds())
+
             par.init_value = par.value
             if par.name is None:
                 par.name = name
-
-        self.nvarys = len(self.vars)
-
-        # now evaluate make sure initial values
-        # are used to set values of the defined expressions.
-        # this also acts as a check of expression syntax.
-        self.update_constraints()
-        self.__prepared = True
+        result.nvarys = len(result.var_names)
+        return result
 
     def unprepare_fit(self):
         """
@@ -395,11 +360,7 @@ class Minimizer(object):
 
         removes ast compilations of constraint expressions
         """
-        self.__prepared = False
-        self.params = deepcopy(self.params)
-        for par in self.params.values():
-            if hasattr(par, 'ast'):
-                delattr(par, 'ast')
+        pass
 
     @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
                        'and method=\'L-BFGS-B\' instead')
@@ -414,27 +375,8 @@ class Minimizer(object):
             scipy.optimize.lbfgsb.fmin_l_bfgs_b function.
 
         """
+        raise NotImplementedError("use scalar_minimize(method='L-BFGS-B')")
 
-        self.prepare_fit()
-        lb_kws = dict(factr=1000.0, approx_grad=True, m=20,
-                      maxfun=2000 * (self.nvarys + 1),
-                      )
-        lb_kws.update(self.kws)
-        lb_kws.update(kws)
-
-        xout, fout, info = scipy_lbfgsb(self.penalty, self.vars, **lb_kws)
-        self.nfev = info['funcalls']
-        self.message = info['task']
-        self.chisqr = self.residual = self.__residual(xout)
-        self.ndata = 1
-        self.nfree = 1
-        if isinstance(self.residual, ndarray):
-            self.chisqr = (self.chisqr**2).sum()
-            self.ndata = len(self.residual)
-            self.nfree = self.ndata - self.nvarys
-        self.redchi = self.chisqr/self.nfree
-        self.unprepare_fit()
-        return
 
     @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
                        'and method=\'Nelder-Mead\' instead')
@@ -447,29 +389,9 @@ class Minimizer(object):
         kws : dict
             Minimizer options to pass to the scipy.optimize.fmin minimizer.
         """
+        raise NotImplementedError("use scalar_minimize(method='Nelder-Mead')")
 
-        self.prepare_fit()
-        fmin_kws = dict(full_output=True, disp=False, retall=True,
-                        ftol=1.e-4, xtol=1.e-4,
-                        maxfun=5000 * (self.nvarys + 1))
-
-        fmin_kws.update(kws)
-
-        ret = scipy_fmin(self.penalty, self.vars, **fmin_kws)
-        xout, fout, niter, funccalls, warnflag, allvecs = ret
-        self.nfev = funccalls
-        self.chisqr = self.residual = self.__residual(xout)
-        self.ndata = 1
-        self.nfree = 1
-        if isinstance(self.residual, ndarray):
-            self.chisqr = (self.chisqr**2).sum()
-            self.ndata = len(self.residual)
-            self.nfree = self.ndata - self.nvarys
-        self.redchi = self.chisqr/self.nfree
-        self.unprepare_fit()
-        return
-
-    def scalar_minimize(self, method='Nelder-Mead', **kws):
+    def scalar_minimize(self, method='Nelder-Mead', params=None, **kws):
         """
         Use one of the scalar minimization methods from
         scipy.optimize.minimize.
@@ -491,6 +413,8 @@ class Minimizer(object):
                 'SLSQP'
                 'differential_evolution'
 
+        params : Parameters, optional
+           Parameters to use as starting points.
         kws : dict, optional
             Minimizer options pass to scipy.optimize.minimize.
 
@@ -512,10 +436,13 @@ class Minimizer(object):
         """
         if not HAS_SCALAR_MIN:
             raise NotImplementedError
-        self.prepare_fit()
+
+        result = self.prepare_fit(params=params)
+        vars   = result.init_vals
+        params = result.params
 
         fmin_kws = dict(method=method,
-                        options={'maxiter': 1000 * (self.nvarys + 1)})
+                        options={'maxiter': 1000 * (len(vars) + 1)})
         fmin_kws.update(self.kws)
         fmin_kws.update(kws)
 
@@ -536,37 +463,43 @@ class Minimizer(object):
 
         if method == 'differential_evolution':
             fmin_kws['method'] = _differential_evolution
-            pars = self.params
-            bounds = [(pars[par].min, pars[par].max) for par in pars]
+            bounds = [(par.min, par.max) for par in params.values()]
             if not np.all(np.isfinite(bounds)):
                 raise ValueError('With differential evolution finite bounds '
                                  'are required for each parameter')
-            bounds = [(-np.pi / 2., np.pi / 2.)] * len(self.vars)
+            bounds = [(-np.pi / 2., np.pi / 2.)] * len(vars)
             fmin_kws['bounds'] = bounds
-            
+
             # in scipy 0.14 this can be called directly from scipy_minimize
             # When minimum scipy is 0.14 the following line and the else
             # can be removed.
-            ret = _differential_evolution(self.penalty, self.vars, **fmin_kws)
+            ret = _differential_evolution(self.penalty, vars, **fmin_kws)
         else:
-            ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
-
-        xout = ret.x
-        self.message = ret.message
-
-        self.nfev = ret.nfev
-        self.chisqr = self.residual = self.__residual(xout)
-        self.ndata = 1
-        self.nfree = 1
-        if isinstance(self.residual, ndarray):
-            self.chisqr = (self.chisqr**2).sum()
-            self.ndata = len(self.residual)
-            self.nfree = self.ndata - self.nvarys
-        self.redchi = self.chisqr / self.nfree
-        self.unprepare_fit()
-        return ret.success
-
-    def leastsq(self, **kws):
+            ret = scipy_minimize(self.penalty, vars, **fmin_kws)
+
+        result.aborted = self._abort
+        self._abort = False
+
+        for attr in dir(ret):
+            if not attr.startswith('_'):
+                setattr(result, attr, getattr(ret, attr))
+
+        result.chisqr = result.residual = self.__residual(ret.x)
+        result.nvarys = len(vars)
+        result.ndata = 1
+        result.nfree = 1
+        if isinstance(result.residual, ndarray):
+            result.chisqr = (result.chisqr**2).sum()
+            result.ndata = len(result.residual)
+            result.nfree = result.ndata - result.nvarys
+        result.redchi = result.chisqr / result.nfree
+        _log_likelihood = result.ndata * np.log(result.redchi)
+        result.aic = _log_likelihood + 2 * result.nvarys
+        result.bic = _log_likelihood + np.log(result.ndata) * result.nvarys
+
+        return result
+
+    def leastsq(self, params=None, **kws):
         """
         Use Levenberg-Marquardt minimization to perform a fit.
         This assumes that ModelParameters have been stored, and a function to
@@ -581,6 +514,8 @@ class Minimizer(object):
 
         Parameters
         ----------
+        params : Parameters, optional
+           Parameters to use as starting points.
         kws : dict, optional
             Minimizer options to pass to scipy.optimize.leastsq.
 
@@ -589,43 +524,55 @@ class Minimizer(object):
         success : bool
             True if fit was successful, False if not.
         """
-        self.prepare_fit()
-        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
-                     gtol=1.e-7, maxfev=2000*(self.nvarys+1), Dfun=None)
+        result = self.prepare_fit(params=params)
+        vars   = result.init_vals
+        nvars = len(vars)
+        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7, col_deriv=False,
+                     gtol=1.e-7, maxfev=2000*(nvars+1), Dfun=None)
 
         lskws.update(self.kws)
         lskws.update(kws)
 
+        self.col_deriv = False
         if lskws['Dfun'] is not None:
             self.jacfcn = lskws['Dfun']
+            self.col_deriv = lskws['col_deriv']
             lskws['Dfun'] = self.__jacobian
 
         # suppress runtime warnings during fit and error analysis
         orig_warn_settings = np.geterr()
         np.seterr(all='ignore')
-        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
-        _best, _cov, infodict, errmsg, ier = lsout
 
-        self.residual = resid = infodict['fvec']
-        self.ier = ier
-        self.lmdif_message = errmsg
-        self.message = 'Fit succeeded.'
-        self.success = ier in [1, 2, 3, 4]
-
-        if ier == 0:
-            self.message = 'Invalid Input Parameters.'
+        lsout = scipy_leastsq(self.__residual, vars, **lskws)
+        _best, _cov, infodict, errmsg, ier = lsout
+        result.aborted = self._abort
+        self._abort = False
+
+        result.residual = resid = infodict['fvec']
+        result.ier = ier
+        result.lmdif_message = errmsg
+        result.message = 'Fit succeeded.'
+        result.success = ier in [1, 2, 3, 4]
+        if result.aborted:
+            result.message = 'Fit aborted by user callback.'
+            result.success = False
+        elif ier == 0:
+            result.message = 'Invalid Input Parameters.'
         elif ier == 5:
-            self.message = self.err_maxfev % lskws['maxfev']
+            result.message = self.err_maxfev % lskws['maxfev']
         else:
-            self.message = 'Tolerance seems to be too small.'
+            result.message = 'Tolerance seems to be too small.'
 
-        self.nfev = infodict['nfev']
-        self.ndata = len(resid)
+        result.ndata = len(resid)
 
-        sum_sqr = (resid**2).sum()
-        self.chisqr = sum_sqr
-        self.nfree = (self.ndata - self.nvarys)
-        self.redchi = sum_sqr / self.nfree
+        result.chisqr = (resid**2).sum()
+        result.nfree = (result.ndata - nvars)
+        result.redchi = result.chisqr / result.nfree
+        _log_likelihood = result.ndata * np.log(result.redchi)
+        result.aic = _log_likelihood + 2 * nvars
+        result.bic = _log_likelihood + np.log(result.ndata) * nvars
+
+        params = result.params
 
         # need to map _best values to params, then calculate the
         # grad for the variable parameters
@@ -641,45 +588,44 @@ class Minimizer(object):
         if len(np.shape(grad)) == 0:
             grad = np.array([grad])
 
-        for ivar, varname in enumerate(self.var_map):
-            par = self.params[varname]
-            grad[ivar] = par.scale_gradient(_best[ivar])
-            vbest[ivar] = par.value
+        for ivar, name in enumerate(result.var_names):
+            grad[ivar] = params[name].scale_gradient(_best[ivar])
+            vbest[ivar] = params[name].value
 
         # modified from JJ Helmus' leastsqbound.py
         infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                      take(grad, infodict['ipvt'] - 1))
-        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
-                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
+        rvec = dot(triu(transpose(infodict['fjac'])[:nvars, :]),
+                   take(eye(nvars), infodict['ipvt'] - 1, 0))
         try:
-            self.covar = inv(dot(transpose(rvec), rvec))
+            result.covar = inv(dot(transpose(rvec), rvec))
         except (LinAlgError, ValueError):
-            self.covar = None
+            result.covar = None
 
         has_expr = False
-        for par in self.params.values():
+        for par in params.values():
             par.stderr, par.correl = 0, None
             has_expr = has_expr or par.expr is not None
 
         # self.errorbars = error bars were successfully estimated
-        self.errorbars = self.covar is not None
-
-        if self.covar is not None:
+        result.errorbars = (result.covar is not None)
+        if result.aborted:
+            result.errorbars = False
+        if result.errorbars:
             if self.scale_covar:
-                self.covar = self.covar * sum_sqr / self.nfree
-
-            for ivar, varname in enumerate(self.var_map):
-                par = self.params[varname]
-                par.stderr = sqrt(self.covar[ivar, ivar])
+                result.covar *= result.redchi
+            for ivar, name in enumerate(result.var_names):
+                par = params[name]
+                par.stderr = sqrt(result.covar[ivar, ivar])
                 par.correl = {}
                 try:
-                    self.errorbars = self.errorbars and (par.stderr > 0.0)
-                    for jvar, varn2 in enumerate(self.var_map):
+                    result.errorbars = result.errorbars and (par.stderr > 0.0)
+                    for jvar, varn2 in enumerate(result.var_names):
                         if jvar != ivar:
-                            par.correl[varn2] = (self.covar[ivar, jvar] /
-                                 (par.stderr * sqrt(self.covar[jvar, jvar])))
+                            par.correl[varn2] = (result.covar[ivar, jvar] /
+                                 (par.stderr * sqrt(result.covar[jvar, jvar])))
                 except:
-                    self.errorbars = False
+                    result.errorbars = False
 
             uvars = None
             if has_expr:
@@ -689,26 +635,23 @@ class Minimizer(object):
                 #   re-evaluate contrained parameters to extract stderr
                 #   and then set Parameters back to best-fit value
                 try:
-                    uvars = uncertainties.correlated_values(vbest, self.covar)
+                    uvars = uncertainties.correlated_values(vbest, result.covar)
                 except (LinAlgError, ValueError):
                     uvars = None
-
                 if uvars is not None:
-                    for par in self.params.values():
-                        eval_stderr(par, uvars, self.var_map,
-                                    self.params, self.asteval)
+                    for par in params.values():
+                        eval_stderr(par, uvars, result.var_names, params)
                     # restore nominal values
-                    for v, nam in zip(uvars, self.var_map):
-                        self.asteval.symtable[nam] = v.nominal_value
+                    for v, nam in zip(uvars, result.var_names):
+                        params[nam].value = v.nominal_value
 
-        if not self.errorbars:
-            self.message = '%s. Could not estimate error-bars'  % self.message
+        if not result.errorbars:
+            result.message = '%s. Could not estimate error-bars'% result.message
 
         np.seterr(**orig_warn_settings)
-        self.unprepare_fit()
-        return self.success
+        return result
 
-    def minimize(self, method='leastsq'):
+    def minimize(self, method='leastsq', params=None, **kws):
         """
         Perform the minimization.
 
@@ -730,14 +673,20 @@ class Minimizer(object):
             'slsqp'                  -    Sequential Linear Squares Programming
             'differential_evolution' -    differential evolution
 
+        params : Parameters, optional
+            parameters to use as starting values
+
         Returns
         -------
-        success : bool
-            Whether the fit was successful.
+        result : MinimizerResult
+
+            MinimizerResult object contains updated params, fit statistics, etc.
+
         """
 
         function = self.leastsq
-        kwargs = {}
+        kwargs = {'params': params}
+        kwargs.update(kws)
 
         user_method = method.lower()
         if user_method.startswith('least'):
@@ -753,10 +702,8 @@ class Minimizer(object):
             function = self.fmin
         elif user_method.startswith('lbfgsb'):
             function = self.lbfgsb
-
         return function(**kwargs)
 
-
 def minimize(fcn, params, method='leastsq', args=None, kws=None,
              scale_covar=True, iter_cb=None, **fit_kws):
     """
@@ -823,5 +770,4 @@ def minimize(fcn, params, method='leastsq', args=None, kws=None,
     """
     fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
                        iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
-    fitter.minimize(method=method)
-    return fitter
+    return fitter.minimize(method=method)
diff --git a/lmfit/model.py b/lmfit/model.py
index e7be75f..d2b96ed 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -10,11 +10,71 @@ import numpy as np
 from . import Parameters, Parameter, Minimizer
 from .printfuncs import fit_report
 
+from collections import MutableSet
+
 try:
     from collections import OrderedDict
 except ImportError:
     from ordereddict import OrderedDict
 
+class OrderedSet(MutableSet):
+    """from http://code.activestate.com/recipes/576694-orderedset/"""
+    def __init__(self, iterable=None):
+        self.end = end = []
+        end += [None, end, end]         # sentinel node for doubly linked list
+        self.map = {}                   # key --> [key, prev, next]
+        if iterable is not None:
+            self |= iterable
+
+    def __len__(self):
+        return len(self.map)
+
+    def __contains__(self, key):
+        return key in self.map
+
+    def add(self, key):
+        if key not in self.map:
+            end = self.end
+            curr = end[1]
+            curr[2] = end[1] = self.map[key] = [key, curr, end]
+
+    def discard(self, key):
+        if key in self.map:
+            key, prev, next = self.map.pop(key)
+            prev[2] = next
+            next[1] = prev
+
+    def __iter__(self):
+        end = self.end
+        curr = end[2]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[2]
+
+    def __reversed__(self):
+        end = self.end
+        curr = end[1]
+        while curr is not end:
+            yield curr[0]
+            curr = curr[1]
+
+    def pop(self, last=True):
+        if not self:
+            raise KeyError('set is empty')
+        key = self.end[1][0] if last else self.end[2][0]
+        self.discard(key)
+        return key
+
+    def __repr__(self):
+        if not self:
+            return '%s()' % (self.__class__.__name__,)
+        return '%s(%r)' % (self.__class__.__name__, list(self))
+
+    def __eq__(self, other):
+        if isinstance(other, OrderedSet):
+            return len(self) == len(other) and list(self) == list(other)
+        return set(self) == set(other)
+
 
 # Use pandas.isnull for aligning missing data is pandas is available.
 # otherwise use numpy.isnan
@@ -105,8 +165,8 @@ class Model(object):
             raise ValueError(self._invalid_missing)
         self.missing = missing
         self.opts = kws
-        self.param_hints = {}
-        self._param_names = set()
+        self.param_hints = OrderedDict()
+        self._param_names = OrderedSet()
         self._parse_params()
         if self.independent_vars is None:
             self.independent_vars = []
@@ -229,8 +289,7 @@ class Model(object):
             if (self._strip_prefix(arg) not in allargs or
                 arg in self._forbidden_args):
                 raise ValueError(self._invalid_par % (arg, fname))
-
-        self._param_names = set(names)
+        self._param_names = OrderedSet(names)
 
     def set_param_hint(self, name, **kwargs):
         """set hints for parameter, including optional bounds
@@ -247,7 +306,7 @@ class Model(object):
             name = name[npref:]
 
         if name not in self.param_hints:
-            self.param_hints[name] = {}
+            self.param_hints[name] = OrderedDict()
         hints = self.param_hints[name]
         for key, val in kwargs.items():
             if key in self._hint_names:
@@ -263,7 +322,6 @@ class Model(object):
         if 'verbose' in kwargs:
             verbose = kwargs['verbose']
         params = Parameters()
-
         for name in self.param_names:
             par = Parameter(name=name)
             basename = name[len(self._prefix):]
@@ -314,7 +372,7 @@ class Model(object):
         diff = self.eval(params, **kwargs) - data
         if weights is not None:
             diff *= weights
-        return np.asarray(diff)  # for compatibility with pandas.Series
+        return np.asarray(diff).ravel()  # for compatibility with pandas.Series
 
     def _handle_missing(self, data):
         "handle missing data"
@@ -409,7 +467,7 @@ class Model(object):
 
         Returns
         -------
-        lmfit.ModelFit
+        lmfit.ModelResult
 
         Examples
         --------
@@ -437,7 +495,7 @@ class Model(object):
             params = deepcopy(params)
 
         # If any kwargs match parameter names, override params.
-        param_kwargs = set(kwargs.keys()) & self.param_names
+        param_kwargs = set(kwargs.keys()) & set(self.param_names)
         for name in param_kwargs:
             p = kwargs[name]
             if isinstance(p, Parameter):
@@ -496,8 +554,9 @@ class Model(object):
         if fit_kws is None:
             fit_kws = {}
 
-        output = ModelFit(self, params, method=method, iter_cb=iter_cb,
-                          scale_covar=scale_covar, fcn_kws=kwargs, **fit_kws)
+        output = ModelResult(self, params, method=method, iter_cb=iter_cb,
+                             scale_covar=scale_covar, fcn_kws=kwargs,
+                             **fit_kws)
         output.fit(data=data, weights=weights)
         output.components = self.components
         return output
@@ -623,7 +682,7 @@ class CompositeModel(Model):
         out.update(self.left._make_all_args(params=params, **kwargs))
         return out
 
-class ModelFit(Minimizer):
+class ModelResult(Minimizer):
     """Result from Model fit
 
     Attributes
@@ -692,20 +751,27 @@ class ModelFit(Minimizer):
         if data is not None:
             self.data = data
         if params is not None:
-            self.params = params
+            self.init_params = params
         if weights is not None:
             self.weights = weights
         if method is not None:
             self.method = method
         self.userargs = (self.data, self.weights)
         self.userkws.update(kwargs)
-        self.init_params = deepcopy(self.params)
-        self.init_values = self.model._make_all_args(self.init_params)
-        self.init_fit    = self.model.eval(params=self.init_params, **self.userkws)
+        self.init_fit    = self.model.eval(params=self.params, **self.userkws)
+
+        _ret = self.minimize(method=self.method)
 
-        self.minimize(method=self.method)
-        self.best_fit = self.model.eval(params=self.params, **self.userkws)
-        self.best_values = self.model._make_all_args(self.params)
+        for attr in dir(_ret):
+            if not attr.startswith('_') :
+                try:
+                    setattr(self, attr, getattr(_ret, attr))
+                except AttributeError:
+                    pass
+
+        self.init_values = self.model._make_all_args(self.init_params)
+        self.best_values = self.model._make_all_args(_ret.params)
+        self.best_fit    = self.model.eval(params=_ret.params, **self.userkws)
 
     def eval(self, **kwargs):
         self.userkws.update(kwargs)
@@ -715,19 +781,13 @@ class ModelFit(Minimizer):
         self.userkws.update(kwargs)
         return self.model.eval_components(params=self.params, **self.userkws)
 
-    def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1):
+    def fit_report(self,  **kwargs):
         "return fit report"
-        stats_report = fit_report(self, modelpars=modelpars,
-                                 show_correl=show_correl,
-                                 min_correl=min_correl)
-        buff = ['[[Model]]']
-        buff.append('    %s' % self.model._reprstring(long=True))
-        buff = '\n'.join(buff)
-        out = '%s\n%s' % (buff, stats_report)
-        return out
+        return '[[Model]]\n    %s\n%s\n' % (self.model._reprstring(long=True),
+                                            fit_report(self, **kwargs))
 
     @_ensureMatplotlib
-    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
                  numpoints=None,  data_kws=None, fit_kws=None, init_kws=None,
                  ax_kws=None):
         """Plot the fit results using matplotlib.
@@ -747,6 +807,8 @@ class ModelFit(Minimizer):
             matplotlib format string for fitted curve
         initfmt : string, optional
             matplotlib format string for initial conditions for the fit
+        yerr : ndarray, optional
+            array of uncertainties for data array
         numpoints : int, optional
             If provided, the final and initial fit curves are evaluated not
             only at data points, but refined to contain `numpoints` points in
@@ -770,15 +832,16 @@ class ModelFit(Minimizer):
         For details about plot format strings and keyword arguments see
         documentation of matplotlib.axes.Axes.plot.
 
-        If the fit model included weights, then matplotlib.axes.Axes.errorbar
-        is used to plot the data, with yerr set to 1 / self.weights**0.5
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
+        not specified and the fit includes weights, yerr set to 1/self.weights
 
         If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
 
         See Also
         --------
-        ModelFit.plot_residuals : Plot the fit residuals using matplotlib.
-        ModelFit.plot : Plot the fit results and residuals using matplotlib.
+        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
+        ModelResult.plot : Plot the fit results and residuals using matplotlib.
         """
         if data_kws is None:
             data_kws = {}
@@ -803,23 +866,26 @@ class ModelFit(Minimizer):
 
         # make a dense array for x-axis if data is not dense
         if numpoints is not None and len(self.data) < numpoints:
-            x_array_dense = np.linspace(x_array[0], x_array[-1], numpoints)
+            x_array_dense = np.linspace(min(x_array), max(x_array), numpoints)
         else:
             x_array_dense = x_array
 
         ax.plot(x_array_dense, self.model.eval(self.init_params,
                 **{independent_var: x_array_dense}), initfmt,
-                label='initial for ' + self.model.name, **init_kws)
+                label='init', **init_kws)
         ax.plot(x_array_dense, self.model.eval(self.params,
                 **{independent_var: x_array_dense}), fitfmt,
-                label=self.model.name, **fit_kws)
+                label='best-fit', **fit_kws)
 
-        if self.weights is not None:
-            ax.errorbar(x_array, self.data, yerr=1/self.weights**0.5,
+        if yerr is None and self.weights is not None:
+            yerr = 1.0/self.weights
+        if yerr is not None:
+            ax.errorbar(x_array, self.data, yerr=yerr,
                         fmt=datafmt, label='data', **data_kws)
         else:
             ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
 
+        ax.set_title(self.model.name)
         ax.set_xlabel(independent_var)
         ax.set_ylabel('y')
         ax.legend()
@@ -827,8 +893,8 @@ class ModelFit(Minimizer):
         return ax
 
     @_ensureMatplotlib
-    def plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
-                       ax_kws=None):
+    def plot_residuals(self, ax=None, datafmt='o', yerr=None, data_kws=None,
+                       fit_kws=None, ax_kws=None):
         """Plot the fit residuals using matplotlib.
 
         The method will plot residuals of the fit using matplotlib, including:
@@ -842,6 +908,8 @@ class ModelFit(Minimizer):
             current pyplot axis or create one if there is none.
         datafmt : string, optional
             matplotlib format string for data points
+        yerr : ndarray, optional
+            array of uncertainties for data array
         data_kws : dictionary, optional
             keyword arguments passed on to the plot function for data points
         fit_kws : dictionary, optional
@@ -858,15 +926,16 @@ class ModelFit(Minimizer):
         For details about plot format strings and keyword arguments see
         documentation of matplotlib.axes.Axes.plot.
 
-        If the fit model included weights, then matplotlib.axes.Axes.errorbar
-        is used to plot the data, with yerr set to 1 / self.weights**0.5
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
+        not specified and the fit includes weights, yerr set to 1/self.weights
 
         If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
 
         See Also
         --------
-        ModelFit.plot_fit : Plot the fit results using matplotlib.
-        ModelFit.plot : Plot the fit results and residuals using matplotlib.
+        ModelResult.plot_fit : Plot the fit results using matplotlib.
+        ModelResult.plot : Plot the fit results and residuals using matplotlib.
         """
         if data_kws is None:
             data_kws = {}
@@ -889,25 +958,28 @@ class ModelFit(Minimizer):
 
         x_array = self.userkws[independent_var]
 
-        ax.axhline(0, label=self.model.name, **fit_kws)
+        ax.axhline(0, **fit_kws)
 
-        if self.weights is not None:
-            ax.errorbar(x_array, self.eval() - self.data,
-                        yerr=1 / self.weights**0.5, fmt=datafmt,
-                        label='residuals', **data_kws)
+        if yerr is None and self.weights is not None:
+            yerr = 1.0/self.weights
+        if yerr is not None:
+            ax.errorbar(x_array, self.eval() - self.data, yerr=yerr,
+                        fmt=datafmt, label='residuals', **data_kws)
         else:
             ax.plot(x_array, self.eval() - self.data, datafmt,
                     label='residuals', **data_kws)
 
+        ax.set_title(self.model.name)
         ax.set_ylabel('residuals')
         ax.legend()
 
         return ax
 
     @_ensureMatplotlib
-    def plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
-             fig=None, data_kws=None, fit_kws=None, init_kws=None,
-             ax_res_kws=None, ax_fit_kws=None, fig_kws=None):
+    def plot(self, datafmt='o', fitfmt='-', initfmt='--', yerr=None,
+             numpoints=None, fig=None, data_kws=None, fit_kws=None,
+             init_kws=None, ax_res_kws=None, ax_fit_kws=None,
+             fig_kws=None):
         """Plot the fit results and residuals using matplotlib.
 
         The method will produce a matplotlib figure with both results of the
@@ -922,6 +994,8 @@ class ModelFit(Minimizer):
             matplotlib format string for fitted curve
         initfmt : string, optional
             matplotlib format string for initial conditions for the fit
+        yerr : ndarray, optional
+            array of uncertainties for data array
         numpoints : int, optional
             If provided, the final and initial fit curves are evaluated not
             only at data points, but refined to contain `numpoints` points in
@@ -949,17 +1023,18 @@ class ModelFit(Minimizer):
 
         Notes
         ----
-        The method combines ModelFit.plot_fit and ModelFit.plot_residuals.
+        The method combines ModelResult.plot_fit and ModelResult.plot_residuals.
 
-        If the fit model included weights, then matplotlib.axes.Axes.errorbar
-        is used to plot the data, with yerr set to 1 / self.weights**0.5
+        If yerr is specified or if the fit model included weights, then
+        matplotlib.axes.Axes.errorbar is used to plot the data.  If yerr is
+        not specified and the fit includes weights, yerr set to 1/self.weights
 
         If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.
 
         See Also
         --------
-        ModelFit.plot_fit : Plot the fit results using matplotlib.
-        ModelFit.plot_residuals : Plot the fit residuals using matplotlib.
+        ModelResult.plot_fit : Plot the fit results using matplotlib.
+        ModelResult.plot_residuals : Plot the fit residuals using matplotlib.
         """
         if data_kws is None:
             data_kws = {}
@@ -986,10 +1061,11 @@ class ModelFit(Minimizer):
         ax_res = fig.add_subplot(gs[0], **ax_res_kws)
         ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
 
-        self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt,
+        self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt, yerr=yerr,
                       initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
-                      fit_kws=fit_kws, init_kws={}, ax_kws=ax_fit_kws)
-        self.plot_residuals(ax=ax_res, datafmt=datafmt, data_kws=data_kws,
-                            fit_kws=fit_kws, ax_kws=ax_res_kws)
+                      fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
+        self.plot_residuals(ax=ax_res, datafmt=datafmt, yerr=yerr,
+                            data_kws=data_kws, fit_kws=fit_kws,
+                            ax_kws=ax_res_kws)
 
         return fig
diff --git a/lmfit/models.py b/lmfit/models.py
index ad43dca..11a56a7 100644
--- a/lmfit/models.py
+++ b/lmfit/models.py
@@ -192,9 +192,11 @@ class VoigtModel(Model):
 
 class PseudoVoigtModel(Model):
     __doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
+    fwhm_factor = 2.0
     def __init__(self, *args, **kwargs):
         super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
         self.set_param_hint('fraction', value=0.5)
+        self.set_param_hint('fwhm',  expr=fwhm_expr(self))
 
     def guess(self, data, x=None, negative=False, **kwargs):
         pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
@@ -354,11 +356,23 @@ class RectangleModel(Model):
 class ExpressionModel(Model):
     """Model from User-supplied expression
 
-%s
-    """ % COMMON_DOC
+Parameters
+----------
+expr:    string of mathematical expression for model.
+independent_vars: list of strings to be set as variable names
+missing: None, 'drop', or 'raise'
+    None: Do not check for null or missing values.
+    'drop': Drop null or missing observations in data.
+        Use pandas.isnull if pandas is available; otherwise,
+        silently fall back to numpy.isnan.
+    'raise': Raise a (more helpful) exception when data contains null
+        or missing values.
+prefix: NOT supported for ExpressionModel
+"""
 
     idvar_missing  = "No independent variable found in\n %s"
     idvar_notfound = "Cannot find independent variables '%s' in\n %s"
+    no_prefix      = "ExpressionModel does not support `prefix` argument"
     def __init__(self, expr, independent_vars=None, init_script=None,
                  *args, **kwargs):
 
@@ -370,8 +384,8 @@ class ExpressionModel(Model):
             self.asteval.eval(init_script)
 
         # save expr as text, parse to ast, save for later use
-        self.expr = expr
-        self.astcode = self.asteval.parse(expr)
+        self.expr = expr.strip()
+        self.astcode = self.asteval.parse(self.expr)
 
         # find all symbol names found in expression
         sym_names = get_ast_names(self.astcode)
@@ -401,6 +415,8 @@ class ExpressionModel(Model):
             raise ValueError(self.idvar_notfound % (lost, self.expr))
 
         kwargs['independent_vars'] = independent_vars
+        if 'prefix' in kwargs:
+            raise Warning(self.no_prefix)
 
         def _eval(**kwargs):
             for name, val in kwargs.items():
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 68990f1..f1d11f2 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -4,7 +4,7 @@ Parameter class
 from __future__ import division
 from numpy import arcsin, cos, sin, sqrt, inf, nan
 import json
-import sys
+from copy import deepcopy
 try:
     from collections import OrderedDict
 except ImportError:
@@ -12,7 +12,13 @@ except ImportError:
 
 from . import uncertainties
 
-from .astutils import valid_symbol_name
+from .asteval import Interpreter
+from .astutils import get_ast_names, valid_symbol_name
+
+def check_ast_errors(expr_eval):
+    """check for errors derived from asteval"""
+    if len(expr_eval.error) > 0:
+        expr_eval.raise_exception(None)
 
 class Parameters(OrderedDict):
     """
@@ -29,26 +35,99 @@ class Parameters(OrderedDict):
     dumps() / dump()
     loads() / load()
     """
-    def __init__(self, *args, **kwds):
+    def __init__(self, asteval=None, *args, **kwds):
         super(Parameters, self).__init__(self)
+        self._asteval = asteval
+        if asteval is None:
+            self._asteval = Interpreter()
         self.update(*args, **kwds)
 
-    def __setitem__(self, key, value):
+    def __deepcopy__(self, memo):
+        _pars = Parameters()
+        for key, val in self._asteval.symtable.items():
+            if key not in self._asteval.no_deepcopy:
+                _pars._asteval.symtable[key] = deepcopy(val, memo)
+        for key, par in self.items():
+            if isinstance(par, Parameter):
+                name = par.name
+                _pars.add(name, value=par.value, min=par.min, max=par.max)
+                _pars[name].vary = par.vary
+                _pars[name].stderr = par.stderr
+                _pars[name].correl = par.correl
+                _pars[name].init_value = par.init_value
+                _pars[name].expr = par.expr
+                _pars._asteval.symtable[name] = par.value
+        return _pars
+
+    def __setitem__(self, key, par):
         if key not in self:
             if not valid_symbol_name(key):
                 raise KeyError("'%s' is not a valid Parameters name" % key)
-        if value is not None and not isinstance(value, Parameter):
-            raise ValueError("'%s' is not a Parameter" % value)
-        OrderedDict.__setitem__(self, key, value)
-        value.name = key
+        if par is not None and not isinstance(par, Parameter):
+            raise ValueError("'%s' is not a Parameter" % par)
+        OrderedDict.__setitem__(self, key, par)
+        par.name = key
+        par._expr_eval = self._asteval
+        self._asteval.symtable[key] = par.value
+        self.update_constraints()
 
     def __add__(self, other):
         "add Parameters objects"
         if not isinstance(other, Parameters):
             raise ValueError("'%s' is not a Parameters object" % other)
+        out = deepcopy(self)
+        out.update(other)
+        return out
+
+    def __iadd__(self, other):
+        "add/assign Parameters objects"
+        if not isinstance(other, Parameters):
+            raise ValueError("'%s' is not a Parameters object" % other)
         self.update(other)
         return self
 
+    def update_constraints(self):
+        """update all constrained parameters, checking that
+        dependencies are evaluated as needed.
+        """
+        _updated = dict([(name, False) for name in self.keys()])
+        def _update_param(name):
+            """update a parameter value, including setting bounds.
+            For a constrained parameter (one with an expr defined),
+            this first updates (recursively) all parameters on which
+            the parameter depends (using the 'deps' field).
+            """
+            # Has this param already been updated?
+            # if this an expression dependency, it may have been
+            if _updated[name]:
+                return
+            par = self.__getitem__(name)
+            if par._expr_eval is None:
+                par._expr_eval = self._asteval
+            if par._expr is not None:
+                par.expr = par._expr
+            if par._expr_ast is not None:
+                for dep in par._expr_deps:
+                    if dep in self.keys():
+                        _update_param(dep)
+            self._asteval.symtable[name] = par.value
+            _updated[name] = True
+
+        for name in self.keys():
+            _update_param(name)
+
+    def pretty_repr(self, oneline=False):
+        if oneline:
+            return super(Parameters, self).__repr__()
+        s = "Parameters({\n"
+        for key in self.keys():
+            s += "    '%s': %s, \n" % (key, self[key])
+        s += "    })\n"
+        return s
+
+    def pretty_print(self, oneline=False):
+        print(self.pretty_repr(oneline=oneline))
+
     def add(self, name, value=None, vary=True, min=None, max=None, expr=None):
         """
         Convenience function for adding a Parameter:
@@ -225,7 +304,9 @@ class Parameter(object):
         self.max = max
         self.vary = vary
         self._expr = expr
-        self.deps   = None
+        self._expr_ast = None
+        self._expr_eval = None
+        self._expr_deps = []
         self.stderr = None
         self.correl = None
         self.from_internal = lambda val: val
@@ -249,6 +330,8 @@ class Parameter(object):
             Mathematical expression used to constrain the value during the fit.
             To remove a constraint you must supply an empty string.
         """
+
+        self.__set_expression(expr)
         if value is not None:
             self._val = value
         if vary is not None:
@@ -257,8 +340,6 @@ class Parameter(object):
             self.min = min
         if max is not None:
             self.max = max
-        if expr is not None:
-            self.expr = expr
 
     def _init_bounds(self):
         """make sure initial bounds are self-consistent"""
@@ -283,14 +364,16 @@ class Parameter(object):
         """set state for pickle"""
         (self.name, self.value, self.vary, self.expr, self.min,
          self.max, self.stderr, self.correl, self.init_value) = state
-        self._val = self.value
+        self._expr_ast = None
+        self._expr_eval = None
+        self._expr_deps = []
         self._init_bounds()
 
     def __repr__(self):
         s = []
         if self.name is not None:
             s.append("'%s'" % self.name)
-        sval = repr(self._val)
+        sval = repr(self._getval())
         if not self.vary and self._expr is None:
             sval = "value=%s (fixed)" % (sval)
         elif self.stderr is not None:
@@ -362,15 +445,23 @@ class Parameter(object):
                 pass
         if not self.vary and self._expr is None:
             return self._val
+        if not hasattr(self, '_expr_eval'):
+            self._expr_eval = None
+        if not hasattr(self, '_expr_ast'):
+            self._expr_ast = None
+        if self._expr_ast is not None and self._expr_eval is not None:
+            self._val = self._expr_eval(self._expr_ast)
+            check_ast_errors(self._expr_eval)
+
         if self.min is None:
             self.min = -inf
         if self.max is None:
             self.max =  inf
         if self.max < self.min:
             self.max, self.min = self.min, self.max
-        if abs((1.0*self.max - self.min)/max(self.max, self.min)) < 1.e-13:
+        if (abs((1.0*self.max - self.min)/
+                max(abs(self.max), abs(self.min), 1.e-13)) < 1.e-13):
             raise ValueError("Parameter '%s' has min == max" % self.name)
-
         try:
             if self.min > -inf:
                 self._val = max(self.min, self._val)
@@ -380,6 +471,10 @@ class Parameter(object):
             self._val = nan
         return self._val
 
+    def set_expr_eval(self, evaluator):
+        "set expression evaluator instance"
+        self._expr_eval = evaluator
+
     @property
     def value(self):
         "The numerical value of the Parameter, with bounds applied"
@@ -389,6 +484,9 @@ class Parameter(object):
     def value(self, val):
         "Set the numerical Parameter value."
         self._val = val
+        if not hasattr(self, '_expr_eval'):  self._expr_eval = None
+        if self._expr_eval is not None:
+            self._expr_eval.symtable[self.name] = val
 
     @property
     def expr(self):
@@ -403,9 +501,20 @@ class Parameter(object):
         The mathematical expression used to constrain the value during the fit.
         To remove a constraint you must supply an empty string.
         """
+        self.__set_expression(val)
+
+    def __set_expression(self, val):
         if val == '':
             val = None
         self._expr = val
+        if val is not None:
+            self.vary = False
+        if not hasattr(self, '_expr_eval'):  self._expr_eval = None
+        if val is None: self._expr_ast = None
+        if val is not None and self._expr_eval is not None:
+            self._expr_ast = self._expr_eval.parse(val)
+            check_ast_errors(self._expr_eval)
+            self._expr_deps = get_ast_names(self._expr_ast)
 
     def __str__(self):
         "string"
diff --git a/lmfit/ui/__init__.py b/lmfit/ui/__init__.py
index aea8f83..e835e21 100644
--- a/lmfit/ui/__init__.py
+++ b/lmfit/ui/__init__.py
@@ -14,13 +14,23 @@ else:
 try:
     import IPython
 except ImportError:
-    pass
+    warnings.warn("lmfit.Fitter will use basic mode, not IPython: need matplotlib")
 else:
     _ipy_msg1 = "lmfit.Fitter will use basic mode, not IPython: need IPython2."
     _ipy_msg2 = "lmfit.Fitter will use basic mode, not IPython: could not get IPython version"
+    _ipy_msg3 = "lmfit.Fitter will use basic mode, not IPython: need ipywidgets."
     try:
-        if IPython.release.version_info[0] < 2:
+        major_version = IPython.release.version_info[0]
+        if major_version < 2:
             warnings.warn(_ipy_msg1)
+        elif major_version > 3:
+            # After IPython 3, widgets were moved to a separate package.
+            # There is a shim to allow the old import, but the package has to be
+            # installed for that to work.
+            try:
+                import ipywidgets
+            except ImportError:
+                warnings.warn(_ipy_msg3)
         else:
             # has_ipython = iPython installed and we are in an IPython session.
             has_ipython = IPython.get_ipython() is not None
diff --git a/lmfit/ui/basefitter.py b/lmfit/ui/basefitter.py
index ae51dea..1f8f815 100644
--- a/lmfit/ui/basefitter.py
+++ b/lmfit/ui/basefitter.py
@@ -5,7 +5,7 @@ from ..model import Model
 from ..models import ExponentialModel  # arbitrary default
 from ..asteval import Interpreter
 from ..astutils import NameFinder
-from ..minimizer import check_ast_errors
+from ..parameter import check_ast_errors
 
 
 _COMMON_DOC = """
diff --git a/lmfit/ui/ipy_fitter.py b/lmfit/ui/ipy_fitter.py
index fb94dd2..80edda5 100644
--- a/lmfit/ui/ipy_fitter.py
+++ b/lmfit/ui/ipy_fitter.py
@@ -12,19 +12,32 @@ import IPython
 from IPython.display import display, clear_output
 # Widgets were only experimental in IPython 2.x, but this does work there.
 # Handle the change in naming from 2.x to 3.x.
-if IPython.release.version_info[0] == 2:
+IPY2 = IPython.release.version_info[0] == 2
+IPY3 = IPython.release.version_info[0] == 3
+if IPY2:
     from IPython.html.widgets import DropdownWidget as Dropdown
     from IPython.html.widgets import ButtonWidget as Button
-    from IPython.html.widgets import ContainerWidget as Box
+    from IPython.html.widgets import ContainerWidget
     from IPython.html.widgets import FloatTextWidget as FloatText
     from IPython.html.widgets import CheckboxWidget as Checkbox
-else:
+    class HBox(ContainerWidget):
+        def __init__(self, *args, **kwargs):
+           self.add_class('hbox')
+           super(self, ContainerWidget).__init__(*args, **kwargs)
+elif IPY3:
     # as of IPython 3.x:
     from IPython.html.widgets import Dropdown
     from IPython.html.widgets import Button
-    from IPython.html.widgets import Box
+    from IPython.html.widgets import HBox
     from IPython.html.widgets import FloatText
     from IPython.html.widgets import Checkbox
+else:
+    # as of IPython 4.x+:
+    from ipywidgets import Dropdown
+    from ipywidgets import Button
+    from ipywidgets import HBox
+    from ipywidgets import FloatText
+    from ipywidgets import Checkbox
 
 
 class ParameterWidgetGroup(object):
@@ -37,8 +50,11 @@ class ParameterWidgetGroup(object):
         # Define widgets.
         self.value_text = FloatText(description=par.name,
                                     min=self.par.min, max=self.par.max)
+        self.value_text.width = 100
         self.min_text = FloatText(description='min', max=self.par.max)
+        self.min_text.width = 100
         self.max_text = FloatText(description='max', min=self.par.min)
+        self.max_text.width = 100
         self.min_checkbox = Checkbox(description='min')
         self.max_checkbox = Checkbox(description='max')
         self.vary_checkbox = Checkbox(description='vary')
@@ -103,7 +119,7 @@ class ParameterWidgetGroup(object):
 
     def _on_vary_change(self, name, value):
         self.par.vary = value
-        self.value_text.disabled = not value
+        # self.value_text.disabled = not value
 
     def close(self):
         # one convenience method to close (i.e., hide and disconnect) all
@@ -116,12 +132,11 @@ class ParameterWidgetGroup(object):
         self.max_checkbox.close()
 
     def _repr_html_(self):
-        box = Box()
+        box = HBox()
         box.children = [self.value_text, self.vary_checkbox,
-                        self.min_text, self.min_checkbox,
-                        self.max_text, self.max_checkbox]
+                        self.min_checkbox, self.min_text,
+                        self.max_checkbox, self.max_text]
         display(box)
-        box.add_class('hbox')
 
     # Make it easy to set the widget attributes directly.
     @property
@@ -199,9 +214,15 @@ class NotebookFitter(MPLFitter):
                 data_style={}, init_style={}, best_style={}, **kwargs):
         # Dropdown menu of all subclasses of Model, incl. user-defined.
         self.models_menu = Dropdown()
-        if all_models is None:
-            all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
-        self.models_menu.values = all_models
+        # Dropbox API is very different between IPy 2.x and 3.x.
+        if IPY2:
+            if all_models is None:
+                all_models = dict([(m.__name__, m) for m in Model.__subclasses__()])
+            self.models_menu.values = all_models
+        else:
+            if all_models is None:
+                all_models = [(m.__name__, m) for m in Model.__subclasses__()]
+            self.models_menu.options = all_models
         self.models_menu.on_trait_change(self._on_model_value_change,
                                              'value')
         # Button to trigger fitting.
@@ -220,10 +241,9 @@ class NotebookFitter(MPLFitter):
 
     def _repr_html_(self):
         display(self.models_menu)
-        button_box = Box()
+        button_box = HBox()
         button_box.children = [self.fit_button, self.guess_button]
         display(button_box)
-        button_box.add_class('hbox')
         for pw in self.param_widgets:
             display(pw)
         self.plot()
diff --git a/tests/NISTModels.py b/tests/NISTModels.py
index 0e07a2d..197856f 100644
--- a/tests/NISTModels.py
+++ b/tests/NISTModels.py
@@ -3,7 +3,7 @@ import sys
 from numpy import exp, log, log10, sin, cos, arctan, array
 from lmfit import Parameters
 thisdir, thisfile = os.path.split(__file__)
-NIST_DIR = os.path.join(thisdir, 'NIST_STRD')
+NIST_DIR = os.path.join(thisdir, '..', 'NIST_STRD')
 
 def read_params(params):
     if isinstance(params, Parameters):
diff --git a/tests/test_NIST_Strd.py b/tests/test_NIST_Strd.py
index d3a1c37..aec9a8f 100644
--- a/tests/test_NIST_Strd.py
+++ b/tests/test_NIST_Strd.py
@@ -88,11 +88,11 @@ def NIST_Dataset(DataSet, method='leastsq', start='start2',
         params.add(pname, value=pval1)
 
     myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
-    digs, buff = Compare_NIST_Results(DataSet, myfit, params, NISTdata)
+    digs, buff = Compare_NIST_Results(DataSet, myfit, myfit.params, NISTdata)
     if verbose:
         print(buff)
     if plot and HASPYLAB:
-        fit = -resid(params, x, )
+        fit = -resid(myfit.params, x, )
         pylab.plot(x, y, 'ro')
         pylab.plot(x, fit, 'k+-')
         pylab.show()
@@ -265,4 +265,3 @@ def test_Thurber():
 
 if __name__ == '__main__':
     run_interactive()
-
diff --git a/tests/test_algebraic_constraint.py b/tests/test_algebraic_constraint.py
index 8d26942..f5cc972 100644
--- a/tests/test_algebraic_constraint.py
+++ b/tests/test_algebraic_constraint.py
@@ -31,18 +31,19 @@ def test_constraints1():
             x*0.5)
 
 
-    pfit = [Parameter(name='amp_g',  value=10),
-            Parameter(name='cen_g',  value=9),
-            Parameter(name='wid_g',  value=1),
-
-            Parameter(name='amp_tot',  value=20),
-            Parameter(name='amp_l',  expr='amp_tot - amp_g'),
-            Parameter(name='cen_l',  expr='1.5+cen_g'),
-            Parameter(name='wid_l',  expr='2*wid_g'),
-
-            Parameter(name='line_slope', value=0.0),
-            Parameter(name='line_off', value=0.0)]
-
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+    
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='wid_l',  expr='2*wid_g')
+    
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
+            
     sigma = 0.021  # estimate of data error (for all data points)
 
     myfit = Minimizer(residual, pfit,
@@ -94,15 +95,16 @@ def test_constraints2():
             random.normal(scale=0.23,  size=n) +
             x*0.5)
 
-    pfit = [Parameter(name='amp_g',  value=10),
-            Parameter(name='cen_g',  value=9),
-            Parameter(name='wid_g',  value=1),
-
-            Parameter(name='amp_tot',  value=20),
-            Parameter(name='amp_l',  expr='amp_tot - amp_g'),
-            Parameter(name='cen_l',  expr='1.5+cen_g'),
-            Parameter(name='line_slope', value=0.0),
-            Parameter(name='line_off', value=0.0)]
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+    
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
 
     sigma = 0.021  # estimate of data error (for all data points)
 
@@ -114,7 +116,7 @@ def test_constraints2():
         """ """
         return 2*wpar
 
-    myfit.asteval.symtable['wfun'] = width_func
+    myfit.params._asteval.symtable['wfun'] = width_func
     myfit.params.add(name='wid_l', expr='wfun(wid_g)')
 
     myfit.leastsq()
diff --git a/tests/test_algebraic_constraint2.py b/tests/test_algebraic_constraint2.py
index 7f5e24f..b271fb4 100644
--- a/tests/test_algebraic_constraint2.py
+++ b/tests/test_algebraic_constraint2.py
@@ -51,17 +51,18 @@ def test_constraints(with_plot=True):
     if with_plot:
         pylab.plot(x, data, 'r+')
 
-    pfit = [Parameter(name='amp_g',  value=10),
-            Parameter(name='cen_g',  value=9),
-            Parameter(name='wid_g',  value=1),
-
-            Parameter(name='amp_tot',  value=20),
-            Parameter(name='amp_l',  expr='amp_tot - amp_g'),
-            Parameter(name='cen_l',  expr='1.5+cen_g'),
-            Parameter(name='wid_l',  expr='2*wid_g'),
-
-            Parameter(name='line_slope', value=0.0),
-            Parameter(name='line_off', value=0.0)]
+    pfit = Parameters()
+    pfit.add(name='amp_g',  value=10)
+    pfit.add(name='cen_g',  value=9)
+    pfit.add(name='wid_g',  value=1)
+    
+    pfit.add(name='amp_tot',  value=20)
+    pfit.add(name='amp_l',  expr='amp_tot - amp_g')
+    pfit.add(name='cen_l',  expr='1.5+cen_g')
+    pfit.add(name='wid_l',  expr='2*wid_g')
+    
+    pfit.add(name='line_slope', value=0.0)
+    pfit.add(name='line_off', value=0.0)
 
     sigma = 0.021  # estimate of data error (for all data points)
 
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
index 97f4f15..98b6338 100644
--- a/tests/test_basicfit.py
+++ b/tests/test_basicfit.py
@@ -39,8 +39,8 @@ def test_basic():
     assert(result.nfev < 500)
     assert(result.chisqr > 1)
     assert(result.nvarys == 4)
-    assert_paramval(params['amp'],   5.03, tol=0.05)
-    assert_paramval(params['omega'], 2.0, tol=0.05)
+    assert_paramval(result.params['amp'],   5.03, tol=0.05)
+    assert_paramval(result.params['omega'], 2.0, tol=0.05)
 
 
 if __name__ == '__main__':
diff --git a/tests/test_bounded_jacobian.py b/tests/test_bounded_jacobian.py
new file mode 100644
index 0000000..810a505
--- /dev/null
+++ b/tests/test_bounded_jacobian.py
@@ -0,0 +1,43 @@
+from lmfit import Parameters, minimize, fit_report
+from lmfit_testutils import assert_paramval, assert_paramattr
+
+import numpy as np
+
+
+def test_bounded_jacobian():
+    pars = Parameters()
+    pars.add('x0', value=2.0)
+    pars.add('x1', value=2.0, min=1.5)
+
+    global jac_count
+
+    jac_count = 0
+
+    def resid(params):
+        x0 = params['x0'].value
+        x1 = params['x1'].value
+        return np.array([10 * (x1 - x0*x0), 1-x0])
+
+    def jac(params):
+        global jac_count
+        jac_count += 1
+        x0 = params['x0'].value
+        return np.array([[-20*x0, 10], [-1, 0]])
+    
+    out0 = minimize(resid, pars, Dfun=None)
+
+    assert_paramval(out0.params['x0'], 1.2243, tol=0.02)
+    assert_paramval(out0.params['x1'], 1.5000, tol=0.02)
+    assert(jac_count == 0)
+
+    out1 = minimize(resid, pars, Dfun=jac)
+
+    assert_paramval(out1.params['x0'], 1.2243, tol=0.02)
+    assert_paramval(out1.params['x1'], 1.5000, tol=0.02)
+    assert(jac_count > 5)
+
+    print(fit_report(out1, show_correl=True))
+
+
+if __name__ == '__main__':
+    test_bounded_jacobian()
diff --git a/tests/test_bounds.py b/tests/test_bounds.py
index 0ae9417..99c962d 100644
--- a/tests/test_bounds.py
+++ b/tests/test_bounds.py
@@ -40,15 +40,15 @@ def test_bounds():
 
     out = minimize(residual, fit_params, args=(x,), kws={'data':data})
 
-    fit = residual(fit_params, x)
+    fit = residual(out.params, x)
 
     assert(out.nfev  > 10)
     assert(out.nfree > 50)
     assert(out.chisqr > 1.0)
 
     print(fit_report(out, show_correl=True, modelpars=p_true))
-    assert_paramval(fit_params['decay'], 0.01, tol=1.e-2)
-    assert_paramval(fit_params['shift'], 0.123, tol=1.e-2)
+    assert_paramval(out.params['decay'], 0.01, tol=1.e-2)
+    assert_paramval(out.params['shift'], 0.123, tol=1.e-2)
 
 if __name__ == '__main__':
     test_bounds()
diff --git a/tests/test_confidence.py b/tests/test_confidence.py
new file mode 100644
index 0000000..dc449e3
--- /dev/null
+++ b/tests/test_confidence.py
@@ -0,0 +1,44 @@
+import numpy as np
+from numpy.testing import assert_allclose
+
+import lmfit
+from lmfit_testutils import assert_paramval
+
+def residual(params, x, data):
+    a = params['a'].value
+    b = params['b'].value
+    return data - 1.0/(a*x)+b
+
+def test_confidence1():
+    x = np.linspace(0.3,10,100)
+    np.random.seed(0)
+   
+    y = 1/(0.1*x)+2+0.1*np.random.randn(x.size)
+    
+    pars = lmfit.Parameters()
+    pars.add_many(('a', 0.1), ('b', 1))
+    
+    minimizer = lmfit.Minimizer(residual, pars, fcn_args=(x, y) )
+    out = minimizer.leastsq()
+    # lmfit.report_fit(out)
+    
+    assert(out.nfev >   5)
+    assert(out.nfev < 500)
+    assert(out.chisqr < 3.0)
+    assert(out.nvarys == 2)
+
+    assert_paramval(out.params['a'],  0.1, tol=0.1)
+    assert_paramval(out.params['b'], -2.0, tol=0.1)
+
+    ci = lmfit.conf_interval(minimizer, out)
+    assert_allclose(ci['b'][0][0],  0.997,  rtol=0.01)
+    assert_allclose(ci['b'][0][1], -2.022,  rtol=0.01)
+    assert_allclose(ci['b'][2][0],  0.674,  rtol=0.01)
+    assert_allclose(ci['b'][2][1], -1.997,  rtol=0.01)
+    assert_allclose(ci['b'][5][0],  0.95,   rtol=0.01)
+    assert_allclose(ci['b'][5][1], -1.96,   rtol=0.01)
+
+   # lmfit.printfuncs.report_ci(ci)
+
+if __name__ == '__main__':
+    test_confidence1()
diff --git a/tests/test_default_kws.py b/tests/test_default_kws.py
index 4d58f03..8ab835f 100644
--- a/tests/test_default_kws.py
+++ b/tests/test_default_kws.py
@@ -21,5 +21,4 @@ def test_default_inputs_gauss():
     result2 = g.fit(y, x=x, amplitude=1, center=0, sigma=0.5, fit_kws=fit_option2)
 
     assert_true(result1.values!=result2.values)
-
-    return
\ No newline at end of file
+    return
diff --git a/tests/test_itercb.py b/tests/test_itercb.py
new file mode 100644
index 0000000..f77eb48
--- /dev/null
+++ b/tests/test_itercb.py
@@ -0,0 +1,29 @@
+import numpy as np
+from lmfit import Parameters, minimize, report_fit
+from lmfit.models import LinearModel, GaussianModel
+from lmfit.lineshapes import gaussian
+
+def per_iteration(pars, iter, resid, *args, **kws):
+    """iteration callback, will abort at iteration 23
+    """
+    # print( iter, ', '.join(["%s=%.4f" % (p.name, p.value) for p in pars.values()]))
+    return iter == 23
+
+def test_itercb():
+    x = np.linspace(0, 20, 401)
+    y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
+    y = y  - .20*x + 3.333 + np.random.normal(scale=0.23,  size=len(x))
+    mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
+
+    pars = mod.make_params(peak_amplitude=21.0,
+                           peak_center=7.0,
+                           peak_sigma=2.0,
+                           bkg_intercept=2,
+                           bkg_slope=0.0)
+
+    out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
+
+    assert(out.nfev == 23)
+    assert(out.aborted)
+    assert(not out.errorbars)
+    assert(not out.success)
diff --git a/tests/test_model.py b/tests/test_model.py
index fe3165c..b6ea042 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -160,6 +160,45 @@ class CommonTests(object):
         # to be overridden for models that do not accept indep vars
         pass
 
+    def test_aic(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        aic = result.aic
+        self.assertTrue(aic < 0) # aic must be negative
+
+        # Pass extra unused Parameter.
+        params.add("unused_param", value=1.0, vary=True)
+        result = model.fit(self.data, params, x=self.x)
+        aic_extra = result.aic
+        self.assertTrue(aic_extra < 0)   # aic must be negative
+        self.assertTrue(aic < aic_extra) # the extra param should raise the aic
+
+
+    def test_bic(self):
+        model = self.model
+
+        # Pass Parameters object.
+        params = model.make_params(**self.guess())
+        result = model.fit(self.data, params, x=self.x)
+        bic = result.bic
+        self.assertTrue(bic < 0) # bic must be negative
+
+        # Compare to AIC
+        aic = result.aic
+        self.assertTrue(aic < bic) # aic should be lower than bic
+
+        # Pass extra unused Parameter.
+        params.add("unused_param", value=1.0, vary=True)
+        result = model.fit(self.data, params, x=self.x)
+        bic_extra = result.bic
+        self.assertTrue(bic_extra < 0)   # bic must be negative
+        self.assertTrue(bic < bic_extra) # the extra param should raise the bic
+
+
+
 
 class TestUserDefiniedModel(CommonTests, unittest.TestCase):
     # mainly aimed at checking that the API does what it says it does
diff --git a/tests/test_multidatasets.py b/tests/test_multidatasets.py
index bc23abf..985a70c 100644
--- a/tests/test_multidatasets.py
+++ b/tests/test_multidatasets.py
@@ -2,7 +2,6 @@
 # example fitting to multiple (simulated) data sets
 #
 import numpy as np
-import matplotlib.pyplot as plt
 from lmfit import minimize, Parameters, fit_report
 from lmfit.lineshapes import gaussian
 
@@ -73,4 +72,3 @@ def test_multidatasets():
 
 if __name__ == '__main__':
     test_multidatasets()
-
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 35f09a8..3b25976 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -56,11 +56,14 @@ def test_simple():
     final = data + result.residual
 
     # write error report
-    report_fit(params)
+    print(" --> SIMPLE --> ")
+    print(result.params)
+    report_fit(result.params)
 
     #assert that the real parameters are found
 
-    for para, val in zip(params.values(), [5, 0.025, -.1, 2]):
+    for para, val in zip(result.params.values(), [5, 0.025, -.1, 2]):
+        
         check(para, val)
 
 def test_lbfgsb():
@@ -102,11 +105,11 @@ def test_lbfgsb():
 
     fit = residual(fit_params, x)
 
-    for name, par in fit_params.items():
+    for name, par in out.params.items():
         nout = "%s:%s" % (name, ' '*(20-len(name)))
         print("%s: %s (%s) " % (nout, par.value, p_true[name].value))
 
-    for para, true_para in zip(fit_params.values(), p_true.values()):
+    for para, true_para in zip(out.params.values(), p_true.values()):
         check_wo_stderr(para, true_para.value)
 
 def test_derive():
@@ -125,7 +128,7 @@ def test_derive():
         b = pars['b'].value
         c = pars['c'].value
         v = np.exp(-b*x)
-        return [v, -a*x*v, np.ones(len(x))]
+        return np.array([v, -a*x*v, np.ones(len(x))])
 
     def f(var, x):
         return var[0]* np.exp(-var[1] * x)+var[2]
@@ -147,14 +150,15 @@ def test_derive():
 
     # fit without analytic derivative
     min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
-    min1.leastsq()
-    fit1 = func(params1, x)
+    out1 = min1.leastsq()
+    fit1 = func(out1.params, x)
 
     # fit with analytic derivative
     min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
-    min2.leastsq(Dfun=dfunc, col_deriv=1)
-    fit2 = func(params2, x)
+    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)
+    fit2 = func(out2.params, x)
 
+    
     print ('''Comparison of fit to exponential decay
     with and without analytic derivatives, to
        model = a*exp(-b*x) + c
@@ -169,15 +173,15 @@ def test_derive():
        c               |   %.4f    |   %.4f  |
     ----------------------------------------------
     ''' %  (a, b, c,
-            min1.nfev,   min2.nfev,
-            min1.chisqr, min2.chisqr,
-            params1['a'].value, params2['a'].value,
-            params1['b'].value, params2['b'].value,
-            params1['c'].value, params2['c'].value ))
+            out1.nfev,   out2.nfev,
+            out1.chisqr, out2.chisqr,
+            out1.params['a'].value, out2.params['a'].value,
+            out1.params['b'].value, out2.params['b'].value,
+            out1.params['c'].value, out2.params['c'].value ))
 
-    check_wo_stderr(min1.params['a'], min2.params['a'].value, 0.00005)
-    check_wo_stderr(min1.params['b'], min2.params['b'].value, 0.00005)
-    check_wo_stderr(min1.params['c'], min2.params['c'].value, 0.00005)
+    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
+    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
+    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
 
 def test_peakfit():
     def residual(pars, x, data=None):
@@ -222,15 +226,15 @@ def test_peakfit():
     init = residual(fit_params, x)
 
 
-    myfit.leastsq()
+    out = myfit.leastsq()
 
     # print(' N fev = ', myfit.nfev)
     # print(myfit.chisqr, myfit.redchi, myfit.nfree)
 
-    report_fit(fit_params)
+    report_fit(out.params)
 
-    fit = residual(fit_params, x)
-    check_paras(fit_params, org_params)
+    fit = residual(out.params, x)
+    check_paras(out.params, org_params)
 
 
 def test_scalar_minimize_has_no_uncertainties():
@@ -266,21 +270,47 @@ def test_scalar_minimize_has_no_uncertainties():
     params.add('omega', value= 3.0)
 
     mini = Minimizer(fcn2min, params, fcn_args=(x, data))
-    mini.minimize()
-    assert_(np.isfinite(mini.params['amp'].stderr))
-    print(mini.errorbars)
-    assert_(mini.errorbars == True)
-    mini.minimize(method='nelder-mead')
-    assert_(mini.params['amp'].stderr is None)
-    assert_(mini.params['decay'].stderr is None)
-    assert_(mini.params['shift'].stderr is None)
-    assert_(mini.params['omega'].stderr is None)
-    assert_(mini.params['amp'].correl is None)
-    assert_(mini.params['decay'].correl is None)
-    assert_(mini.params['shift'].correl is None)
-    assert_(mini.params['omega'].correl is None)
-    assert_(mini.errorbars == False)
+    out = mini.minimize()
+    assert_(np.isfinite(out.params['amp'].stderr))
+    print(out.errorbars)
+    assert_(out.errorbars == True)
+    out2 = mini.minimize(method='nelder-mead')
+    assert_(out2.params['amp'].stderr is None)
+    assert_(out2.params['decay'].stderr is None)
+    assert_(out2.params['shift'].stderr is None)
+    assert_(out2.params['omega'].stderr is None)
+    assert_(out2.params['amp'].correl is None)
+    assert_(out2.params['decay'].correl is None)
+    assert_(out2.params['shift'].correl is None)
+    assert_(out2.params['omega'].correl is None)
+    assert_(out2.errorbars == False)
+
+
+def test_multidimensional_fit_GH205():
+    # test that you don't need to flatten the output from the objective
+    # function. Tests regression for GH205.
+    pos = np.linspace(0, 99, 100)
+    xv, yv = np.meshgrid(pos, pos)
+    f = lambda xv, yv, lambda1, lambda2: (np.sin(xv * lambda1)
+                                             + np.cos(yv * lambda2))
+
+    data = f(xv, yv, 0.3, 3)
+    assert_(data.ndim, 2)
+
+    def fcn2min(params, xv, yv, data):
+        """ model decaying sine wave, subtract data"""
+        lambda1 = params['lambda1'].value
+        lambda2 = params['lambda2'].value
+        model = f(xv, yv, lambda1, lambda2)
+        return model - data
+
+    # create a set of Parameters
+    params = Parameters()
+    params.add('lambda1', value=0.4)
+    params.add('lambda2', value=3.2)
 
+    mini = Minimizer(fcn2min, params, fcn_args=(xv, yv, data))
+    res = mini.minimize()
 
 class CommonMinimizerTest(unittest.TestCase):
 
@@ -352,15 +382,15 @@ class CommonMinimizerTest(unittest.TestCase):
             raise SkipTest
 
         print(self.minimizer)
-        self.mini.scalar_minimize(method=self.minimizer)
+        out = self.mini.scalar_minimize(method=self.minimizer)
 
-        fit = self.residual(self.fit_params, self.x)
+        fit = self.residual(out.params, self.x)
 
-        for name, par in self.fit_params.items():
+        for name, par in out.params.items():
             nout = "%s:%s" % (name, ' '*(20-len(name)))
             print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))
 
-        for para, true_para in zip(self.fit_params.values(),
+        for para, true_para in zip(out.params.values(),
                                    self.p_true.values()):
             check_wo_stderr(para, true_para.value, sig=sig)
 
diff --git a/tests/test_params_set.py b/tests/test_params_set.py
new file mode 100644
index 0000000..b89d8ea
--- /dev/null
+++ b/tests/test_params_set.py
@@ -0,0 +1,41 @@
+import numpy as np
+from numpy.testing import assert_allclose
+from lmfit import Parameters, minimize, report_fit
+from lmfit.lineshapes import gaussian
+from lmfit.models import VoigtModel
+
+def test_param_set():
+    np.random.seed(2015)
+    x = np.arange(0, 20, 0.05)
+    y = gaussian(x, amplitude=15.43, center=4.5, sigma=2.13)
+    y = y + 0.05 - 0.01*x + np.random.normal(scale=0.03, size=len(x))
+
+    model  = VoigtModel()
+    params = model.guess(y, x=x)
+
+    # test #1:  gamma is constrained to equal sigma
+    sigval = params['gamma'].value
+    assert(params['gamma'].expr == 'sigma')
+    assert_allclose(params['gamma'].value, sigval, 1e-4, 1e-4, '', True)
+
+    # test #2: explicitly setting a param value should work, even when
+    #          it had been an expression.  The value will be left as fixed
+    gamval = 0.87543
+    params['gamma'].set(value=gamval)
+    assert(params['gamma'].expr is None)
+    assert(not params['gamma'].vary)
+    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
+
+    # test #3: explicitly setting an expression should work
+    params['gamma'].set(expr='sigma/2.0')
+    assert(params['gamma'].expr is not None)
+    assert(not params['gamma'].vary)
+    assert_allclose(params['gamma'].value, sigval/2.0, 1e-4, 1e-4, '', True)
+
+    # test #4: explicitly setting a param value WITH vary=True
+    #          will set it to be variable
+    gamval = 0.7777
+    params['gamma'].set(value=gamval, vary=True)
+    assert(params['gamma'].expr is None)
+    assert(params['gamma'].vary)
+    assert_allclose(params['gamma'].value, gamval, 1e-4, 1e-4, '', True)
diff --git a/tests/test_stepmodel.py b/tests/test_stepmodel.py
index 31a67c7..cabf118 100644
--- a/tests/test_stepmodel.py
+++ b/tests/test_stepmodel.py
@@ -3,8 +3,6 @@ from lmfit import fit_report
 from lmfit.models import StepModel, ConstantModel
 from lmfit_testutils import assert_paramval, assert_paramattr
 
-import matplotlib.pyplot as plt
-
 def get_data():
     x  = np.linspace(0, 10, 201)
     dat = np.ones_like(x)
@@ -58,4 +56,3 @@ def test_stepmodel_erf():
 if __name__ == '__main__':
     # test_stepmodel_linear()
     test_stepmodel_erf()
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git



More information about the debian-science-commits mailing list