[lmfit-py] 01/06: Imported Upstream version 0.8.2+dfsg.1

Frédéric-Emmanuel Picca picca at moszumanska.debian.org
Tue Dec 30 17:57:47 UTC 2014


This is an automated email from the git hooks/post-receive script.

picca pushed a commit to branch master
in repository lmfit-py.

commit 48cdb0ce526dc04dc6e2088dd5883badc3ed488f
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date:   Tue Dec 30 08:10:55 2014 +0100

    Imported Upstream version 0.8.2+dfsg.1
---
 LICENSE                            |   2 +
 THANKS.txt                         |  31 +-
 doc/_images/model_fit3a.png        | Bin 0 -> 30423 bytes
 doc/_images/model_fit3b.png        | Bin 0 -> 29907 bytes
 doc/builtin_models.rst             |   8 +-
 doc/fitting.rst                    |  72 ++--
 doc/installation.rst               |  41 +--
 doc/model.rst                      | 289 ++++++++-------
 doc/parameters.rst                 |   7 +-
 examples/doc_model2.py             |   5 -
 examples/doc_model3.py             |  59 ++++
 examples/fit1.py                   |  63 ----
 examples/fit_pvoigt.py             |  99 ------
 examples/fit_pvoigt2.py            |  87 -----
 examples/fit_pvoigt_NelderMead.py  |  99 ------
 examples/fit_pvoigt_NelderMead2.py |  85 -----
 examples/lmfit_and_emcee.py        | 124 +++++++
 examples/m1.py                     |  26 --
 examples/model1d_doc1.py           |  23 --
 examples/model1d_doc2.py           |  38 --
 examples/models.py                 | 128 -------
 examples/models_doc2.py            |  30 --
 examples/peakfit_1.py              |  75 ----
 examples/plot_fit.py               |  26 ++
 examples/plot_fit2.py              |  25 ++
 examples/use_models1d.py           |  44 ---
 lmfit/__init__.py                  |   2 +-
 lmfit/_differentialevolution.py    | 696 +++++++++++++++++++++++++++++++++++++
 lmfit/_version.py                  |   4 +-
 lmfit/confidence.py                |   3 +-
 lmfit/minimizer.py                 | 344 ++++++++++++++----
 lmfit/model.py                     | 589 +++++++++++++++++++++++++------
 lmfit/parameter.py                 | 134 +++++--
 tests/test_basicfit.py             |   2 +
 tests/test_nose.py                 | 114 ++----
 35 files changed, 2073 insertions(+), 1301 deletions(-)

diff --git a/LICENSE b/LICENSE
index 4511525..174874e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -6,7 +6,9 @@ The LMFIT-py code is distributed under the following license:
   Copyright (c) 2014 Matthew Newville, The University of Chicago
                      Till Stensitzki, Freie Universitat Berlin
                      Daniel B. Allen, Johns Hopkins University
+                     Michal Rawlik, Eidgenossische Technische Hochschule, Zurich
                      Antonino Ingargiola, University of California, Los Angeles
+                     A. R. J. Nelson, Australian Nuclear Science and Technology Organisation
 
   Permission to use and redistribute the source code or binary forms of this
   software and its documentation, with or without modification is hereby
diff --git a/THANKS.txt b/THANKS.txt
index 70bf7e9..4436a80 100644
--- a/THANKS.txt
+++ b/THANKS.txt
@@ -1,17 +1,24 @@
 Many people have contributed to lmfit.
 
-Matthew Newville wrote the original implementation.
-Till Stensitzki wrote the improved estimates of confidence intervals,  and
-     contributed many tests, bug fixes, and documentation.
+Matthew Newville wrote the original version and maintains the project.
+Till Stensitzki wrote the improved estimates of confidence intervals, and
+    contributed many tests, bug fixes, and documentation.
 Daniel B. Allan wrote much of the high level Model code, and many
-     improvements to the testing and documentation.
-Antonino Ingargiola wrote much of the high level Model code and
-     provided many bug fixes.
+    improvements to the testing and documentation.
+Antonino Ingargiola wrote much of the high level Model code and provided
+    many bug fixes.
 J. J. Helmus wrote the MINUIT bounds for leastsq, originally in
-     leastsqbounds.py, and ported to lmfit.
-E. O. Le Bigot wrote the uncertainties package, a version of which is
-     used is lmfit.
+    leastsqbounds.py, and ported to lmfit.
+E. O. Le Bigot wrote the uncertainties package, a version of which is used
+    by lmfit.
+Michal Rawlik added plotting capabilities for Models.
+A. R. J. Nelson added differential_evolution, and greatly improved the code
+    in the docstrings.
 
-Additional patches, bug fixes, and suggestions have come from
-  Christohp Deil, Francois Boulogne, Colin Brosseau, nmearl,
-  Gustavo Pasquevich, LiCode, and Ben Gamari
+Additional patches, bug fixes, and suggestions have come from Christoph
+    Deil, Francois Boulogne, Thomas Caswell, Colin Brosseau, nmearl,
+    Gustavo Pasquevich, Clemens Prescher, LiCode, and Ben Gamari.
+
+The lmfit code obviously depends on, and owes a very large debt to the code
+in scipy.optimize.  Several discussions on the scipy-user and lmfit mailing
+lists have also led to improvements in this code.
diff --git a/doc/_images/model_fit3a.png b/doc/_images/model_fit3a.png
new file mode 100644
index 0000000..26242e4
Binary files /dev/null and b/doc/_images/model_fit3a.png differ
diff --git a/doc/_images/model_fit3b.png b/doc/_images/model_fit3b.png
new file mode 100644
index 0000000..c2e273d
Binary files /dev/null and b/doc/_images/model_fit3b.png differ
diff --git a/doc/builtin_models.rst b/doc/builtin_models.rst
index 5bb1911..9b3c02c 100644
--- a/doc/builtin_models.rst
+++ b/doc/builtin_models.rst
@@ -527,13 +527,13 @@ expression is parsed), so that you can define functions to be used
 in your expression.
 
 As a probably unphysical example, to make a model that is the derivative of
-a Gaussian function times the logarithm of a Lorenztian function you may
+a Gaussian function times the logarithm of a Lorentzian function, you
 could define this in a script::
 
     >>> script = """
     def mycurve(x, amp, cen, sig):
         loren = lorentzian(x, amplitude=amp, center=cen, sigma=sig)
-        gauss = gaussian(x, amplitude=ampa, center=cen, sigma=sig)
+        gauss = gaussian(x, amplitude=amp, center=cen, sigma=sig)
         return log(loren)*gradient(gauss)/gradient(x)
     """
 
@@ -551,7 +551,7 @@ and `wid`, and build a model that can be used to fit data.
 Example 1: Fit Peaked data to Gaussian, Lorentzian, and Voigt profiles
 ------------------------------------------------------------------------
 
-Here, we will fit data to three similar lineshapes, in order to decide which
+Here, we will fit data to three similar line shapes, in order to decide which
 might be the better model.  We will start with a Gaussian profile, as in
 the previous chapter, but use the built-in :class:`GaussianModel` instead
 of one we write ourselves.  This is a slightly different version from the
@@ -747,7 +747,7 @@ constant:
 After constructing step-like data, we first create a :class:`StepModel`
 telling it to use the ``erf`` form (see details above), and a
 :class:`ConstantModel`.  We set initial values, in one case using the data
-and :meth:`guess` method for the intial step function paramaters, and
+and :meth:`guess` method for the initial step function parameters, and
 :meth:`make_params` arguments for the linear component.
 After making a composite model, we run :meth:`fit` and report the
 results, which give::
diff --git a/doc/fitting.rst b/doc/fitting.rst
index 6bd8aa5..2db7d02 100644
--- a/doc/fitting.rst
+++ b/doc/fitting.rst
@@ -156,35 +156,36 @@ Fitting Methods <fit-methods-table>`.
 
  Table of Supported Fitting Methods:
 
- +-----------------------+--------------------+---------------------+-------------------------+
- | Fitting               | ``method`` arg to  | :class:`Minimizer`  | ``method`` arg to       |
- | Method                | :func:`minimize`   | method              | :meth:`scalar_minimize` |
- +=======================+====================+=====================+=========================+
- | Levenberg-Marquardt   |  ``leastsq``       | :meth:`leastsq`     |   Not available         |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Nelder-Mead           |  ``nelder``        | :meth:`fmin`        | ``Nelder-Mead``         |
- +-----------------------+--------------------+---------------------+-------------------------+
- | L-BFGS-B              |  ``lbfgsb``        | :meth:`lbfgsb`      | ``L-BFGS-B``            |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Powell                |  ``powell``        |                     | ``Powell``              |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Conjugate Gradient    |  ``cg``            |                     | ``CG``                  |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Newton-CG             |  ``newton``        |                     | ``Newton-CG``           |
- +-----------------------+--------------------+---------------------+-------------------------+
- | COBYLA                |  ``cobyla``        |                     |  ``COBYLA``             |
- +-----------------------+--------------------+---------------------+-------------------------+
- | COBYLA                |  ``cobyla``        |                     |  ``COBYLA``             |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Truncated Newton      |  ``tnc``           |                     |  ``TNC``                |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Trust Newton-CGn      |  ``trust-ncg``     |                     |  ``trust-ncg``          |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Dogleg                |  ``dogleg``        |                     |  ``dogleg``             |
- +-----------------------+--------------------+---------------------+-------------------------+
- | Sequential Linear     |  ``slsqp``         |                     |  ``SLSQP``              |
- | Squares Programming   |                    |                     |                         |
- +-----------------------+--------------------+---------------------+-------------------------+
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Fitting Method        | ``method`` arg to            | :class:`Minimizer`  | ``method`` arg to           |
+ |                       | :func:`minimize`             | method              | :meth:`scalar_minimize`     |
+ +=======================+==============================+=====================+=============================+
+ | Levenberg-Marquardt   |  ``leastsq``                 | :meth:`leastsq`     |   Not available             |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Nelder-Mead           |  ``nelder``                  | :meth:`fmin`        | ``Nelder-Mead``             |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | L-BFGS-B              |  ``lbfgsb``                  | :meth:`lbfgsb`      | ``L-BFGS-B``                |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Powell                |  ``powell``                  |                     | ``Powell``                  |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Conjugate Gradient    |  ``cg``                      |                     | ``CG``                      |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Newton-CG             |  ``newton``                  |                     | ``Newton-CG``               |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | COBYLA                |  ``cobyla``                  |                     |  ``COBYLA``                 |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Truncated Newton      |  ``tnc``                     |                     |  ``TNC``                    |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Trust Newton-CG       |  ``trust-ncg``               |                     |  ``trust-ncg``              |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Dogleg                |  ``dogleg``                  |                     |  ``dogleg``                 |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Sequential Linear     |  ``slsqp``                   |                     |  ``SLSQP``                  |
+ | Squares Programming   |                              |                     |                             |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
+ | Differential          |  ``differential_evolution``  |                     | ``differential_evolution``  |
+ | Evolution             |                              |                     |                             |
+ +-----------------------+------------------------------+---------------------+-----------------------------+
 
 .. note::
 
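As a minimal sketch of how the ``method`` argument selects among these
solvers (the decaying-sine residual, data, and parameter names below are
illustrative, not taken from the upstream changes), one might write::

    import numpy as np
    from lmfit import Parameters, minimize

    def residual(pars, x, data):
        # decaying sine, built from the current parameter values
        amp = pars['amp'].value
        per = pars['period'].value
        decay = pars['decay'].value
        model = amp * np.sin(x / per) * np.exp(-x * x * decay)
        return model - data

    x = np.linspace(0, 10, 201)
    data = 5.0 * np.sin(x / 1.5) * np.exp(-x * x * 0.02)

    pars = Parameters()
    pars.add('amp', value=4.0, min=0, max=20)      # finite min/max bounds on
    pars.add('period', value=1.5, min=0.1, max=5)  # every varied parameter are
    pars.add('decay', value=0.05, min=0, max=2)    # required by differential_evolution

    out = minimize(residual, pars, args=(x,), kws={'data': data},
                   method='differential_evolution')

Changing ``method='differential_evolution'`` to ``'leastsq'`` or
``'nelder'`` switches solvers without touching the residual function.
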
@@ -336,6 +339,7 @@ The Minimizer object has a few public methods:
 
 .. method:: lbfgsb(**kws)
 
+
    perform fit with L-BFGS-B algorithm.  Keywords will be passed directly to
    :func:`scipy.optimize.fmin_l_bfgs_b`.
 
@@ -351,6 +355,10 @@ The Minimizer object has a few public methods:
     |   maxfun         | 2000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
     +------------------+----------------+------------------------------------------------------------+
 
+.. warning::
+
+  :meth:`lbfgsb` is deprecated.  Use :meth:`minimize` with ``method='lbfgsb'``.
+
 .. method:: fmin(**kws)
 
    perform fit with Nelder-Mead downhill simplex algorithm.  Keywords will be passed directly to
@@ -367,6 +375,10 @@ The Minimizer object has a few public methods:
     |   maxfun         | 5000*(nvar+1)  | maximum number of function calls (nvar= # of variables)    |
     +------------------+----------------+------------------------------------------------------------+
 
+.. warning::
+
+  :meth:`fmin` is deprecated.  Use :meth:`minimize` with ``method='nelder'``.
+
 
 .. method:: scalar_minimize(method='Nelder-Mead', hess=None, tol=None, **kws)
 
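A brief sketch of the migration these deprecation warnings imply, reusing
the hypothetical ``residual``, ``pars``, ``x``, and ``data`` names from the
earlier sketch::

    from lmfit import Minimizer

    fitter = Minimizer(residual, pars, fcn_args=(x,), fcn_kws={'data': data})
    # instead of the deprecated fitter.fmin() and fitter.lbfgsb():
    fitter.scalar_minimize(method='Nelder-Mead')   # replaces fitter.fmin()
    fitter.scalar_minimize(method='L-BFGS-B')      # replaces fitter.lbfgsb()
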
@@ -446,5 +458,3 @@ which would write out::
     [[Correlations]] (unreported correlations are <  0.100)
         C(period, shift)             =  0.797
         C(amp, decay)                =  0.582
-
-
diff --git a/doc/installation.rst b/doc/installation.rst
index 9a90056..dcdeaac 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -15,7 +15,7 @@ higher is recommended, but extensive testing on compatibility with various
 versions of scipy has not been done.  Lmfit does work with Python 2.7, and
 3.2 and 3.3.  No testing has been done with Python 3.4, but as the package
 is pure Python, relying only on scipy and numpy, no significant troubles
-are expected.  The `nose`_ frameworkt is required for running the test
+are expected.  The `nose`_ framework is required for running the test
 suite, and IPython and matplotlib are recommended.  If Pandas is available,
 it will be used in portions of lmfit.
 
@@ -56,7 +56,6 @@ and install using::
    python setup.py install
 
 
-
 Testing
 ~~~~~~~~~~
 
@@ -66,48 +65,18 @@ routinely run on the development version.  Running ``nosetests`` should run
 all of these tests to completion without errors or failures.
 
 Many of the examples in this documentation are distributed with lmfit in
-the ``examples`` folder, and sould also run for you.  Many of these require
+the ``examples`` folder, and should also run for you.  Many of these require matplotlib.
 
 
 Acknowledgements
 ~~~~~~~~~~~~~~~~~~
 
-LMFIT was originally written by Matthew Newville.  Substantial code and
-documentation improvements, especially for improved estimates of confidence
-intervals was provided by Till Stensitzki.  Much of the work on improved
-unit testing and high-level model functions was done by Daniel B. Allen,
-with substantial input from Antonino Ingargiola.  Many valuable suggestions
-for improvements have come from Christoph Deil.  The implementation of
-parameter bounds as described in the MINUIT documentation is taken from
-Jonathan J. Helmus' leastsqbound code, with permission.  The code for
-propagation of uncertainties is taken from Eric O. Le Bigot's uncertainties
-package, with permission.  The code obviously depends on, and owes a very
-large debt to the code in scipy.optimize.  Several discussions on the scipy
-mailing lists have also led to improvements in this code.
+.. literalinclude:: ../THANKS.txt
+
 
 License
 ~~~~~~~~~~~~~
 
 The LMFIT-py code is distributed under the following license:
 
-  Copyright (c) 2014 Matthew Newville, The University of Chicago, Till
-  Stensitzki, Freie Universitat Berlin, Daniel B. Allen, Johns Hopkins
-  University, Antonino Ingargiola, University of California, Los Angeles
-
-  Permission to use and redistribute the source code or binary forms of this
-  software and its documentation, with or without modification is hereby
-  granted provided that the above notice of copyright, these terms of use,
-  and the disclaimer of warranty below appear in the source code and
-  documentation, and that none of the names of above institutions or
-  authors appear in advertising or endorsement of works derived from this
-  software without specific prior written permission from all parties.
-
-  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-  DEALINGS IN THIS SOFTWARE.
-
-
+.. literalinclude:: ../LICENSE
diff --git a/doc/model.rst b/doc/model.rst
index 82551db..dc2b87f 100644
--- a/doc/model.rst
+++ b/doc/model.rst
@@ -11,7 +11,7 @@ has a parametrized model function meant to explain some phenomena and wants
 to adjust the numerical values for the model to most closely match some
 data.  With :mod:`scipy`, such problems are commonly solved with
 :func:`scipy.optimize.curve_fit`, which is a wrapper around
-:func:`scipy.optimize.leastsq`.  Since Lmit's :func:`minimize` is also a
+:func:`scipy.optimize.leastsq`.  Since Lmfit's :func:`minimize` is also a
 high-level wrapper around :func:`scipy.optimize.leastsq` it can be used for
 curve-fitting problems, but requires more effort than using
 :func:`scipy.optimize.curve_fit`.
@@ -23,7 +23,7 @@ function.  This is closer in spirit to :func:`scipy.optimize.curve_fit`,
 but with the advantages of using :class:`Parameters` and lmfit.
 
 In addition to allowing you turn any model function into a curve-fitting
-method, Lmfit also provides canonical definitions for many known lineshapes
+method, Lmfit also provides canonical definitions for many known line shapes
 such as Gaussian or Lorentzian peaks and Exponential decays that are widely
 used in many scientific domains.  These are available in the :mod:`models`
 module that will be discussed in more detail in the next chapter
@@ -102,7 +102,7 @@ independent variable is, and you can add or alter parameters too.
 On creation of the model, parameters are *not* created.  The model knows
 what the parameters should be named, but not anything about the scale and
 range of your data.  You will normally have to make these parameters and
-assign initiald values and other attributes.  To help you do this, each
+assign initial values and other attributes.  To help you do this, each
 model has a :meth:`make_params` method that will generate parameters with
 the expected names:
 
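As a brief sketch of that introspection (the ``decay`` function and its
argument names here are hypothetical, used only for illustration)::

    import numpy as np
    from lmfit import Model

    def decay(t, N, tau):
        "single exponential decay"
        return N * np.exp(-t / tau)

    mod = Model(decay)        # 't' is the independent variable;
                              # 'N' and 'tau' become parameters
    params = mod.make_params(N=10.0, tau=2.0)

    t = np.linspace(0, 5, 101)
    y = decay(t, 7.5, 1.2) + np.random.normal(size=101, scale=0.2)
    result = mod.fit(y, params, t=t)   # independent variable passed by keyword
    print(result.fit_report())
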
@@ -188,7 +188,6 @@ except that all the other features of lmfit are included such as that the
 :class:`Parameters` can have bounds and constraints and the result is a
 richer object that can be reused to explore the fit in more detail.
 
-.. module:: model
 
 The :class:`Model` class
 =======================================
@@ -196,7 +195,7 @@ The :class:`Model` class
 The :class:`Model` class provides a general way to wrap a pre-defined
 function as a fitting model.
 
-.. class::  Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix='' [, name=None[, **kws]]]]]])
+.. class::  Model(func[, independent_vars=None[, param_names=None[, missing=None[, prefix=''[, name=None[, **kws]]]]]])
 
     Create a model based on the user-supplied function.  This uses
    introspection to automatically convert argument names of the
@@ -210,27 +209,28 @@ function as a fitting model.
     :type param_names: ``None`` (default) or list of strings
     :param missing: how to handle missing values.
     :type missing: one of ``None`` (default), 'none', 'drop', or 'raise'.
-    :param prefix: prefix to add to all parameter names to distinguish components.
+    :param prefix: prefix to add to all parameter names to distinguish components in a :class:`CompositeModel`.
     :type prefix: string
-    :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
+    :param name: name for the model. When ``None`` (default) the name is the same as the model function (``func``).
     :type name: ``None`` or string.
-    :param kws:   addtional keyword arguments to pass to model function.
+    :param kws:   additional keyword arguments to pass to model function.
+
 
+Of course, the model function will have to return an array that will be the
+same size as the data being modeled.  Generally this is handled by also
+specifying one or more independent variables.
 
-    Of course, the model function will have to return an array that will be
-    the same size as the data being modeled.  Generally this is handled by
-    also specifying one or more independent variables.
 
 :class:`Model` class Methods
 ---------------------------------
 
-.. method:: eval(params=None[, **kws])
+.. method:: Model.eval(params=None[, **kws])
 
    evaluate the model function for a set of parameters and inputs.
 
    :param params: parameters to use for fit.
    :type params: ``None`` (default) or Parameters
-   :param kws:    addtional keyword arguments to pass to model function.
+   :param kws:    additional keyword arguments to pass to model function.
    :return:       ndarray for model given the parameters and other arguments.
 
    If ``params`` is ``None``, the values for all parameters are expected to
@@ -243,7 +243,7 @@ function as a fitting model.
    in using keyword arguments.
 
 
-.. method:: fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
+.. method:: Model.fit(data[, params=None[, weights=None[, method='leastsq'[, scale_covar=True[, iter_cb=None[, **kws]]]]]])
 
    perform a fit of the model to the ``data`` array with a set of
    parameters.
@@ -262,7 +262,7 @@ function as a fitting model.
    :type  iter_cb:  callable or ``None``
    :param verbose:  print a message when a new parameter is created due to a *hint*
    :type  verbose:  bool (default ``True``)
-   :param kws:      addtional keyword arguments to pass to model function.
+   :param kws:      additional keyword arguments to pass to model function.
   :return:         :class:`ModelFit` object.
 
    If ``params`` is ``None``, the internal ``params`` will be used. If it
@@ -275,13 +275,13 @@ function as a fitting model.
    arguments.
 
 
-.. method:: guess(data, **kws)
+.. method:: Model.guess(data, **kws)
 
    Guess starting values for model parameters.
 
     :param data: data array used to guess parameter values
     :type data:  ndarray
-    :param kws:  addtional options to pass to model function.
+    :param kws:  additional options to pass to model function.
     :return: :class:`Parameters` with guessed initial values for each parameter.
 
    by default this is left to raise a ``NotImplementedError``, but may be
@@ -290,7 +290,7 @@ function as a fitting model.
    the parameters.
 
 
-.. method:: make_params(**kws)
+.. method:: Model.make_params(**kws)
 
    Create a set of parameters for model.
 
@@ -301,9 +301,9 @@ function as a fitting model.
     parameter.
 
 
-.. method:: set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
+.. method:: Model.set_param_hint(name, value=None[, min=None[, max=None[, vary=True[, expr=None]]]])
 
-   set *hints* to use when creating parameters with :meth:`make_param` for
+   set *hints* to use when creating parameters with :meth:`Model.make_params` for
    the named parameter.  This is especially convenient for setting initial
    values.  The ``name`` can include the model's ``prefix`` or not.
 
@@ -325,13 +325,6 @@ function as a fitting model.
 :class:`Model` class Attributes
 ---------------------------------
 
-.. attribute:: components
-
-   a list of instances of :class:`Model` that make up a *composite model*.
-   See :ref:`composite_models_section`.  Normally, you will not need to use
-   this, but is used by :class:`Model` itself when constructing a composite
-   model from two or more models.
-
 .. attribute:: func
 
    The model function used to calculate the model.
@@ -340,10 +333,6 @@ function as a fitting model.
 
    list of strings for names of the independent variables.
 
-.. attribute:: is_composite
-
-   Boolean value for whether model is a composite model.
-
 .. attribute:: missing
 
    describes what to do for missing values.  The choices are
@@ -448,8 +437,8 @@ function you are modeling:
 independent variable
     a function argument that is not a parameter or otherwise part of the
     model, and that will be required to be explicitly provided as a
-    keyword argument for each fit with :meth:`fit` or evaluation
-    with :meth:`eval`.
+    keyword argument for each fit with :meth:`Model.fit` or evaluation
+    with :meth:`Model.eval`.
 
 Note that independent variables are not required to be arrays, or even
 floating point numbers.
@@ -522,10 +511,10 @@ fit.  There are four different ways to do this initialization that can be
 used in any combination:
 
   1. You can supply initial values in the definition of the model function.
-  2. You can initialize the parameters when creating parameters with :meth:`make_params`.
-  3. You can give parameter hints with :meth:`set_param_hint`.
+  2. You can initialize the parameters when creating parameters with :meth:`Model.make_params`.
+  3. You can give parameter hints with :meth:`Model.set_param_hint`.
   4. You can supply initial values for the parameters when you use the
-     :meth:`eval` or :meth:`fit` methods.
+     :meth:`Model.eval` or :meth:`Model.fit` methods.
 
 Of course these methods can be mixed, allowing you to overwrite initial
 values at any point in the process of defining and using the model.
@@ -549,10 +538,10 @@ with keywords can be treated as options.  It also means that some default
 initial value will always be available for the parameter.
 
 
-Initializing values with :meth:`make_params`
+Initializing values with :meth:`Model.make_params`
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-When creating parameters with :meth:`make_params` you can specify initial
+When creating parameters with :meth:`Model.make_params` you can specify initial
 values.  To do this, use keyword arguments for the parameter names and
 initial values::
 
@@ -564,10 +553,10 @@ Initializing values by setting parameter hints
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 After a model has been created, but prior to creating parameters with
-:meth:`make_params`, you can set parameter hints.  These allows you to set
+:meth:`Model.make_params`, you can set parameter hints.  This allows you to set
 not only a default initial value but also to set other parameter attributes
 controlling bounds, whether it is varied in the fit, or a constraint
-expression.  To set a parameter hint, you can use :meth:`set_param_hint`,
+expression.  To set a parameter hint, you can use :meth:`Model.set_param_hint`,
 as with::
 
     >>> mod = Model(myfunc)
@@ -583,17 +572,17 @@ Initializing values when using a model
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Finally, you can explicitly supply initial values when using a model.  That
-is, as with :meth:`make_params`, you can include values
-as keyword arguments to either the :meth:`eval` or :meth:`fit` methods::
+is, as with :meth:`Model.make_params`, you can include values
+as keyword arguments to either the :meth:`Model.eval` or :meth:`Model.fit` methods::
 
    >>> y1 = mod.eval(x=x, a=7.0, b=-2.0)
 
   >>> out = mod.fit(y, pars, x=x, a=3.0, b=-0.0)
 
-These approachess to initialization provide many opportunities for setting
+These approaches to initialization provide many opportunities for setting
 initial values for parameters.  The methods can be combined, so that you
 can set parameter hints but then change the initial value explicitly with
-:meth:`fit`.
+:meth:`Model.fit`.
 
 .. _model_param_hints_section:
 
@@ -602,10 +591,10 @@ Using parameter hints
 
 
 After a model has been created, you can give it hints for how to create
-parameters with :meth:`make_params`.  This allows you to set not only a
+parameters with :meth:`Model.make_params`.  This allows you to set not only a
 default initial value but also to set other parameter attributes
 controlling bounds, whether it is varied in the fit, or a constraint
-expression.   To set a parameter hint, you can use :meth:`set_param_hint`,
+expression.   To set a parameter hint, you can use :meth:`Model.set_param_hint`,
 as with::
 
     >>> mod = Model(myfunc)
@@ -619,8 +608,8 @@ which is simply a nested dictionary::
     {'a': {'value': 1}, 'b': {'max': 1.0, 'value': 0.3, 'min': 0}}
 
 
-You can change this dictionary directly, or with the :meth:`set_param_hint`
-method.  Either way, these parameter hints are used by :meth:`make_params`
+You can change this dictionary directly, or with the :meth:`Model.set_param_hint`
+method.  Either way, these parameter hints are used by :meth:`Model.make_params`
 when making parameters.
 
 An important feature of parameter hints is that you can force the creation
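A compact sketch of hints in practice, reusing the hypothetical ``decay``
model from the earlier sketch (the derived ``halflife`` parameter, created
through an ``expr`` hint, is likewise illustrative)::

    mod = Model(decay)
    mod.set_param_hint('N', value=10.0)
    mod.set_param_hint('tau', value=2.0, min=0)        # keep tau positive
    mod.set_param_hint('halflife', expr='tau*log(2)')  # forces creation of a
                                                       # new, constrained parameter
    params = mod.make_params()                         # hints are applied here
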
@@ -637,7 +626,7 @@ The :class:`ModelFit` class
 =======================================
 
 A :class:`ModelFit` is the object returned by :meth:`Model.fit`.  It is a
-sublcass of :class:`Minimizer`, and so contains many of the fit results.
+subclass of :class:`Minimizer`, and so contains many of the fit results.
 Of course, it knows the :class:`Model` and the set of :class:`Parameters`
 used in the fit, and it has methods to evaluate the model, to fit the data
 (or re-fit the data with changes to the parameters, or fit with different
@@ -654,19 +643,34 @@ with a model.
 A :class:`ModelFit` has several attributes holding values for fit results,
 and several methods for working with fits.
 
+.. class:: ModelFit()
+
+    A :class:`ModelFit` is intended to be created and returned by :meth:`Model.fit`.
+
+
+
 :class:`ModelFit` methods
 ---------------------------------
 
 These methods are all inherited from :class:`Minimizer` or from
 :class:`Model`.
 
-.. method:: eval(**kwargs)
+.. method:: ModelFit.eval(**kwargs)
 
    evaluate the model using the best-fit parameters and supplied
    independent variables.  The ``**kwargs`` arguments can be used to update
    parameter values and/or independent variables.
 
-.. method:: fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
+
+.. method:: ModelFit.eval_components(**kwargs)
+
+   evaluate each component of a :class:`CompositeModel`, returning an
+   ordered dictionary with the values for each component model.  The
+   returned dictionary will have keys of the model prefix or (if no prefix
+   is given), the model name.  The ``**kwargs`` arguments can be used to
+   update parameter values and/or independent variables.
+
+.. method:: ModelFit.fit(data=None[, params=None[, weights=None[, method=None[, **kwargs]]]])
 
    fit (or re-fit), optionally changing ``data``, ``params``, ``weights``,
    or ``method``, or changing the independent variable(s) with the
@@ -674,7 +678,7 @@ These methods are all inherited from :class:`Minimize` or from
    descriptions, and note that any value of ``None`` defaults to the last
    used value.
 
-.. method:: fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
+.. method:: ModelFit.fit_report(modelpars=None[, show_correl=True[, min_correl=0.1]])
 
    return a printable fit report for the fit with fit statistics, best-fit
    values with uncertainties and correlations.  As with :func:`fit_report`.
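
As a short, hypothetical sketch of these methods in use (``mod``,
``params``, ``y``, and ``x`` as in any composite-model fit)::

    result = mod.fit(y, params, x=x)
    print(result.fit_report(min_correl=0.5))  # only show correlations above 0.5
    comps = result.eval_components(x=x)       # ordered dict: prefix or model name -> ndarray
    for name, comp in comps.items():
        print(name, comp.mean())
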
@@ -769,7 +773,7 @@ These methods are all inherited from :class:`Minimizer` or from
 
 .. attribute::  nfree
 
-    integer number of free paramaeters in fit.
+    integer number of free parameters in fit.
 
 .. attribute::  nvarys
 
@@ -800,20 +804,24 @@ These methods are all inherited from :class:`Minimize` or from
    ndarray (or ``None``) of weighting values used in fit.
 
 
+.. index:: Composite models
+
 .. _composite_models_section:
 
-Creating composite models
-=============================
 
-One of the most interesting features of the :class:`Model` class is that
-models can be added together to give a composite model, with parameters
-from the component models all being available to influence the total sum of
-the separat component models.  This will become even more useful in the
-next chapter, when pre-built subclasses of :class:`Model` are discussed.
+Composite Models : adding (or multiplying) Models
+==============================================================
 
-For now, we'll consider a simple example will build a model of a Gaussian
-plus a line.  Obviously, we could build a model that included both
-components::
+One of the more interesting features of the :class:`Model` class is that
+Models can be added together or combined with basic algebraic operations
+(add, subtract, multiply, and divide) to give a composite model.  The
+composite model will have parameters from each of the component models,
+with all parameters being available to influence the whole model.  This
+ability to combine models will become even more useful in the next chapter,
+when pre-built subclasses of :class:`Model` are discussed.  For now, we'll
+consider a simple example and build a model of a Gaussian plus a line,
+to model a peak with a background. For such a simple problem, we could just
+build a model that included both components::
 
     def gaussian_plus_line(x, amp, cen, wid, slope, intercept):
         "line + 1-d gaussian"
@@ -826,16 +834,16 @@ and use that with::
 
     mod = Model(gaussian_plus_line)
 
-but, of course, we already had a function for a gaussian function, and
-maybe we'll discover that a linear background isn't sufficient and we'd
-have to alter the model again.  As an alternative we could just define a
-linear function::
+But we already had a function for a Gaussian, and maybe we'll
+discover that a linear background isn't sufficient which would mean the
+model function would have to be changed.  As an alternative we could define
+a linear function::
 
     def line(x, slope, intercept):
         "a line"
         return slope * x + intercept
 
-and build a composite model with::
+and build a composite model with just::
 
     mod = Model(gaussian) + Model(line)
 
@@ -846,9 +854,7 @@ This model has parameters for both component models, and can be used as:
 which prints out the results::
 
     [[Model]]
-     Composite Model:
-        gaussian
-        line
+        (Model(gaussian) + Model(line))
     [[Fit Statistics]]
         # function evals   = 44
         # data points      = 101
@@ -883,66 +889,107 @@ red line, and the initial fit is shown as a black dashed line.  In the
 figure on the right, the data is again shown in blue dots, and the Gaussian
 component shown as a black dashed line, and the linear component shown as a
 red dashed line.  These components were generated after the fit using the
-Models :meth:`eval` method::
+:meth:`ModelFit.eval_components` method of the `result`::
 
+    comps = result.eval_components()
 
-    comp_gauss = mod.components[0].eval(x=x)
-    comp_line  = mod.components[1].eval(x=x)
+which returns a dictionary of the components, using keys of the model name
+(or `prefix` if that is set).  This will use the parameter values in
+``result.params`` and the independent variables (``x``) used during the
+fit.  Note that while the :class:`ModelFit` held in `result` does store the
+best parameters and the best estimate of the model in ``result.best_fit``,
+the original model and parameters in ``pars`` are left unaltered.
 
-
-Note that we have to pass in ``x`` here, but not any of the final values
-for the parameters -- the current values for ``mod.params`` will be used,
-and these will be the best-fit values after a fit.  While the model does
-store the best parameters and the estimate of the data in ``mod.best_fit``,
-it does not actually store the data it fit to or the independent variables
--- here, ``x`` for that data.  That means you can easily apply this model
-to other data sets, or evaluate the model at other values of ``x``.   You
-may want to do this to give a finer or coarser spacing of data point,  or to
-extrapolate the model outside the fitting range.    This can be done with::
+You can apply this composite model to other data sets, or evaluate the
+model at other values of ``x``.  You may want to do this to give a finer or
+coarser spacing of data points, or to extrapolate the model outside the
+fitting range.  This can be done with::
 
     xwide = np.linspace(-5, 25, 3001)
     predicted = result.eval(x=xwide)
 
+In this example, the argument names for the model functions do not overlap.
+If they had, the ``prefix`` argument to :class:`Model` would have allowed
+us to identify which parameter went with which component model.  As we will
+see in the next chapter, using composite models with the built-in models
+provides a simple way to build up complex models.
+
+.. class::  CompositeModel(left, right, op[, **kws])
+
+    Create a composite model from two models (`left` and `right`) and a
+    binary operator (`op`).  Additional keywords are passed to
+    :class:`Model`.
+
+    :param left: left-hand side Model
+    :type left: :class:`Model`
+    :param right: right-hand side Model
+    :type right: :class:`Model`
+    :param op: binary operator
+    :type op: callable taking two arguments (`left` and `right`).
+
+Normally, one does not have to explicitly create a :class:`CompositeModel`,
+as doing::
+
+     mod = Model(fcn1) + Model(fcn2) * Model(fcn3)
+
+will automatically create a :class:`CompositeModel`.  In this example,
+`mod.left` will be `Model(fcn1)`, `mod.op` will be :meth:`operator.add`,
+and `mod.right` will be another CompositeModel that has a `left` attribute
+of `Model(fcn2)`, an `op` of :meth:`operator.mul`, and a `right` of
+`Model(fcn3)`.
+
+If you want to use a binary operator other than add, subtract, multiply, or
+divide that are supported through normal Python syntax, you'll need to
+explicitly create a :class:`CompositeModel` with the appropriate binary
+operator.  For example, to convolve two models, you could define a simple
+convolution function, perhaps as::
+
+    import numpy as np
+    def convolve(dat, kernel):
+        # simple convolution
+        npts = min(len(dat), len(kernel))
+        pad  = np.ones(npts)
+        tmp  = np.concatenate((pad*dat[0], dat, pad*dat[-1]))
+        out  = np.convolve(tmp, kernel, mode='valid')
+        noff = int((len(out) - npts)/2)
+        return (out[noff:])[:npts]
+
+which extends the data in both directions so that the convolving kernel
+function gives a valid result over the data range.  Because this function
+takes two array arguments and returns an array, it can be used as the
+binary operator.  A full script using this technique is here:
+
+.. literalinclude:: ../examples/doc_model3.py
 
-A final note: In this example, the argument names for the model functions
-do not overlap.  If they had, the ``prefix`` argument to :class:`Model`
-would have allowed us to identify which parameter went with which component
-model.  As we will see in the next chapter, using composite models with the
-built-in models provides a simple way to build up complex models.
-
-Model names for composite models
------------------------------------------
-
-By default a `Model` object has a `name` attribute containing the name of
-the model function. This name can be overridden when building a model::
-
-    my_model = Model(gaussian, name='my_gaussian')
-
-or by assigning the `name` attribute::
-
-    my_model = Model(gaussian)
-    my_model.name = 'my_gaussian'
-
-This name is used in the object representation (for example when printing)::
-
-    <lmfit.Model: my_gaussian>
-
-A composite model will have the name `'composite_fun'` by default, but as
-noted, we can overwrite it with a more meaningful string. This can be useful
-when dealing with multiple models.
+which prints out the results::
 
-For example, let assume we want to fit some bi-modal data. We initially try
-two Gaussian peaks::
+    [[Model]]
+        (Model(jump) <function convolve at 0x109ee4488> Model(gaussian))
+    [[Fit Statistics]]
+        # function evals   = 25
+        # data points      = 201
+        # variables        = 3
+        chi-square         = 21.692
+        reduced chi-square = 0.110
+    [[Variables]]
+        amplitude:   0.62106099 +/- 0.001783 (0.29%) (init= 1)
+        center:      4.49913218 +/- 0.009373 (0.21%) (init= 3.5)
+        mid:         5 (fixed)
+        sigma:       0.61936067 +/- 0.012977 (2.10%) (init= 1)
+    [[Correlations]] (unreported correlations are <  0.100)
+        C(amplitude, center)         =  0.336
+        C(amplitude, sigma)          =  0.274
 
-    model = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_')
-    model.name = '2-Gaussians model'
+and shows the plots:
 
-Here, instead of the standard name `'composite_func'`, we assigned a more
-meaningful name. Now, if we want to also fit with two Lorentzian peaks
-we can do similarly::
+.. _figModel3:
 
-    model2 = LorentzianModel(prefix='p1_') + LorentzianModel(prefix='p2_')
-    model2.name = '2-Lorentzians model'
+  .. image:: _images/model_fit3a.png
+     :target: _images/model_fit3a.png
+     :width: 48%
+  .. image:: _images/model_fit3b.png
+     :target: _images/model_fit3b.png
+     :width: 48%
 
-It is evident that assigning names will help to easily distinguish
-the different models.
+Using composite models with built-in or custom operators allows you to
+build complex models from testable sub-components.
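
To make the ``prefix`` mechanism concrete, here is a small sketch using the
built-in :class:`GaussianModel` discussed in the next chapter; with two
components of the same form, prefixes keep the parameter names distinct::

    from lmfit.models import GaussianModel

    mod = GaussianModel(prefix='p1_') + GaussianModel(prefix='p2_')
    pars = mod.make_params(p1_amplitude=10, p1_center=5.0, p1_sigma=1.0,
                           p2_amplitude=4,  p2_center=8.0, p2_sigma=1.0)
    # parameters are p1_amplitude, p1_center, ..., p2_sigma
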
diff --git a/doc/parameters.rst b/doc/parameters.rst
index be2f64f..d4ab5e3 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -90,7 +90,7 @@ feature.
    :param expr:  mathematical expression to use to evaluate value during fit.
 
    Each argument of :meth:`set` has a default value of ``None``, and will
    be set only if the provided value is not ``None``.  You can use this to
    update some Parameter attribute without affecting others, for example::
 
        p1 = Parameter('a', value=2.0)
@@ -178,7 +179,7 @@ The :class:`Parameters` class
    :attr:`name` and :attr:`value` of a Parameter.
 
    This is distinct from the :class:`Parameters` itself, as the dictionary
-   values are not :class:`Parameeter` objects, just the :attr:`value`.
+   values are not :class:`Parameter` objects, just the :attr:`value`.
    This can be a very convenient way to get updated values in an objective
    function.
 
@@ -203,4 +204,4 @@ which would make the objective function ``fcn2min`` above look like::
         model = v['amp'] * np.sin(x * v['omega'] + v['shift']) * np.exp(-x*x*v['decay'])
         return model - data
 
-The results are identical, and the difference is a stylisic choice.
+The results are identical, and the difference is a stylistic choice.
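
A minimal sketch of the :meth:`valuesdict` pattern described above (the
array and parameter names are illustrative)::

    import numpy as np
    from lmfit import Parameters

    x = np.linspace(0, 10, 101)
    params = Parameters()
    params.add('amp', value=10, min=0)
    params.add('decay', value=0.05)

    v = params.valuesdict()                     # plain {name: value} dictionary
    model = v['amp'] * np.exp(-x * v['decay'])  # floats, so no .value lookups
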
diff --git a/examples/doc_model2.py b/examples/doc_model2.py
index c54c12c..afb41c0 100644
--- a/examples/doc_model2.py
+++ b/examples/doc_model2.py
@@ -20,11 +20,6 @@ def line(x, slope, intercept):
 mod = Model(gaussian) + Model(line)
 pars  = mod.make_params( amp=5, cen=5, wid=1, slope=0, intercept=1)
 
-print mod
-
-for k, v in pars.items():
-    print k, v
-
 result = mod.fit(y, pars, x=x)
 
 print(result.fit_report())
diff --git a/examples/doc_model3.py b/examples/doc_model3.py
new file mode 100644
index 0000000..85fd2f9
--- /dev/null
+++ b/examples/doc_model3.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#<examples/doc_model3.py>
+
+import numpy as np
+from lmfit import Model, CompositeModel
+from lmfit.lineshapes import step, gaussian
+
+import matplotlib.pyplot as plt
+
+# create data from broadened step
+npts = 201
+x = np.linspace(0, 10, npts)
+y = step(x, amplitude=12.5, center=4.5, sigma=0.88, form='erf')
+y = y + np.random.normal(size=npts, scale=0.35)
+
+def jump(x, mid):
+    "heaviside step function"
+    o = np.zeros(len(x))
+    imid = max(np.where(x<=mid)[0])
+    o[imid:] = 1.0
+    return o
+
+def convolve(arr, kernel):
+    # simple convolution of two arrays
+    npts = min(len(arr), len(kernel))
+    pad  = np.ones(npts)
+    tmp  = np.concatenate((pad*arr[0], arr, pad*arr[-1]))
+    out  = np.convolve(tmp, kernel, mode='valid')
+    noff = int((len(out) - npts)/2)
+    return out[noff:noff+npts]
+#
+# create Composite Model using the custom convolution operator
+mod  = CompositeModel(Model(jump), Model(gaussian), convolve)
+
+pars = mod.make_params(amplitude=1, center=3.5, sigma=1.5, mid=5.0)
+
+# 'mid' and 'center' should be completely correlated, and 'mid' is
+# used as an integer index, so a very poor fit variable:
+pars['mid'].vary = False
+
+# fit this model to data array y
+result =  mod.fit(y, params=pars, x=x)
+
+print(result.fit_report())
+
+plot_components = False
+
+# plot results
+plt.plot(x, y,         'bo')
+if plot_components:
+    # generate components
+    comps = result.eval_components(x=x)
+    plt.plot(x, 10*comps['jump'], 'k--')
+    plt.plot(x, 10*comps['gaussian'], 'r-')
+else:
+    plt.plot(x, result.init_fit, 'k--')
+    plt.plot(x, result.best_fit, 'r-')
+plt.show()
+#<end examples/doc_model3.py>
diff --git a/examples/fit1.py b/examples/fit1.py
deleted file mode 100644
index edf278a..0000000
--- a/examples/fit1.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from lmfit import Parameters, minimize, report_fit
-
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-from scipy.optimize import leastsq
-
-try:
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-p_true = Parameters()
-p_true.add('amp', value=14.0)
-p_true.add('period', value=5.33)
-p_true.add('shift', value=0.123)
-p_true.add('decay', value=0.010)
-
-def residual(pars, x, data=None):
-    amp = pars['amp'].value
-    per = pars['period'].value
-    shift = pars['shift'].value
-    decay = pars['decay'].value
-
-    if abs(shift) > pi/2:
-        shift = shift - sign(shift)*pi
-    model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
-    if data is None:
-        return model
-    return (model - data)
-
-n = 2500
-xmin = 0.
-xmax = 250.0
-noise = random.normal(scale=0.7215, size=n)
-x     = linspace(xmin, xmax, n)
-data  = residual(p_true, x) + noise
-
-fit_params = Parameters()
-fit_params.add('amp', value=13.0)
-fit_params.add('period', value=2)
-fit_params.add('shift', value=0.0)
-fit_params.add('decay', value=0.02)
-
-out = minimize(residual, fit_params, args=(x,), kws={'data':data})
-
-fit = residual(fit_params, x)
-
-print ' N fev = ', out.nfev
-print out.chisqr, out.redchi, out.nfree
-
-print '### Error Report:'
-report_fit(fit_params)
-
-
-if HASPYLAB:
-    pylab.plot(x, data, 'ro')
-    pylab.plot(x, fit, 'b')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/fit_pvoigt.py b/examples/fit_pvoigt.py
deleted file mode 100644
index 0542236..0000000
--- a/examples/fit_pvoigt.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import sys
-
-from lmfit import Parameters, Parameter, Minimizer, report_fit
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-
-try:
-    import matplotlib
-    # matplotlib.use('WXAGG')
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-def per_iteration(pars, i, resid, x, *args, **kws):
-    if i < 10 or i % 10 == 0:
-        print( '====== Iteration ', i)
-        for p in pars.values():
-            print( p.name , p.value)
-
-def residual(pars, x, sigma=None, data=None):
-    yg = gaussian(x, pars['amp_g'].value,
-                  pars['cen_g'].value, pars['wid_g'].value)
-    yl = lorentzian(x, pars['amp_l'].value,
-                    pars['cen_l'].value, pars['wid_l'].value)
-
-    frac = pars['frac'].value
-    slope = pars['line_slope'].value
-    offset = pars['line_off'].value
-    model = (1-frac) * yg + frac * yl + offset + x * slope
-    if data is None:
-        return model
-    if sigma is  None:
-        return (model - data)
-    return (model - data)/sigma
-
-
-n = 601
-xmin = 0.
-xmax = 20.0
-x = linspace(xmin, xmax, n)
-
-p_true = Parameters()
-p_true.add('amp_g', value=21.0)
-p_true.add('cen_g', value=8.1)
-p_true.add('wid_g', value=1.6)
-p_true.add('frac', value=0.37)
-p_true.add('line_off', value=-1.023)
-p_true.add('line_slope', value=0.62)
-
-data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
-              p_true['wid_g'].value, p_true['frac'].value) +
-        random.normal(scale=0.23,  size=n) +
-        x*p_true['line_slope'].value + p_true['line_off'].value )
-
-if HASPYLAB:
-    pylab.plot(x, data, 'r+')
-
-pfit = [Parameter(name='amp_g', value=10),
-        Parameter(name='amp_g', value=10.0),
-        Parameter(name='cen_g', value=9),
-        Parameter(name='wid_g', value=1),
-        Parameter(name='frac', value=0.50),
-        Parameter(name='amp_l', expr='amp_g'),
-        Parameter(name='cen_l', expr='cen_g'),
-        Parameter(name='wid_l', expr='wid_g'),
-        Parameter(name='line_slope', value=0.0),
-        Parameter(name='line_off', value=0.0)]
-
-sigma = 0.021  # estimate of data error (for all data points)
-
-myfit = Minimizer(residual, pfit, iter_cb=per_iteration,
-                  fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                  scale_covar=True)
-
-myfit.prepare_fit()
-init = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, init, 'b--')
-
-myfit.leastsq()
-
-print(' Nfev = ', myfit.nfev)
-print( myfit.chisqr, myfit.redchi, myfit.nfree)
-
-report_fit(myfit.params, modelpars=p_true)
-
-fit = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, fit, 'k-')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/fit_pvoigt2.py b/examples/fit_pvoigt2.py
deleted file mode 100644
index 23320aa..0000000
--- a/examples/fit_pvoigt2.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import sys
-
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-
-from lmfit import Parameters, Parameter, Minimizer
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-from lmfit.printfuncs import report_fit
-
-try:
-    import matplotlib
-    matplotlib.use('WXAGG')
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-
-def residual(pars, x, sigma=None, data=None):
-    yg = gaussian(x, pars['amp_g'].value,
-                  pars['cen_g'].value, pars['wid_g'].value)
-    yl = lorentzian(x, pars['amp_l'].value,
-                    pars['cen_l'].value, pars['wid_l'].value)
-
-    slope = pars['line_slope'].value
-    offset = pars['line_off'].value
-    model =  yg +  yl + offset + x * slope
-    if data is None:
-        return model
-    if sigma is  None:
-        return (model - data)
-    return (model - data)/sigma
-
-
-n = 601
-xmin = 0.
-xmax = 20.0
-x = linspace(xmin, xmax, n)
-
-data = (gaussian(x, 21, 8.1, 1.2) +
-        lorentzian(x, 10, 9.6, 2.4) +
-        random.normal(scale=0.23,  size=n) +
-        x*0.5)
-
-
-if HASPYLAB:
-    pylab.plot(x, data, 'r+')
-
-pfit = [Parameter(name='amp_g',  value=10),
-        Parameter(name='cen_g',  value=9),
-        Parameter(name='wid_g',  value=1),
-
-        Parameter(name='amp_tot',  value=20),
-        Parameter(name='amp_l',  expr='amp_tot - amp_g'),
-        Parameter(name='cen_l',  expr='1.5+cen_g'),
-        Parameter(name='wid_l',  expr='2*wid_g'),
-
-        Parameter(name='line_slope', value=0.0),
-        Parameter(name='line_off', value=0.0)]
-
-sigma = 0.021  # estimate of data error (for all data points)
-
-myfit = Minimizer(residual, pfit,
-                  fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                  scale_covar=True)
-
-myfit.prepare_fit()
-init = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, init, 'b--')
-
-myfit.leastsq()
-
-print(' Nfev = ', myfit.nfev)
-print( myfit.chisqr, myfit.redchi, myfit.nfree)
-
-report_fit(myfit.params)
-
-fit = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, fit, 'k-')
-    pylab.show()
-
-
-
-
diff --git a/examples/fit_pvoigt_NelderMead.py b/examples/fit_pvoigt_NelderMead.py
deleted file mode 100644
index 279de10..0000000
--- a/examples/fit_pvoigt_NelderMead.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import sys
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-from lmfit import Parameters, Parameter, Minimizer, report_fit
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-
-try:
-    import matplotlib
-    matplotlib.use('WXAGG')
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-def per_iteration(pars, i, resid, x, *args, **kws):
-    if i < 10 or i % 10 == 0:
-        print( '====== Iteration ', i)
-        for p in pars.values():
-            print( p.name , p.value)
-
-def residual(pars, x, sigma=None, data=None):
-    yg = gaussian(x, pars['amp_g'].value,
-                  pars['cen_g'].value, pars['wid_g'].value)
-    yl = lorentzian(x, pars['amp_l'].value,
-                    pars['cen_l'].value, pars['wid_l'].value)
-
-    frac = pars['frac'].value
-    slope = pars['line_slope'].value
-    offset = pars['line_off'].value
-    model = (1-frac) * yg + frac * yl + offset + x * slope
-    if data is None:
-        return model
-    if sigma is  None:
-        return (model - data)
-    return (model - data)/sigma
-
-
-n = 601
-xmin = 0.
-xmax = 20.0
-x = linspace(xmin, xmax, n)
-
-p_true = Parameters()
-p_true.add('amp_g', value=21.0)
-p_true.add('cen_g', value=8.1)
-p_true.add('wid_g', value=1.6)
-p_true.add('frac', value=0.37)
-p_true.add('line_off', value=-1.023)
-p_true.add('line_slope', value=0.62)
-
-data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
-              p_true['wid_g'].value, p_true['frac'].value) +
-        random.normal(scale=0.23,  size=n) +
-        x*p_true['line_slope'].value + p_true['line_off'].value )
-
-if HASPYLAB:
-    pylab.plot(x, data, 'r+')
-
-pfit = [Parameter(name='amp_g', value=10),
-        Parameter(name='amp_g', value=10.0),
-        Parameter(name='cen_g', value=9),
-        Parameter(name='wid_g', value=1),
-        Parameter(name='frac', value=0.50),
-        Parameter(name='amp_l', expr='amp_g'),
-        Parameter(name='cen_l', expr='cen_g'),
-        Parameter(name='wid_l', expr='wid_g'),
-        Parameter(name='line_slope', value=0.0),
-        Parameter(name='line_off', value=0.0)]
-
-sigma = 0.021  # estimate of data error (for all data points)
-
-myfit = Minimizer(residual, pfit, # iter_cb=per_iteration,
-                  fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
-                  scale_covar=True)
-
-myfit.prepare_fit()
-init = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, init, 'b--')
-
-# fit with Nelder-Mead simplex method
-supported_methods = ('BFGS', 'COBYLA', 'SLSQP', 'Powell', 'Nelder-Mead')
-myfit.scalar_minimize(method='Nelder-Mead')
-
-
-print(' Nfev = ', myfit.nfev)
-# print( myfit.chisqr, myfit.redchi, myfit.nfree)
-# report_fit(myfit.params, modelpars=p_true)
-
-fit = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, fit, 'k-')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/fit_pvoigt_NelderMead2.py b/examples/fit_pvoigt_NelderMead2.py
deleted file mode 100644
index 111afda..0000000
--- a/examples/fit_pvoigt_NelderMead2.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import sys
-from numpy import linspace, exp, random
-
-from lmfit import Parameters, minimize
-from lmfit.lineshapes import gaussian, lorentzian, pvoigt
-
-try:
-    import matplotlib
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-def per_iteration(pars, i, resid, x, *args, **kws):
-    if i < 10 or i % 10 == 0:
-        print( '====== Iteration ', i)
-        for p in pars.values():
-            print( p.name , p.value)
-
-def residual(pars, x, sigma=None, data=None):
-    yg = gaussian(x, pars['amp_g'].value,
-                  pars['cen_g'].value, pars['wid_g'].value)
-    yl = lorentzian(x, pars['amp_l'].value,
-                    pars['cen_l'].value, pars['wid_l'].value)
-
-    frac = pars['frac'].value
-    slope = pars['line_slope'].value
-    offset = pars['line_off'].value
-    model = (1-frac) * yg + frac * yl + offset + x * slope
-    if data is None:
-        return model
-    if sigma is  None:
-        return (model - data)
-    return (model - data)/sigma
-
-
-n = 601
-xmin = 0.
-xmax = 20.0
-x = linspace(xmin, xmax, n)
-
-p_true = Parameters()
-p_true.add('amp_g', value=21.0)
-p_true.add('cen_g', value=8.1)
-p_true.add('wid_g', value=1.6)
-p_true.add('frac', value=0.37)
-p_true.add('line_off', value=-1.023)
-p_true.add('line_slope', value=0.62)
-
-data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
-              p_true['wid_g'].value, p_true['frac'].value) +
-        random.normal(scale=0.23,  size=n) +
-        x*p_true['line_slope'].value + p_true['line_off'].value )
-
-
-pfit = Parameters()
-pfit.add('amp_g', value=10)
-pfit.add('amp_g', value=10.0)
-pfit.add('cen_g', value=9)
-pfit.add('wid_g', value=1)
-pfit.add('frac', value=0.50)
-pfit.add('amp_l', expr='amp_g')
-pfit.add('cen_l', expr='cen_g')
-pfit.add('wid_l', expr='wid_g')
-pfit.add('line_slope', value=0.0)
-pfit.add('line_off', value=0.0)
-
-sigma = 0.021
-
-myfit = minimize(residual, pfit, method='nelder',
-                 args=(x,), kws={'sigma':sigma, 'data':data})
-
-print(' Nfev = ', myfit.nfev)
-
-fit = residual(myfit.params, x)
-
-if HASPYLAB:
-    pylab.plot(x, data, 'r+')
-    pylab.plot(x, fit, 'k-')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/lmfit_and_emcee.py b/examples/lmfit_and_emcee.py
new file mode 100644
index 0000000..0d6d163
--- /dev/null
+++ b/examples/lmfit_and_emcee.py
@@ -0,0 +1,124 @@
+from __future__ import print_function
+import lmfit
+import numpy as np
+import emcee
+
+try:
+    import matplotlib.pyplot as plt
+    HASPYLAB = True
+except ImportError:
+    HASPYLAB = False
+
+def create_prior(params):
+    """
+    emccee uses a uniform prior for every variable.
+    Here we create a functions which checks the bounds
+    and returns np.inf if a value is outside of its
+    allowed range. WARNING: A uniform prior may not be
+    what you want!
+    """
+    none_to_inf = lambda x, sign=1: sign*np.inf if x is None else x
+    lower_bounds = np.array([none_to_inf(i.min, -1) for i in params.values() if i.vary])
+    upper_bounds = np.array([none_to_inf(i.max, 1) for i in params.values() if i.vary])
+    def bounds_prior(values):
+        values = np.asarray(values)
+        is_ok = np.all((lower_bounds < values) & (values < upper_bounds))
+        return 0 if is_ok else -np.inf
+    return bounds_prior
+
+def create_lnliklihood(mini, sigma=None):
+    """create a normal-likihood from the residuals"""
+    def lnprob(vals, sigma=sigma):
+        for v, p in zip(vals, [p for p in mini.params.values() if p.vary]):
+            p.value = v
+        residuals = mini.userfcn(mini.params)
+        if sigma is None:
+            #sigma is either the error estimate or it will
+            #be part of the sampling.
+            sigma = vals[-1]
+        val = -0.5*np.sum(np.log(2*np.pi*sigma**2) + (residuals/sigma)**2)
+        return val
+    return lnprob
+
+def starting_guess(mini, estimate_sigma=True):
+    """
+    Use best a fit as a starting point for the samplers.
+    If no sigmas are given, it is assumed that
+    all points have the same uncertainty which will
+    be also part of the sampled parameters.
+    """
+    vals = [i.value for i in mini.params.values() if i.vary]
+    if estimate_sigma:
+        vals.append(mini.residual.std())
+    return vals
+
+def create_all(mini, sigma=None):
+    """
+    creates the log-poposterior function from a minimizer.
+    sigma should is either None or an array with the
+    1-sigma uncertainties of each residual point. If None,
+    sigma will be assumed the same for all residuals and
+    is added to the sampled parameters.
+    """
+    sigma_given = sigma is not None
+    lnprior = create_prior(mini.params)
+    lnprob = create_lnliklihood(mini, sigma=sigma)
+    guess = starting_guess(mini, not sigma_given)
+    if sigma_given:
+        func = lambda x: lnprior(x[:]) + lnprob(x)
+    else:
+        func = lambda x: lnprior(x[:-1]) + lnprob(x)
+    return func, guess
+
+# Set up the example problem.
+params = lmfit.Parameters()
+params.add_many(('a', 5),
+               ('b', -5),
+               ('t1', 1, 1, 0),
+               ('t2', 15, 1, 0))
+
+x = np.linspace(0, 20, 350)
+a, b, t1, t2 = 2, 3, 2, 10  #Real values
+y_true = a * np.exp(-x/t1) + b * np.exp(-x/t2)
+sigma = 0.02
+y = y_true + np.random.randn(x.size)*sigma
+
+def residuals(paras):
+    a = paras['a'].value
+    b = paras['b'].value
+    t1 = paras['t1'].value
+    t2 = paras['t2'].value
+    return a * np.exp(-x/t1) + b * np.exp(-x/t2) - y
+
+#Fit the data with lmfit.
+mini = lmfit.Minimizer(residuals, params)
+mini.leastsq()
+lmfit.report_errors(params)
+
+# Create lnfunc and the starting distribution.
+lnfunc, guess = create_all(mini)
+nwalkers, ndim = 30, len(guess)
+p0 = emcee.utils.sample_ball(guess, 0.1*np.array(guess), nwalkers)
+sampler = emcee.EnsembleSampler(nwalkers, ndim, lnfunc)
+steps = 500
+sampler.run_mcmc(p0, steps)
+
+if HASPYLAB:
+    fig, axes = plt.subplots(5, 1, sharex=True, figsize=(8, 9))
+    for (i, name, rv) in zip(range(5), list(params.keys()) + ['sigma'], [a, b, t1, t2, sigma]):
+        axes[i].plot(sampler.chain[:, :, i].T, color="k", alpha=0.05)
+        axes[i].yaxis.set_major_locator(plt.MaxNLocator(5))
+        axes[i].axhline(rv, color="#888888", lw=2)
+        axes[i].set_ylabel("$%s$"% name)
+    axes[-1].set_xlabel("Steps")
+
+    plt.figure()
+
+    try:
+        import triangle # use pip install triangle_plot
+        burnin = 100
+        samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
+        triangle.corner(samples, labels=list(params.keys()) + ['sigma'],
+                        truths=[a, b, t1, t2, sigma])
+    except ImportError:
+        print("Please install triangle_plot for a nice overview graphic")
\ No newline at end of file
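
As a possible follow-up to the example above (a sketch reusing the example's
`sampler`, `params`, and `ndim`), the chain can be reduced to point estimates
and uncertainties after discarding a burn-in:

    # flatten the post-burn-in chain and take percentiles per parameter
    burnin = 100
    flat = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    for i, name in enumerate(list(params.keys()) + ['sigma']):
        lo, med, hi = np.percentile(flat[:, i], [16, 50, 84])
        print("%s = %.4f (+%.4f / -%.4f)" % (name, med, hi - med, med - lo))
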
diff --git a/examples/m1.py b/examples/m1.py
deleted file mode 100644
index 352ce22..0000000
--- a/examples/m1.py
+++ /dev/null
@@ -1,26 +0,0 @@
-
-import numpy as np
-from lmfit.old_models1d import  GaussianModel
-import matplotlib.pyplot as plt
-
-data = np.loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1]
-
-model = GaussianModel()  # background='linear'
-
-# model.guess_starting_values(y, x, negative=False)
-# model.params['bkg_offset'].value=min(y)
-
-init_fit = model.model(x=x) + model.calc_background(x)
-model.fit(y, x=x)
-
-print model.fit_report()
-
-final_fit = model.model(x=x)
-
-plt.plot(x, y)
-plt.plot(x, init_fit)
-plt.plot(x, final_fit)
-plt.show()
-
diff --git a/examples/model1d_doc1.py b/examples/model1d_doc1.py
deleted file mode 100644
index 57e74b3..0000000
--- a/examples/model1d_doc1.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import numpy as np
-from lmfit.old_models1d import  GaussianModel
-import matplotlib.pyplot as plt
-
-data = np.loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1]
-
-model = GaussianModel()
-model.guess_starting_values(y, x=x)
-# model.params['amplitude'].value=6.0
-
-init_fit = model.model(x=x)
-model.fit(y, x=x)
-
-print model.fit_report(min_correl=0.25)
-
-final_fit = model.model(x=x)
-
-plt.plot(x, final_fit, 'r-')
-plt.plot(x, init_fit, 'k--')
-plt.plot(x, y,         'bo')
-plt.show()
diff --git a/examples/model1d_doc2.py b/examples/model1d_doc2.py
deleted file mode 100644
index bf3b1f7..0000000
--- a/examples/model1d_doc2.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import numpy as np
-from lmfit.old_models1d import  GaussianModel, VoigtModel
-import matplotlib.pyplot as plt
-
-data = np.loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1]
-
-model = VoigtModel(background='linear')
-
-# get default starting values, but then alter them
-model.guess_starting_values(y, x=x)
-model.params['amplitude'].value = 2.0
-
-init_fit = model.model(x=x)
-
-# the actual fit
-model.fit(y, x=x)
-
-print model.fit_report(min_correl=0.25)
-
-vfit = model.model(x=x)
-
-
-mod2 = GaussianModel(background='linear')
-
-mod2.fit(y, x=x)
-gfit = mod2.model(x=x)
-
-print mod2.fit_report(min_correl=0.25)
-
-print 'Voigt    Sum of Squares: ', ((vfit - y)**2).sum()
-print 'Gaussian Sum of Squares: ', ((gfit - y)**2).sum()
-
-plt.plot(x, vfit, 'r-')
-plt.plot(x, gfit, 'b-')
-plt.plot(x, y,    'bo')
-plt.show()
diff --git a/examples/models.py b/examples/models.py
deleted file mode 100644
index ee24ce2..0000000
--- a/examples/models.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Sun Apr 22 04:24:43 2012
-
- at author: Tillsten
-"""
-
-from lmfit import Parameters, Minimizer
-import numpy as np
-import matplotlib.pyplot as plt
-
-class LmModel(object):
-    """ 
-    Base class for all models. 
-    
-    Models take x and y and return 
-    """
-    def __init__(self,x,y):
-        self.x, self.y=x,y
-        self.parameters=Parameters()
-        self.min=Minimizer(self.residual, self.parameters)
-    
-    def print_para(self):
-        for i in self.parameters.values:
-            print i
-            
-    def func(self,paras):
-        raise NotImplementedError
-    
-    def est_startvals(self):
-        raise NotImplementedError
-    
-    def residual(self,paras):
-        return self.func(paras)-self.y
-        
-    def fit(self):
-        self.min.leastsq()
-        self.y_model=self.func(self.parameters)
-        
-    
-
-class Linear(LmModel):
-    """
-    y = a*x + b
-    """    
-    def __init__(self,x,y):        
-        LmModel.__init__(self,x,y)  
-        self.parameters.add_many(('a',0), ('b',0))
-        self.est_startvals()
-        
-    def est_startvals(self):        
-        a, b = np.polyfit(self.x,self.y,1)
-        self.parameters['a'].value = a
-        self.parameters['b'].value = b 
-               
-    def func(self,paras):
-        a=paras['a'].value 
-        b=paras['b'].value 
-        return a*self.x+b
-        
-
-
-class ExpDecay(LmModel):
-    """
-    y = a*exp(-x / b) + c
-    """    
-    def __init__(self,x,y):        
-        LmModel.__init__(self,x,y)         
-        self.parameters.add_many(('a',0), ('b',0),('c',0))
-        self.est_startvals()
-        
-    def est_startvals(self):        
-        c = np.min(self.y)
-        a, b = np.polyfit(self.x, np.log(self.y-c+0.5),1)
-        self.parameters['a'].value = np.exp(b)
-        self.parameters['b'].value = 1/b
-               
-    def func(self,paras):
-        a=paras['a'].value 
-        b=paras['b'].value 
-        c=paras['c'].value
-        return a*np.exp(-x / b) + c
-        
-        
-class Gaussian(LmModel):
-    """
-    y = a*exp(-(x-xc)**2/(2*w))+c
-    """
-    def __init__(self,x,y):        
-        LmModel.__init__(self,x,y)         
-        self.parameters.add_many(('a',0), ('xc',0),('w',0),('c',0))
-        self.est_startvals()
-        
-    def est_startvals(self):        
-        c = np.min(self.y)
-        xc = x[np.argmax(abs(y))]
-        a = np.max(y)
-        w = abs(x[np.argmin(abs(a/2.-y))]-x[np.argmax(y)])*2
-        self.parameters['c'].value=c
-        self.parameters['xc'].value=xc
-        self.parameters['a'].value=a
-        self.parameters['w'].value=w
-        
-    def func(self,paras):
-        c=paras['c'].value 
-        xc=paras['xc'].value 
-        a=paras['a'].value
-        w=paras['w'].value
-        return a*np.exp(-(self.x-xc)**2/(2*w))+c
-    
-        
-#x=np.linspace(-5,5,20)
-#y=3*x+1+np.random.randn(x.size)
-#lm=Linear(x,y)
-#lm.fit()
-
-x=np.linspace(-5,5,20)
-y= 5*np.exp(-x / 3.) + 3+ 4*np.random.randn(x.size)
-lm=ExpDecay(x,y)
-lm.fit()
-
-plt.plot(lm.x, lm.y)
-plt.plot(lm.x, lm.y_model)
-plt.show()
-
-
-        
-        
\ No newline at end of file
diff --git a/examples/models_doc2.py b/examples/models_doc2.py
deleted file mode 100644
index a5ae679..0000000
--- a/examples/models_doc2.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-#<examples/models_doc2.py>
-from numpy import loadtxt
-from lmfit import fit_report
-from lmfit.models import GaussianModel, LinearModel
-import matplotlib.pyplot as plt
-
-data = loadtxt('model1d_gauss.dat')
-x = data[:, 0]
-y = data[:, 1] + x  * 0.1 - 3.0
-
-gauss = GaussianModel()
-gauss.guess_starting_values(y, x=x)
-
-line = LinearModel()
-line.params['slope'].value  = 0
-line.params['intercept'].value  = -1.0
-
-total = gauss + line
-
-result = total.fit(y, x=x)
-
-print fit_report(result.params, min_correl=0.25)
-
-plt.plot(x, y,         'bo')
-plt.plot(x, result.init_fit, 'k--')
-plt.plot(x, result.best_fit, 'r-')
-plt.show()
-
-#<end examples/models_doc2.py>
diff --git a/examples/peakfit_1.py b/examples/peakfit_1.py
deleted file mode 100644
index 7c72306..0000000
--- a/examples/peakfit_1.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
-from scipy.optimize import leastsq
-try:
-    import pylab
-    HASPYLAB = True
-except ImportError:
-    HASPYLAB = False
-
-
-from lmfit import Parameters, Minimizer, report_fit
-from lmfit.lineshapes import gaussian
-
-def residual(pars, x, data=None):
-    g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
-    g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
-    model = g1 + g2
-    if data is None:
-        return model
-    return (model - data)
-
-n    = 601
-xmin = 0.
-xmax = 15.0
-noise = random.normal(scale=.65, size=n)
-x = linspace(xmin, xmax, n)
-
-fit_params = Parameters()
-fit_params.add_many(('a1', 12.0, True, None, None, None),
-                    ('c1',  5.3, True, None, None, None),
-                    ('w1',  1.0, True, None, None, None),
-                    ('a2',  9.1, True, None, None, None),
-                    ('c2',  8.1, True, None, None, None),
-                    ('w2',  2.5, True, None, None, None))
-
-data  = residual(fit_params, x) + noise
-
-if HASPYLAB:
-    pylab.plot(x, data, 'r+')
-
-fit_params = Parameters()
-fit_params.add_many(('a1',  8.0, True, None, 14., None),
-                    ('c1',  5.0, True, None, None, None),
-                    ('w1',  0.7, True, None, None, None),
-                    ('a2',  3.1, True, None, None, None),
-                    ('c2',  8.8, True, None, None, None))
-
-fit_params.add('w2', expr='2.5*w1')
-
-myfit = Minimizer(residual, fit_params,
-                  fcn_args=(x,), fcn_kws={'data':data})
-
-myfit.prepare_fit()
-
-init = residual(fit_params, x)
-
-if HASPYLAB:
-    pylab.plot(x, init, 'b--')
-
-myfit.leastsq()
-
-print ' N fev = ', myfit.nfev
-print myfit.chisqr, myfit.redchi, myfit.nfree
-
-report_fit(fit_params)
-
-fit = residual(fit_params, x)
-
-if HASPYLAB:
-    pylab.plot(x, fit, 'k-')
-    pylab.show()
-
-
-
-
-
diff --git a/examples/plot_fit.py b/examples/plot_fit.py
new file mode 100644
index 0000000..4e39577
--- /dev/null
+++ b/examples/plot_fit.py
@@ -0,0 +1,26 @@
+import lmfit
+import numpy as np
+from matplotlib import pyplot as plt
+
+# construct data
+x = np.linspace(-4, 4)
+y = np.exp(-x**2)
+
+noise = np.random.randn(x.size) * 0.1
+y += noise
+
+# define model and fit
+model = lmfit.models.GaussianModel()
+params = model.guess(y, x=x)
+fit = model.fit(y, params, x=x, weights=1/noise**2)
+
+fig = fit.plot()
+
+# customize the plot after the fact
+ax_residuals, ax_fit = fig.get_axes()
+ax_residuals.set_title('The residuals')
+ax_fit.set_title('An example gaussian fit')
+ax_residuals.get_legend().set_visible(False)
+ax_fit.get_legend().texts[0].set_size('small')
+
+plt.show()
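
Beyond the plot itself, the numerical results can be inspected as well; a
short sketch reusing `fit` from above (assuming the fit result's fit_report
method):

    # print best-fit values, uncertainties and parameter correlations
    print(fit.fit_report(min_correl=0.25))
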
diff --git a/examples/plot_fit2.py b/examples/plot_fit2.py
new file mode 100644
index 0000000..d322155
--- /dev/null
+++ b/examples/plot_fit2.py
@@ -0,0 +1,25 @@
+import lmfit
+import numpy as np
+from matplotlib import pyplot as plt
+
+# construct data
+x = np.linspace(-4, 4)
+y = np.exp(-x**2)
+
+noise = np.random.randn(x.size) * 0.1
+y += noise
+
+# define model and fit
+model_gaussian = lmfit.models.GaussianModel()
+params = model_gaussian.guess(y, x=x)
+fit_gaussian = model_gaussian.fit(y, params, x=x, weights=1/noise**2)
+
+# plot the fit with customization
+fig = fit_gaussian.plot(fig_kws=dict(figsize=[8,7]),
+                        ax_fit_kws=dict(title='The gaussian fit'),
+                        initfmt='k:', datafmt='ks',
+                        fit_kws=dict(lw=2, color='red'),
+                        data_kws=dict(ms=8, markerfacecolor='white'))
+
+fig.set_tight_layout(True)
+plt.show()
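
`plot()` creates its own figure with both panels; to place the panels on
existing axes instead, the per-panel methods can be used (a sketch, assuming
the companion `plot_fit` and `plot_residuals` methods from the same plotting
addition):

    fig, (ax_res, ax_fit) = plt.subplots(2, 1, figsize=(6, 8))
    fit_gaussian.plot_residuals(ax=ax_res)
    fit_gaussian.plot_fit(ax=ax_fit)
    plt.show()
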
diff --git a/examples/use_models1d.py b/examples/use_models1d.py
deleted file mode 100644
index d5d9f4b..0000000
--- a/examples/use_models1d.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import numpy as np
-from lmfit.old_models1d import LinearModel, QuadraticModel, ExponentialModel
-from lmfit.old_models1d import  LorenztianModel, GaussianModel, VoigtModel
-import matplotlib.pyplot as plt
-
-
-x  = np.linspace(0, 10, 101)
-# dat = 118.0 + 10.0*np.exp(-x/7.0) + 5e-2*np.random.randn(len(x))
-# dat = 18.0 + 1.5*x  + 5.6*np.random.randn(len(x))
-
-sig = 0.47
-amp = 12.00
-cen = 5.66
-eps = 0.15
-off = 9
-slo = 0.0012
-sca = 1./(2.0*np.sqrt(2*np.pi))/sig
-
-noise =  eps*np.random.randn(len(x))
-
-dat = off +slo*x + amp*sca* np.exp(-(x-cen)**2 / (2*sig)**2) + noise
-
-# mod = ExponentialModel(background='linear')
-# mod = LinearModel()
-
-mod = GaussianModel(background='quad')
-mod = VoigtModel(background='quad')
-mod = LorenztianModel(background='quad')
-mod.guess_starting_values(dat, x, negative=False)
-mod.params['bkg_offset'].value=min(dat)
-
-init = mod.model(x=x)+mod.calc_background(x)
-mod.fit(dat, x=x)
-
-
-print mod.fit_report()
-
-fit = mod.model(x=x)+mod.calc_background(x)
-
-plt.plot(x, dat)
-plt.plot(x, init)
-plt.plot(x, fit)
-plt.show()
-
diff --git a/lmfit/__init__.py b/lmfit/__init__.py
index fe436e8..b325000 100644
--- a/lmfit/__init__.py
+++ b/lmfit/__init__.py
@@ -40,7 +40,7 @@ from .confidence import conf_interval, conf_interval2d
 from .printfuncs import (fit_report, ci_report,
                          report_fit, report_ci, report_errors)
 
-from .model import Model
+from .model import Model, CompositeModel
 from . import models
 
 from . import uncertainties
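
With `CompositeModel` now exported, two models can be combined with an
explicit binary operator. A minimal sketch (illustrative model functions,
equivalent to `Model(gaussian) + Model(line)`):

    import operator
    import numpy as np
    from lmfit import CompositeModel, Model

    def gaussian(x, amp, cen, wid):
        return amp * np.exp(-(x - cen)**2 / (2 * wid**2))

    def line(x, slope, intercept):
        return slope * x + intercept

    mod = CompositeModel(Model(gaussian), Model(line), operator.add)
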
diff --git a/lmfit/_differentialevolution.py b/lmfit/_differentialevolution.py
new file mode 100644
index 0000000..41312e1
--- /dev/null
+++ b/lmfit/_differentialevolution.py
@@ -0,0 +1,696 @@
+"""
+differential_evolution: The differential evolution global optimization algorithm
+Added by Andrew Nelson 2014
+"""
+from __future__ import division, print_function, absolute_import
+import numpy as np
+from scipy.optimize import OptimizeResult, minimize
+from scipy.optimize.optimize import _status_message
+import numbers
+
+__all__ = ['differential_evolution']
+
+_MACHEPS = np.finfo(np.float64).eps
+
+
+def differential_evolution(func, bounds, args=(), strategy='best1bin',
+                           maxiter=None, popsize=15, tol=0.01,
+                           mutation=(0.5, 1), recombination=0.7, seed=None,
+                           callback=None, disp=False, polish=True,
+                           init='latinhypercube'):
+    """Finds the global minimum of a multivariate function.
+    Differential Evolution is stochastic in nature (it does not use gradient
+    methods) to find the minimum, and can search large areas of candidate
+    space, but often requires more function evaluations than
+    conventional gradient-based techniques.
+
+    The algorithm is due to Storn and Price [1]_.
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.  Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a  tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence
+        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
+        defining the lower and upper bounds for the optimizing argument of
+        `func`. It is required to have ``len(bounds) == len(x)``.
+        ``len(bounds)`` is used to determine the number of parameters in ``x``.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'.
+    maxiter : int, optional
+        The maximum number of times the entire population is evolved.
+        The maximum number of function evaluations is:
+        ``maxiter * popsize * len(x)``
+    popsize : int, optional
+        A multiplier for setting the total population size.  The population has
+        ``popsize * len(x)`` individuals.
+    tol : float, optional
+        When the mean of the population energies, multiplied by tol,
+        divided by the standard deviation of the population energies
+        is greater than 1 the solving process terminates:
+        ``convergence = mean(pop) * tol / stdev(pop) > 1``
+    mutation : float or tuple(float, float), optional
+        The mutation constant.
+        If specified as a float it should be in the range [0, 2].
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        ``U[min, max)``. Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. Increasing
+        this value allows a larger number of mutants to progress into the next
+        generation, but at the risk of population stability.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with seed.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Display status messages
+    callback : callable, `callback(xk, convergence=val)`, optional:
+        A function to follow the progress of the minimization. ``xk`` is
+        the current value of ``x0``. ``val`` represents the fractional
+        value of the population convergence.  When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
+        method is used to polish the best population member at the end, which
+        can improve the minimization slightly.
+    init : string, optional
+        Specify how the population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'random'
+
+        The default is 'latinhypercube'. Latin Hypercube sampling tries to
+        maximize coverage of the available parameter space. 'random' initializes
+        the population randomly - this has the drawback that clustering can
+        occur, preventing the whole of parameter space being covered.
+
+    Returns
+    -------
+    res : OptimizeResult
+        The optimization result represented as a `OptimizeResult` object.
+        Important attributes are: ``x`` the solution array, ``success`` a
+        Boolean flag indicating if the optimizer exited successfully and
+        ``message`` which describes the cause of the termination. See
+        `OptimizeResult` for a description of other attributes. If `polish`
+        was employed, then OptimizeResult also contains the `jac` attribute.
+
+    Notes
+    -----
+    Differential evolution is a stochastic population based method that is
+    useful for global optimization problems. At each pass through the population
+    the algorithm mutates each candidate solution by mixing with other candidate
+    solutions to create a trial candidate. There are several strategies [2]_ for
+    creating trial candidates, which suit some problems more than others. The
+    'best1bin' strategy is a good starting point for many systems. In this
+    strategy two members of the population are randomly chosen. Their difference
+    is used to mutate the best member (the `best` in `best1bin`), :math:`b_0`,
+    so far:
+
+    .. math::
+
+        b' = b_0 + mutation * (population[rand0] - population[rand1])
+
+    A trial vector is then constructed. Starting with a randomly chosen 'i'th
+    parameter the trial is sequentially filled (in modulo) with parameters from
+    `b'` or the original candidate. The choice of whether to use `b'` or the
+    original candidate is made with a binomial distribution (the 'bin' in
+    'best1bin') - a random number in [0, 1) is generated.  If this number is
+    less than the `recombination` constant then the parameter is loaded from
+    `b'`, otherwise it is loaded from the original candidate.  The final
+    parameter is always loaded from `b'`.  Once the trial candidate is built
+    its fitness is assessed. If the trial is better than the original candidate
+    then it takes its place. If it is also better than the best overall
+    candidate it also replaces that.
+    To improve your chances of finding a global minimum use higher `popsize`
+    values, with higher `mutation` and (dithering), but lower `recombination`
+    values. This has the effect of widening the search radius, but slowing
+    convergence.
+
+    .. versionadded:: 0.15.0
+
+    Examples
+    --------
+    Let us consider the problem of minimizing the Rosenbrock function. This
+    function is implemented in `rosen` in `scipy.optimize`.
+
+    >>> from scipy.optimize import rosen, differential_evolution
+    >>> bounds = [(0,2), (0, 2), (0, 2), (0, 2), (0, 2)]
+    >>> result = differential_evolution(rosen, bounds)
+    >>> result.x, result.fun
+    (array([1., 1., 1., 1., 1.]), 1.9216496320061384e-19)
+
+    Next find the minimum of the Ackley function
+    (http://en.wikipedia.org/wiki/Test_functions_for_optimization).
+
+    >>> from scipy.optimize import differential_evolution
+    >>> import numpy as np
+    >>> def ackley(x):
+    ...     arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
+    ...     arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
+    ...     return -20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e
+    >>> bounds = [(-5, 5), (-5, 5)]
+    >>> result = differential_evolution(ackley, bounds)
+    >>> result.x, result.fun
+    (array([ 0.,  0.]), 4.4408920985006262e-16)
+
+    References
+    ----------
+    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
+           Efficient Heuristic for Global Optimization over Continuous Spaces,
+           Journal of Global Optimization, 1997, 11, 341 - 359.
+    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
+    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
+    """
+
+    solver = DifferentialEvolutionSolver(func, bounds, args=args,
+                                         strategy=strategy, maxiter=maxiter,
+                                         popsize=popsize, tol=tol,
+                                         mutation=mutation,
+                                         recombination=recombination,
+                                         seed=seed, polish=polish,
+                                         callback=callback,
+                                         disp=disp,
+                                         init=init)
+    return solver.solve()
+
+
+class DifferentialEvolutionSolver(object):
+
+    """This class implements the differential evolution solver
+
+    Parameters
+    ----------
+    func : callable
+        The objective function to be minimized.  Must be in the form
+        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
+        and ``args`` is a  tuple of any additional fixed parameters needed to
+        completely specify the function.
+    bounds : sequence
+        Bounds for variables.  ``(min, max)`` pairs for each element in ``x``,
+        defining the lower and upper bounds for the optimizing argument of
+        `func`. It is required to have ``len(bounds) == len(x)``.
+        ``len(bounds)`` is used to determine the number of parameters in ``x``.
+    args : tuple, optional
+        Any additional fixed parameters needed to
+        completely specify the objective function.
+    strategy : str, optional
+        The differential evolution strategy to use. Should be one of:
+
+            - 'best1bin'
+            - 'best1exp'
+            - 'rand1exp'
+            - 'randtobest1exp'
+            - 'best2exp'
+            - 'rand2exp'
+            - 'randtobest1bin'
+            - 'best2bin'
+            - 'rand2bin'
+            - 'rand1bin'
+
+        The default is 'best1bin'
+
+    maxiter : int, optional
+        The maximum number of times the entire population is evolved. The
+        maximum number of function evaluations is:
+        ``maxiter * popsize * len(x)``
+    popsize : int, optional
+        A multiplier for setting the total population size.  The population has
+        ``popsize * len(x)`` individuals.
+    tol : float, optional
+        When the mean of the population energies, multiplied by tol,
+        divided by the standard deviation of the population energies
+        is greater than 1 the solving process terminates:
+        ``convergence = mean(pop) * tol / stdev(pop) > 1``
+    mutation : float or tuple(float, float), optional
+        The mutation constant.
+        If specified as a float it should be in the range [0, 2].
+        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
+        randomly changes the mutation constant on a generation by generation
+        basis. The mutation constant for that generation is taken from
+        U[min, max). Dithering can help speed convergence significantly.
+        Increasing the mutation constant increases the search radius, but will
+        slow down convergence.
+    recombination : float, optional
+        The recombination constant, should be in the range [0, 1]. Increasing
+        this value allows a larger number of mutants to progress into the next
+        generation, but at the risk of population stability.
+    seed : int or `np.random.RandomState`, optional
+        If `seed` is not specified the `np.random.RandomState` singleton is
+        used.
+        If `seed` is an int, a new `np.random.RandomState` instance is used,
+        seeded with `seed`.
+        If `seed` is already a `np.random.RandomState` instance, then that
+        `np.random.RandomState` instance is used.
+        Specify `seed` for repeatable minimizations.
+    disp : bool, optional
+        Display status messages
+    callback : callable, `callback(xk, convergence=val)`, optional
+        A function to follow the progress of the minimization. ``xk`` is
+        the current value of ``x0``. ``val`` represents the fractional
+        value of the population convergence.  When ``val`` is greater than one
+        the function halts. If callback returns `True`, then the minimization
+        is halted (any polishing is still carried out).
+    polish : bool, optional
+        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
+        is used to polish the best population member at the end. This requires
+        a few more function evaluations.
+    maxfun : int, optional
+        Set the maximum number of function evaluations. However, it probably
+        makes more sense to set `maxiter` instead.
+    init : string, optional
+        Specify which type of population initialization is performed. Should be
+        one of:
+
+            - 'latinhypercube'
+            - 'random'
+    """
+
+    # Dispatch of mutation strategy method (binomial or exponential).
+    _binomial = {'best1bin': '_best1',
+                 'randtobest1bin': '_randtobest1',
+                 'best2bin': '_best2',
+                 'rand2bin': '_rand2',
+                 'rand1bin': '_rand1'}
+    _exponential = {'best1exp': '_best1',
+                    'rand1exp': '_rand1',
+                    'randtobest1exp': '_randtobest1',
+                    'best2exp': '_best2',
+                    'rand2exp': '_rand2'}
+
+    def __init__(self, func, bounds, args=(),
+                 strategy='best1bin', maxiter=None, popsize=15,
+                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
+                 maxfun=None, callback=None, disp=False, polish=True,
+                 init='latinhypercube'):
+
+        if strategy in self._binomial:
+            self.mutation_func = getattr(self, self._binomial[strategy])
+        elif strategy in self._exponential:
+            self.mutation_func = getattr(self, self._exponential[strategy])
+        else:
+            raise ValueError("Please select a valid mutation strategy")
+        self.strategy = strategy
+
+        self.callback = callback
+        self.polish = polish
+        self.tol = tol
+
+        #Mutation constant should be in [0, 2). If specified as a sequence
+        #then dithering is performed.
+        self.scale = mutation
+        if (not np.all(np.isfinite(mutation)) or
+                np.any(np.array(mutation) >= 2) or
+                np.any(np.array(mutation) < 0)):
+            raise ValueError('The mutation constant must be a float in '
+                             'U[0, 2), or specified as a tuple(min, max)'
+                             ' where min < max and min, max are in U[0, 2).')
+
+        self.dither = None
+        if hasattr(mutation, '__iter__') and len(mutation) > 1:
+            self.dither = [mutation[0], mutation[1]]
+            self.dither.sort()
+
+        self.cross_over_probability = recombination
+
+        self.func = func
+        self.args = args
+
+        # convert tuple of lower and upper bounds to limits
+    # [(low_0, high_0), ..., (low_n, high_n)]
+        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
+        self.limits = np.array(bounds, dtype='float').T
+        if (np.size(self.limits, 0) != 2
+                or not np.all(np.isfinite(self.limits))):
+            raise ValueError('bounds should be a sequence containing '
+                             'real valued (min, max) pairs for each value'
+                             ' in x')
+
+        self.maxiter = maxiter or 1000
+        self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
+                                  np.size(self.limits, 1)))
+
+        # population is scaled to between [0, 1].
+        # We have to scale between parameter <-> population
+        # save these arguments for _scale_parameter and
+        # _unscale_parameter. This is an optimization
+        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
+        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
+
+        parameter_count = np.size(self.limits, 1)
+        self.random_number_generator = _make_random_gen(seed)
+
+        #default initialization is a latin hypercube design, but there
+        #are other population initializations possible.
+        self.population = np.zeros((popsize * parameter_count,
+                                    parameter_count))
+        if init == 'latinhypercube':
+            self.init_population_lhs()
+        elif init == 'random':
+            self.init_population_random()
+        else:
+            raise ValueError("The population initialization method must be one"
+                             "of 'latinhypercube' or 'random'")
+
+        self.population_energies = np.ones(
+            popsize * parameter_count) * np.inf
+
+        self.disp = disp
+
+    def init_population_lhs(self):
+        """
+        Initializes the population with Latin Hypercube Sampling
+        Latin Hypercube Sampling ensures that the sampling of parameter space
+        is maximised.
+        """
+        samples = np.size(self.population, 0)
+        N = np.size(self.population, 1)
+        rng = self.random_number_generator
+
+        # Generate the intervals
+        segsize = 1.0 / samples
+
+        # Fill points uniformly in each interval
+        rdrange = rng.rand(samples, N) * segsize
+        rdrange += np.atleast_2d(np.arange(0., 1., segsize)).T
+
+        # Make the random pairings
+        self.population = np.zeros_like(rdrange)
+
+        for j in range(N):
+            order = rng.permutation(range(samples))
+            self.population[:, j] = rdrange[order, j]
+
+    def init_population_random(self):
+        """
+        Initialises the population at random.  This type of initialization
+        can possess clustering, Latin Hypercube sampling is generally better.
+        """
+        rng = self.random_number_generator
+        self.population = rng.random_sample(self.population.shape)
+
+    @property
+    def x(self):
+        """
+        The best solution from the solver
+
+        Returns
+        -------
+        x : ndarray
+            The best solution from the solver.
+        """
+        return self._scale_parameters(self.population[0])
+
+    def solve(self):
+        """
+        Runs the DifferentialEvolutionSolver.
+
+        Returns
+        -------
+        res : OptimizeResult
+            The optimization result represented as a ``OptimizeResult`` object.
+            Important attributes are: ``x`` the solution array, ``success`` a
+            Boolean flag indicating if the optimizer exited successfully and
+            ``message`` which describes the cause of the termination. See
+            `OptimizeResult` for a description of other attributes. If polish
+            was employed, then OptimizeResult also contains the ``hess_inv`` and
+            ``jac`` attributes.
+        """
+
+        nfev, nit, warning_flag = 0, 0, False
+        status_message = _status_message['success']
+
+        # calculate energies to start with
+        for index, candidate in enumerate(self.population):
+            parameters = self._scale_parameters(candidate)
+            self.population_energies[index] = self.func(parameters,
+                                                        *self.args)
+            nfev += 1
+
+            if nfev > self.maxfun:
+                warning_flag = True
+                status_message = _status_message['maxfev']
+                break
+
+        minval = np.argmin(self.population_energies)
+
+        # put the lowest energy into the best solution position.
+        lowest_energy = self.population_energies[minval]
+        self.population_energies[minval] = self.population_energies[0]
+        self.population_energies[0] = lowest_energy
+
+        self.population[[0, minval], :] = self.population[[minval, 0], :]
+
+        if warning_flag:
+            return OptimizeResult(
+                           x=self.x,
+                           fun=self.population_energies[0],
+                           nfev=nfev,
+                           nit=nit,
+                           message=status_message,
+                           success=(not warning_flag))
+
+        # do the optimisation.
+        for nit in range(1, self.maxiter + 1):
+            if self.dither is not None:
+                self.scale = (self.random_number_generator.rand() *
+                              (self.dither[1] - self.dither[0]) +
+                              self.dither[0])
+            for candidate in range(np.size(self.population, 0)):
+                if nfev > self.maxfun:
+                    warning_flag = True
+                    status_message = _status_message['maxfev']
+                    break
+
+                trial = self._mutate(candidate)
+                self._ensure_constraint(trial)
+                parameters = self._scale_parameters(trial)
+
+                energy = self.func(parameters, *self.args)
+                nfev += 1
+
+                if energy < self.population_energies[candidate]:
+                    self.population[candidate] = trial
+                    self.population_energies[candidate] = energy
+
+                    if energy < self.population_energies[0]:
+                        self.population_energies[0] = energy
+                        self.population[0] = trial
+
+            # stop when the fractional s.d. of the population is less than tol
+            # of the mean energy
+            convergence = (np.std(self.population_energies) /
+                           np.abs(np.mean(self.population_energies) +
+                                  _MACHEPS))
+
+            if self.disp:
+                print("differential_evolution step %d: f(x)= %g"
+                      % (nit,
+                         self.population_energies[0]))
+
+            if (self.callback and
+                    self.callback(self._scale_parameters(self.population[0]),
+                                  convergence=self.tol / convergence) is True):
+
+                warning_flag = True
+                status_message = ('callback function requested stop early '
+                                  'by returning True')
+                break
+
+            if convergence < self.tol or warning_flag:
+                break
+
+        else:
+            status_message = _status_message['maxiter']
+            warning_flag = True
+
+        DE_result = OptimizeResult(
+            x=self.x,
+            fun=self.population_energies[0],
+            nfev=nfev,
+            nit=nit,
+            message=status_message,
+            success=(not warning_flag))
+
+        if self.polish:
+            result = minimize(self.func,
+                              np.copy(DE_result.x),
+                              method='L-BFGS-B',
+                              bounds=self.limits.T,
+                              args=self.args)
+
+            nfev += result.nfev
+            DE_result.nfev = nfev
+
+            if result.fun < DE_result.fun:
+                DE_result.fun = result.fun
+                DE_result.x = result.x
+                DE_result.jac = result.jac
+                # to keep internal state consistent
+                self.population_energies[0] = result.fun
+                self.population[0] = self._unscale_parameters(result.x)
+
+        return DE_result
+
+    def _scale_parameters(self, trial):
+        """
+        scale from a number between 0 and 1 to parameters
+        """
+        return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
+
+    def _unscale_parameters(self, parameters):
+        """
+        scale from parameters to a number between 0 and 1.
+        """
+        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
+
+    def _ensure_constraint(self, trial):
+        """
+        make sure the parameters lie between the limits
+        """
+        for index, param in enumerate(trial):
+            if param > 1 or param < 0:
+                trial[index] = self.random_number_generator.rand()
+
+    def _mutate(self, candidate):
+        """
+        create a trial vector based on a mutation strategy
+        """
+        trial = np.copy(self.population[candidate])
+        parameter_count = np.size(trial, 0)
+
+        fill_point = self.random_number_generator.randint(0, parameter_count)
+
+        if (self.strategy == 'randtobest1exp'
+                or self.strategy == 'randtobest1bin'):
+            bprime = self.mutation_func(candidate,
+                                        self._select_samples(candidate, 5))
+        else:
+            bprime = self.mutation_func(self._select_samples(candidate, 5))
+
+        if self.strategy in self._binomial:
+            crossovers = self.random_number_generator.rand(parameter_count)
+            crossovers = crossovers < self.cross_over_probability
+            # the last one is always from the bprime vector for binomial
+            # If you fill in modulo with a loop you have to set the last one to
+            # true. If you don't use a loop then you can have any random entry
+            # be True.
+            crossovers[fill_point] = True
+            trial = np.where(crossovers, bprime, trial)
+            return trial
+
+        elif self.strategy in self._exponential:
+            i = 0
+            while (i < parameter_count and
+                   self.random_number_generator.rand() <
+                   self.cross_over_probability):
+
+                trial[fill_point] = bprime[fill_point]
+                fill_point = (fill_point + 1) % parameter_count
+                i += 1
+
+            return trial
+
+    def _best1(self, samples):
+        """
+        best1bin, best1exp
+        """
+        r0, r1 = samples[:2]
+        return (self.population[0] + self.scale *
+                (self.population[r0] - self.population[r1]))
+
+    def _rand1(self, samples):
+        """
+        rand1bin, rand1exp
+        """
+        r0, r1, r2 = samples[:3]
+        return (self.population[r0] + self.scale *
+                (self.population[r1] - self.population[r2]))
+
+    def _randtobest1(self, candidate, samples):
+        """
+        randtobest1bin, randtobest1exp
+        """
+        r0, r1 = samples[:2]
+        bprime = np.copy(self.population[candidate])
+        bprime += self.scale * (self.population[0] - bprime)
+        bprime += self.scale * (self.population[r0] -
+                                self.population[r1])
+        return bprime
+
+    def _best2(self, samples):
+        """
+        best2bin, best2exp
+        """
+        r0, r1, r2, r3 = samples[:4]
+        bprime = (self.population[0] + self.scale *
+                  (self.population[r0] + self.population[r1] -
+                   self.population[r2] - self.population[r3]))
+
+        return bprime
+
+    def _rand2(self, samples):
+        """
+        rand2bin, rand2exp
+        """
+        r0, r1, r2, r3, r4 = samples
+        bprime = (self.population[r0] + self.scale *
+                 (self.population[r1] + self.population[r2] -
+                  self.population[r3] - self.population[r4]))
+
+        return bprime
+
+    def _select_samples(self, candidate, number_samples):
+        """
+        obtain random integers from range(np.size(self.population, 0)),
+        without replacement.  You can't have the original candidate either.
+        """
+        idxs = list(range(np.size(self.population, 0)))
+        idxs.remove(candidate)
+        self.random_number_generator.shuffle(idxs)
+        idxs = idxs[:number_samples]
+        return idxs
+
+
+def _make_random_gen(seed):
+    """Turn seed into a np.random.RandomState instance
+
+    If seed is None, return the RandomState singleton used by np.random.
+    If seed is an int, return a new RandomState instance seeded with seed.
+    If seed is already a RandomState instance, return it.
+    Otherwise raise ValueError.
+    """
+    if seed is None or seed is np.random:
+        return np.random.mtrand._rand
+    if isinstance(seed, (numbers.Integral, np.integer)):
+        return np.random.RandomState(seed)
+    if isinstance(seed, np.random.RandomState):
+        return seed
+    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
+                     ' instance' % seed)
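
The bundled copy can be exercised on its own; a small sketch minimizing a
shifted sphere function (an illustrative test function, not from the module):

    import numpy as np
    from lmfit._differentialevolution import differential_evolution

    def sphere(x):
        # minimum of 0 at x = (1, -2)
        return (x[0] - 1.0)**2 + (x[1] + 2.0)**2

    result = differential_evolution(sphere, [(-5, 5), (-5, 5)], seed=1)
    print(result.x, result.fun)
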
diff --git a/lmfit/_version.py b/lmfit/_version.py
index 2edcc4f..8e2d281 100644
--- a/lmfit/_version.py
+++ b/lmfit/_version.py
@@ -9,8 +9,8 @@
 # versioneer-0.12 (https://github.com/warner/python-versioneer)
 
 # these strings will be replaced by git during git-archive
-git_refnames = " (HEAD, tag: 0.8.1, master)"
-git_full = "0e2ad768ce26589b95f61e92ba4bac60cbba2bc3"
+git_refnames = " (tag: 0.8.2)"
+git_full = "a4ffac9e157de33706836e3e55517701050fbdd5"
 
 # these strings are filled in when 'setup.py versioneer' creates _version.py
 tag_prefix = ""
diff --git a/lmfit/confidence.py b/lmfit/confidence.py
index 9631232..8d5d57e 100644
--- a/lmfit/confidence.py
+++ b/lmfit/confidence.py
@@ -125,10 +125,11 @@ def conf_interval(minimizer, p_names=None, sigmas=(0.674, 0.95, 0.997),
 def map_trace_to_names(trace, params):
     "maps trace to param names"
     out = {}
+    allnames = list(params.keys()) + ['prob']
     for name in trace.keys():
         tmp_dict = {}
         tmp = np.array(trace[name])
-        for para_name, values in zip(params.keys() + ['prob'], tmp.T):
+        for para_name, values in zip(allnames, tmp.T):
             tmp_dict[para_name] = values
         out[name] = tmp_dict
     return out
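
The list() call matters on Python 3, where dict.keys() returns a view that
cannot be concatenated with a list:

    >>> d = {'a': 1}
    >>> d.keys() + ['prob']          # TypeError on Python 3
    >>> list(d.keys()) + ['prob']    # works on Python 2 and 3
    ['a', 'prob']
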
diff --git a/lmfit/minimizer.py b/lmfit/minimizer.py
index 9a8f241..df9219d 100644
--- a/lmfit/minimizer.py
+++ b/lmfit/minimizer.py
@@ -1,5 +1,5 @@
 """
-Simple minimizer  is a wrapper around scipy.leastsq, allowing a
+Simple minimizer is a wrapper around scipy.leastsq, allowing a
 user to build a fitting model as a function of general purpose
 Fit Parameters that can be fixed or floated, bounded, and written
 as a simple expression of other Fit Parameters.
@@ -14,7 +14,7 @@ function-to-be-minimized (residual function) in terms of these Parameters.
 from copy import deepcopy
 import numpy as np
 from numpy import (dot, eye, ndarray, ones_like,
-                   sqrt, take, transpose, triu)
+                   sqrt, take, transpose, triu, deprecate)
 from numpy.dual import inv
 from numpy.linalg import LinAlgError
 
@@ -22,6 +22,12 @@ from scipy.optimize import leastsq as scipy_leastsq
 from scipy.optimize import fmin as scipy_fmin
 from scipy.optimize.lbfgsb import fmin_l_bfgs_b as scipy_lbfgsb
 
+# differential_evolution is only present in scipy >= 0.15
+try:
+    from scipy.optimize import differential_evolution as scipy_diffev
+except ImportError:
+    from ._differentialevolution import differential_evolution as scipy_diffev
+
 # check for scipy.optimize.minimize
 HAS_SCALAR_MIN = False
 try:
@@ -38,7 +44,7 @@ from .parameter import Parameter, Parameters
 from . import uncertainties
 
 
-def asteval_with_uncertainties(*vals,  **kwargs):
+def asteval_with_uncertainties(*vals, **kwargs):
     """
     given values for variables, calculate object value.
     This is used by the uncertainties package to calculate
@@ -61,7 +67,6 @@ def asteval_with_uncertainties(*vals,  **kwargs):
 
 wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
 
-
 def eval_stderr(obj, uvars, _names, _pars, _asteval):
     """evaluate uncertainty and set .stderr for a parameter `obj`
     given the uncertain values `uvars` (a list of uncertainties.ufloats),
@@ -99,6 +104,21 @@ def check_ast_errors(error):
             return msg
     return None
 
+def _differential_evolution(func, x0, **kwds):
+    """
+    A wrapper for differential_evolution that can be called like the
+    scipy.optimize.minimize scalar methods.
+    """
+    kwargs = dict(args=(), strategy='best1bin', maxiter=None, popsize=15,
+                  tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
+                  callback=None, disp=False, polish=True,
+                  init='latinhypercube')
+
+    for k, v in kwds.items():
+        if k in kwargs:
+            kwargs[k] = v
+
+    return scipy_diffev(func, kwds['bounds'], **kwargs)
+
 SCALAR_METHODS = {'nelder': 'Nelder-Mead',
                   'powell': 'Powell',
                   'cg': 'CG',
@@ -110,18 +130,64 @@ SCALAR_METHODS = {'nelder': 'Nelder-Mead',
                   'cobyla': 'COBYLA',
                   'slsqp': 'SLSQP',
                   'dogleg': 'dogleg',
-                  'trust-ncg': 'trust-ncg'}
+                  'trust-ncg': 'trust-ncg',
+                  'differential_evolution': 'differential_evolution'}
+
 
 class Minimizer(object):
-    """general minimizer"""
-    err_nonparam = \
-     "params must be a minimizer.Parameters() instance or list of Parameters()"
-    err_maxfev = """Too many function calls (max set to  %i)!  Use:
-    minimize(func, params, ...., maxfev=NNN)
-or set  leastsq_kws['maxfev']  to increase this maximum."""
+    """A general minimizer for curve fitting"""
+    err_nonparam = ("params must be a minimizer.Parameters() instance or list "
+                    "of Parameters()")
+    err_maxfev = ("Too many function calls (max set to %i)!  Use:"
+                  " minimize(func, params, ..., maxfev=NNN)"
+                  "or set leastsq_kws['maxfev']  to increase this maximum.")
 
     def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
                  iter_cb=None, scale_covar=True, **kws):
+        """
+        Initialization of the Minimizer class
+
+        Parameters
+        ----------
+        userfcn : callable
+            objective function that returns the residual (difference between
+            model and data) to be minimized in a least squares sense.  The
+            function must have the signature:
+            `userfcn(params, *fcn_args, **fcn_kws)`
+        params : lmfit.parameter.Parameters object.
+            contains the Parameters for the model.
+        fcn_args : tuple, optional
+            positional arguments to pass to userfcn.
+        fcn_kws : dict, optional
+            keyword arguments to pass to userfcn.
+        iter_cb : callable, optional
+            Function to be called at each fit iteration. This function should
+            have the signature:
+            `iter_cb(params, iter, resid, *fcn_args, **fcn_kws)`,
+            where `params` will have the current parameter values, `iter`
+            the iteration, `resid` the current residual array, and `*fcn_args`
+            and `**fcn_kws` as passed to the objective function.
+        scale_covar : bool, optional
+            Whether to automatically scale the covariance matrix (leastsq
+            only).
+        kws : dict, optional
+            Options to pass to the minimizer being used.
+
+        Notes
+        -----
+        The objective function should return the value to be minimized. For the
+        Levenberg-Marquardt algorithm from leastsq(), this returned value must
+        be an array, with a length greater than or equal to the number of
+        fitting variables in the model. For the other methods, the return value
+        can either be a scalar or an array. If an array is returned, the sum of
+        squares of the array will be sent to the underlying fitting method,
+        effectively doing a least-squares optimization of the return values.
+
+        A common use for the fcn_args and fcn_kws would be to pass in other
+        data needed to calculate the residual, including such things as the
+        data array, dependent variable, uncertainties in the data, and other
+        data structures for the model calculation.
+        """
         self.userfcn = userfcn
         self.userargs = fcn_args
         if self.userargs is None:
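
With 'differential_evolution' in SCALAR_METHODS, the new global optimizer is
reachable through the usual interface. A minimal sketch (hypothetical model;
every varying Parameter needs finite min/max bounds, since differential
evolution searches a bounded box):

    import numpy as np
    import lmfit

    def resid(params, x, data):
        model = params['amp'].value * np.exp(-x / params['tau'].value)
        return model - data

    x = np.linspace(0, 10, 201)
    data = 3.0 * np.exp(-x / 2.0) + np.random.normal(scale=0.05, size=x.size)

    params = lmfit.Parameters()
    params.add('amp', value=1.0, min=0.0, max=10.0)
    params.add('tau', value=1.0, min=0.1, max=10.0)

    out = lmfit.minimize(resid, params, args=(x, data),
                         method='differential_evolution')
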
@@ -138,13 +204,13 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.ndata = 0
         self.nvarys = 0
         self.ier = 0
-        self.success = None
+        self.success = True
+        self.errorbars = False
         self.message = None
         self.lmdif_message = None
         self.chisqr = None
         self.redchi = None
         self.covar = None
-        self.errorbars = None
         self.residual = None
         self.var_map = []
         self.vars = []
@@ -159,7 +225,13 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
 
     @property
     def values(self):
-        "Convenience function that returns Parameter values as a simple dict."
+        """
+        Returns
+        -------
+        param_values : dict
+            Parameter values in a simple dictionary.
+        """
+
         return dict([(name, p.value) for name, p in self.params.items()])
 
     def __update_paramval(self, name):
@@ -189,19 +261,21 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.updated[name] = True
 
     def update_constraints(self):
-        """update all constrained parameters, checking that
-        dependencies are evaluated as needed."""
+        """
+        Update all constrained parameters, checking that dependencies are
+        evaluated as needed.
+        """
         self.updated = dict([(name, False) for name in self.params])
         for name in self.params:
             self.__update_paramval(name)
 
     def __residual(self, fvars):
         """
-        residual function used for least-squares fit.
-        With the new, candidate values of fvars (the fitting variables),
-        this evaluates all parameters, including setting bounds and
-        evaluating constraints, and then passes those to the
-        user-supplied function to calculate the residual.
+        Residual function used for least-squares fit.
+        With the new, candidate values of fvars (the fitting variables), this
+        evaluates all parameters, including setting bounds and evaluating
+        constraints, and then passes those to the user-supplied function to
+        calculate the residual.
         """
         # set parameter values
         for varname, val in zip(self.var_map, fvars):
@@ -212,7 +286,7 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
 
         self.update_constraints()
         out = self.userfcn(self.params, *self.userargs, **self.userkws)
-        if hasattr(self.iter_cb, '__call__'):
+        if callable(self.iter_cb):
             self.iter_cb(self.params, self.nfev, out,
                          *self.userargs, **self.userkws)
         return out
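As a sketch, any callable with the documented signature can serve as `iter_cb`;
this hypothetical callback just logs the iteration number and the residual
sum-of-squares:

    import numpy as np

    def iter_cb(params, iter, resid, *fcn_args, **fcn_kws):
        # called once per fit iteration with the current parameter values
        print(iter, np.sum(np.asarray(resid)**2))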
@@ -249,9 +323,19 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             raise MinimizerException(self.err_nonparam)
 
     def penalty(self, params):
-        """penalty function for scalar minimizers:
-        evaluates user-supplied objective function,
-        if result is an array, return array sum-of-squares.
+        """
+        Penalty function for scalar minimizers:
+
+        Parameters
+        ----------
+        params : ndarray
+            The fitting variables (internal values), as passed in by the
+            scalar minimizer
+
+        Returns
+        -------
+        r : float
+            The evaluated user-supplied objective function. If the objective
+            function returns an array, the array sum-of-squares is returned.
         """
         r = self.__residual(params)
         if isinstance(r, ndarray):
@@ -259,7 +343,9 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         return r
 
     def prepare_fit(self, params=None):
-        """prepare parameters for fit"""
+        """
+        Prepares parameters for fitting
+        """
         # determine which parameters are actually variables
         # and which are defined expressions.
         if params is None and self.params is not None and self.__prepared:
@@ -299,8 +385,9 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.__prepared = True
 
     def unprepare_fit(self):
-        """unprepare fit, so that subsequent fits will be
-        forced to run re-prepare the fit
+        """
+        Unprepares the fit, so that subsequent fits will be
+        forced to run prepare_fit.
 
         removes ast compilations of constraint expressions
         """
@@ -310,10 +397,20 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             if hasattr(par, 'ast'):
                 delattr(par, 'ast')
 
+    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
+                       'and method=\'L-BFGS-B\' instead')
     def lbfgsb(self, **kws):
         """
-        use l-bfgs-b minimization
+        Use L-BFGS-B minimization.
+
+        Parameters
+        ----------
+        kws : dict
+            Minimizer options to pass to the
+            scipy.optimize.lbfgsb.fmin_l_bfgs_b function.
+
         """
+
         self.prepare_fit()
         lb_kws = dict(factr=1000.0, approx_grad=True, m=20,
                       maxfun=2000 * (self.nvarys + 1),
@@ -335,10 +432,18 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         self.unprepare_fit()
         return
 
+    @deprecate(message='    Deprecated in lmfit 0.8.2, use scalar_minimize '
+                       'and method=\'Nelder-Mead\' instead')
     def fmin(self, **kws):
         """
-        use nelder-mead (simplex) minimization
+        Use Nelder-Mead (simplex) minimization
+
+        Parameters
+        ----------
+        kws : dict
+            Minimizer options to pass to the scipy.optimize.fmin minimizer.
         """
+
         self.prepare_fit()
         fmin_kws = dict(full_output=True, disp=False, retall=True,
                         ftol=1.e-4, xtol=1.e-4,
@@ -361,19 +466,29 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         return
 
     def scalar_minimize(self, method='Nelder-Mead', **kws):
-        """use one of the scaler minimization methods from scipy.
-        Available methods include:
-          Nelder-Mead
-          Powell
-          CG  (conjugate gradient)
-          BFGS
-          Newton-CG
-          L-BFGS-B
-          TNC
-          COBYLA
-          SLSQP
-          dogleg
-          trust-ncg
+        """
+        Use one of the scalar minimization methods from
+        scipy.optimize.minimize.
+
+        Parameters
+        ----------
+        method : str, optional
+            Name of the fitting method to use.
+            One of:
+                'Nelder-Mead' (default)
+                'L-BFGS-B'
+                'Powell'
+                'CG'
+                'BFGS'
+                'Newton-CG'
+                'COBYLA'
+                'TNC'
+                'trust-ncg'
+                'dogleg'
+                'SLSQP'
+                'differential_evolution'
+
+        kws : dict, optional
+            Minimizer options to pass to scipy.optimize.minimize.
 
         If the objective function returns a numpy array instead
         of the expected scalar, the sum of squares of the array
@@ -381,16 +496,22 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
 
         Note that bounds and constraints can be set on Parameters
         for any of these methods, so are not supported separately
-        for those designed to use bounds.
+        for those designed to use bounds. However, if you use the
+        differential_evolution option you must specify finite
+        (min, max) for each Parameter.
+
+        Returns
+        -------
+        success : bool
+            Whether the fit was successful.
 
         """
         if not HAS_SCALAR_MIN:
             raise NotImplementedError
-
         self.prepare_fit()
 
         fmin_kws = dict(method=method,
-                        options={'maxiter': 1000*(self.nvarys + 1)})
+                        options={'maxiter': 1000 * (self.nvarys + 1)})
         fmin_kws.update(self.kws)
         fmin_kws.update(kws)
 
@@ -409,9 +530,20 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             self.jacfcn = None
             fmin_kws.pop('jac')
 
+        if method == 'differential_evolution':
+            fmin_kws['method'] = _differential_evolution
+            pars = self.params
+            bounds = [(pars[par].min, pars[par].max) for par in pars]
+            if not np.all(np.isfinite(bounds)):
+                raise ValueError('With differential evolution finite bounds '
+                                 'are required for each parameter')
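+            # note: the minimizer works on the internal (arcsin-transformed)
+            # variables, so the bounds handed to it are always (-pi/2, pi/2)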
+            bounds = [(-np.pi / 2., np.pi / 2.)] * len(self.vars)
+            fmin_kws['bounds'] = bounds
+
         ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
         xout = ret.x
         self.message = ret.message
+
         self.nfev = ret.nfev
         self.chisqr = self.residual = self.__residual(xout)
         self.ndata = 1
@@ -420,24 +552,32 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             self.chisqr = (self.chisqr**2).sum()
             self.ndata = len(self.residual)
             self.nfree = self.ndata - self.nvarys
-        self.redchi = self.chisqr/self.nfree
+        self.redchi = self.chisqr / self.nfree
         self.unprepare_fit()
-        return
+        return ret.success
 
     def leastsq(self, **kws):
         """
-        use Levenberg-Marquardt minimization to perform fit.
-        This assumes that ModelParameters have been stored,
-        and a function to minimize has been properly set up.
+        Use Levenberg-Marquardt minimization to perform a fit.
+        This assumes that ModelParameters have been stored, and a function to
+        minimize has been properly set up.
 
-        This wraps scipy.optimize.leastsq, and keyword arguments are passed
-        directly as options to scipy.optimize.leastsq
+        This wraps scipy.optimize.leastsq.
 
         When possible, this calculates the estimated uncertainties and
         variable correlations from the covariance matrix.
 
-        writes outputs to many internal attributes, and
-        returns True if fit was successful, False if not.
+        Writes outputs to many internal attributes.
+
+        Parameters
+        ----------
+        kws : dict, optional
+            Minimizer options to pass to scipy.optimize.leastsq.
+
+        Returns
+        -------
+        success : bool
+            True if fit was successful, False if not.
         """
         self.prepare_fit()
         lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
@@ -559,7 +699,33 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
         return self.success
 
     def minimize(self, method='leastsq'):
-        """perform minimization using supplied method"""
+        """
+        Perform the minimization.
+
+        Parameters
+        ----------
+        method : str, optional
+            Name of the fitting method to use.
+            One of:
+            'leastsq'                -    Levenberg-Marquardt (default)
+            'nelder'                 -    Nelder-Mead
+            'lbfgsb'                 -    L-BFGS-B
+            'powell'                 -    Powell
+            'cg'                     -    Conjugate-Gradient
+            'newton'                 -    Newton-CG
+            'cobyla'                 -    COBYLA
+            'tnc'                    -    Truncated Newton (TNC)
+            'trust-ncg'              -    Trust-region Newton-CG
+            'dogleg'                 -    Dogleg
+            'slsqp'                  -    Sequential Least Squares Programming
+            'differential_evolution' -    Differential Evolution
+
+        Returns
+        -------
+        success : bool
+            Whether the fit was successful.
+        """
+
         function = self.leastsq
         kwargs = {}
 
@@ -571,7 +737,7 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
             for key, val in SCALAR_METHODS.items():
                 if (key.lower().startswith(user_method) or
                     val.lower().startswith(user_method)):
-                    kwargs = dict(method=val)
+                    kwargs['method'] = val
         elif (user_method.startswith('nelder') or
               user_method.startswith('fmin')):
             function = self.fmin
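For example, method names are matched by case-insensitive prefix against
SCALAR_METHODS, so these two hypothetical calls dispatch to the same minimizer:

    mini.minimize(method='nelder')         # matches the key 'nelder'
    mini.minimize(method='Nelder-Mead')    # matches the full scipy name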
@@ -583,9 +749,67 @@ or set  leastsq_kws['maxfev']  to increase this maximum."""
 
 def minimize(fcn, params, method='leastsq', args=None, kws=None,
              scale_covar=True, iter_cb=None, **fit_kws):
-    """simple minimization function,
-    finding the values for the params which give the
-    minimal sum-of-squares of the array return by fcn
+    """
+    A general-purpose curve-fitting function.
+    The minimize function takes an objective function to be minimized, a
+    dictionary (lmfit.parameter.Parameters) containing the model parameters,
+    and several optional arguments.
+
+    Parameters
+    ----------
+    fcn : callable
+        objective function that returns the residual (difference between
+        model and data) to be minimized in a least squares sense.  The
+        function must have the signature:
+        `fcn(params, *args, **kws)`
+    params : lmfit.parameter.Parameters object.
+        contains the Parameters for the model.
+    method : str, optional
+        Name of the fitting method to use.
+        One of:
+            'leastsq'                -    Levenberg-Marquardt (default)
+            'nelder'                 -    Nelder-Mead
+            'lbfgsb'                 -    L-BFGS-B
+            'powell'                 -    Powell
+            'cg'                     -    Conjugate-Gradient
+            'newton'                 -    Newton-CG
+            'cobyla'                 -    COBYLA
+            'tnc'                    -    Truncated Newton (TNC)
+            'trust-ncg'              -    Trust-region Newton-CG
+            'dogleg'                 -    Dogleg
+            'slsqp'                  -    Sequential Least Squares Programming
+            'differential_evolution' -    Differential Evolution
+
+    args : tuple, optional
+        Positional arguments to pass to fcn.
+    kws : dict, optional
+        keyword arguments to pass to fcn.
+    iter_cb : callable, optional
+        Function to be called at each fit iteration. This function should
+        have the signature `iter_cb(params, iter, resid, *args, **kws)`,
+        where `params` will have the current parameter values, `iter`
+        the iteration number, `resid` the current residual array, and
+        `*args` and `**kws` as passed to the objective function.
+    scale_covar : bool, optional
+        Whether to automatically scale the covariance matrix (leastsq
+        only).
+    fit_kws : dict, optional
+        Options to pass to the minimizer being used.
+
+    Notes
+    -----
+    The objective function should return the value to be minimized. For the
+    Levenberg-Marquardt algorithm from leastsq(), this returned value must
+    be an array, with a length greater than or equal to the number of
+    fitting variables in the model. For the other methods, the return value
+    can either be a scalar or an array. If an array is returned, the sum of
+    squares of the array will be sent to the underlying fitting method,
+    effectively doing a least-squares optimization of the return values.
+
+    A common use for `args` and `kws` would be to pass in other
+    data needed to calculate the residual, including such things as the
+    data array, dependent variable, uncertainties in the data, and other
+    data structures for the model calculation.
     """
     fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
                        iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
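A sketch of the one-call form, reusing the hypothetical residual, params, x and
data from the earlier sketches:

    from lmfit import minimize, report_fit

    result = minimize(residual, params, args=(x, data), method='leastsq')
    report_fit(params)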
diff --git a/lmfit/model.py b/lmfit/model.py
index a60fc70..8c4b41c 100644
--- a/lmfit/model.py
+++ b/lmfit/model.py
@@ -4,11 +4,18 @@ Concise nonlinear curve fitting.
 from __future__ import print_function
 import warnings
 import inspect
+import operator
 from copy import deepcopy
 import numpy as np
 from . import Parameters, Parameter, Minimizer
 from .printfuncs import fit_report
 
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+
 # Use pandas.isnull for aligning missing data is pandas is available.
 # otherwise use numpy.isnan
 try:
@@ -25,6 +32,24 @@ def _align(var, mask, data):
         return var[mask]
     return var
 
+
+try:
+    from matplotlib import pyplot as plt
+    _HAS_MATPLOTLIB = True
+except ImportError:
+    _HAS_MATPLOTLIB = False
+
+
+def _ensureMatplotlib(function):
+    if _HAS_MATPLOTLIB:
+        return function
+    else:
+        def no_op(*args, **kwargs):
+            print('matplotlib module is required for plotting the results')
+
+        return no_op
+
+
 class Model(object):
     """Create a model from a user-defined function.
 
@@ -64,7 +89,6 @@ class Model(object):
     _invalid_par   = "Invalid parameter name ('%s') for function %s"
     _invalid_missing = "missing must be None, 'none', 'drop', or 'raise'."
     _valid_missing   = (None, 'none', 'drop', 'raise')
-    _names_collide = "Two models have parameters named %s. Use distinct names"
 
     _invalid_hint = "unknown parameter hint '%s' for param '%s'"
     _hint_names = ('value', 'vary', 'min', 'max', 'expr')
@@ -75,7 +99,6 @@ class Model(object):
         self._prefix = prefix
         self._param_root_names = param_names  # will not include prefixes
         self.independent_vars = independent_vars
-        self.components = []
         self._func_allargs = []
         self._func_haskeywords = False
         if not missing in self._valid_missing:
@@ -92,29 +115,20 @@ class Model(object):
         self._name = name
 
     def _reprstring(self, long=False):
-        if not self.is_composite:
-            # base model
-            opts = []
-            if len(self._prefix) > 0:
-                opts.append("prefix='%s'" % (self._prefix))
-            if long:
-                for k, v in self.opts.items():
-                    opts.append("%s='%s'" % (k, v))
-
-            out = ["%s" % self._name]
-            if len(opts) > 0:
-                out[0] = "%s(%s)" % (out[0], ','.join(opts))
-        else:
-            # composite model
-            if self._name is None:
-                out = [c._reprstring(long)[0] for c in self.components]
-            else:
-                out = [self._name]
-        return out
+        out = self._name
+        opts = []
+        if len(self._prefix) > 0:
+            opts.append("prefix='%s'" % (self._prefix))
+        if long:
+            for k, v in self.opts.items():
+                opts.append("%s='%s'" % (k, v))
+        if len(opts) > 0:
+            out = "%s, %s" % (out, ', '.join(opts))
+        return "Model(%s)" % out
 
     @property
     def name(self):
-        return '+'.join(self._reprstring(long=False))
+        return self._reprstring(long=False)
 
     @name.setter
     def name(self, value):
@@ -131,21 +145,7 @@ class Model(object):
 
     @property
     def param_names(self):
-        if self.is_composite:
-            return self._compute_composite_param_names()
-        else:
-            return self._param_names
-
-    def _compute_composite_param_names(self):
-        param_names = set()
-        for sub_model in self.components:
-            param_names |= sub_model.param_names
-        param_names |= self._param_names
-        return param_names
-
-    @property
-    def is_composite(self):
-        return len(self.components) > 0
+        return self._param_names
 
     def __repr__(self):
         return  "<lmfit.Model: %s>" % (self.name)
@@ -263,45 +263,27 @@ class Model(object):
         if 'verbose' in kwargs:
             verbose = kwargs['verbose']
         params = Parameters()
-        if not self.is_composite:
-            # base model: build Parameters from scratch
-            for name in self.param_names:
-                par = Parameter(name=name)
-                basename = name[len(self._prefix):]
-                # apply defaults from model function definition
-                if basename in self.def_vals:
-                    par.value = self.def_vals[basename]
-                # apply defaults from parameter hints
-                if basename in self.param_hints:
-                    hint = self.param_hints[basename]
-                    for item in self._hint_names:
-                        if item in  hint:
-                            setattr(par, item, hint[item])
-                # apply values passed in through kw args
-                if basename in kwargs:
-                    # kw parameter names with no prefix
-                    par.value = kwargs[basename]
-                if name in kwargs:
-                    # kw parameter names with prefix
-                    par.value = kwargs[name]
-                params[name] = par
-        else:
-            # composite model: merge the sub_models parameters adding hints
-            for sub_model in self.components:
-                comp_params = sub_model.make_params(**kwargs)
-                for par_name, param in comp_params.items():
-                    # apply composite-model hints
-                    if par_name in self.param_hints:
-                        hint = self.param_hints[par_name]
-                        for item in self._hint_names:
-                            if item in  hint:
-                                setattr(param, item, hint[item])
-                params.update(comp_params)
-
-            # apply defaults passed in through kw args
-            for name in self.param_names:
-                if name in kwargs:
-                    params[name].value = kwargs[name]
+
+        for name in self.param_names:
+            par = Parameter(name=name)
+            basename = name[len(self._prefix):]
+            # apply defaults from model function definition
+            if basename in self.def_vals:
+                par.value = self.def_vals[basename]
+            # apply defaults from parameter hints
+            if basename in self.param_hints:
+                hint = self.param_hints[basename]
+                for item in self._hint_names:
+                    if item in  hint:
+                        setattr(par, item, hint[item])
+            # apply values passed in through kw args
+            if basename in kwargs:
+                # kw parameter names with no prefix
+                par.value = kwargs[basename]
+            if name in kwargs:
+                # kw parameter names with prefix
+                par.value = kwargs[name]
+            params[name] = par
 
         # add any additional parameters defined in param_hints
         # note that composites may define their own additional
@@ -379,25 +361,32 @@ class Model(object):
         args = {}
         for key, val in self.make_funcargs(params, kwargs).items():
             args["%s%s" % (self._prefix, key)] = val
-        for sub_model in self.components:
-            otherargs = sub_model._make_all_args(params, **kwargs)
-            args.update(otherargs)
         return args
 
     def eval(self, params=None, **kwargs):
         """evaluate the model with the supplied parameters"""
-        if len(self.components) > 0:
-            result = self.components[0].eval(params, **kwargs)
-            for model in self.components[1:]:
-                result += model.eval(params, **kwargs)
-        else:
-            result = self.func(**self.make_funcargs(params, kwargs))
-            # Handle special case of constant result and one
-            # independent variable (of any dimension).
-            if np.ndim(result) == 0 and len(self.independent_vars) == 1:
-                result = np.tile(result, kwargs[self.independent_vars[0]].shape)
+        result = self.func(**self.make_funcargs(params, kwargs))
+        # Handle special case of constant result and one
+        # independent variable (of any dimension).
+        if np.ndim(result) == 0 and len(self.independent_vars) == 1:
+            result = np.tile(result, kwargs[self.independent_vars[0]].shape)
         return result
 
+    @property
+    def components(self):
+        """return components for composite model"""
+        return [self]
+
+    def eval_components(self, params=None, **kwargs):
+        """
+        Evaluate the model with the supplied parameters and return an ordered
+        dict containing name, result pairs.
+        """
+        key = self._prefix
+        if len(key) < 1:
+            key = self._name
+        return {key: self.eval(params=params, **kwargs)}
+
     def fit(self, data, params=None, weights=None, method='leastsq',
             iter_cb=None, scale_covar=True, verbose=True, fit_kws=None, **kwargs):
         """Fit the model to the data.
@@ -504,29 +493,113 @@ class Model(object):
         output = ModelFit(self, params, method=method, iter_cb=iter_cb,
                           scale_covar=scale_covar, fcn_kws=kwargs, **fit_kws)
         output.fit(data=data, weights=weights)
+        output.components = self.components
         return output
 
     def __add__(self, other):
-        colliding_param_names = self.param_names & other.param_names
-        if len(colliding_param_names) != 0:
-            collision = colliding_param_names.pop()
-            raise NameError(self._names_collide % collision)
-
-        # If the model is already composite just add other as component
-        composite = self
-        if not self.is_composite:
-            # make new composite Model, add self and other as components
-            composite = Model(func=None)
-            composite.components = [self]
-            # we assume that all the sub-models have the same independent vars
-            composite.independent_vars = self.independent_vars[:]
-
-        if other.is_composite:
-            composite.components.extend(other.components)
-            composite.param_hints.update(other.param_hints)
-        else:
-            composite.components.append(other)
-        return composite
+        return CompositeModel(self, other, operator.add)
+
+    def __sub__(self, other):
+        return CompositeModel(self, other, operator.sub)
+
+    def __mul__(self, other):
+        return CompositeModel(self, other, operator.mul)
+
+    def __div__(self, other):
+        return CompositeModel(self, other, operator.truediv)
+
+    def __truediv__(self, other):
+        return CompositeModel(self, other, operator.truediv)
+
+
+class CompositeModel(Model):
+    """Create a composite model -- a binary operator of two Models
+
+    Parameters
+    ----------
+    left:   left-hand side model -- must be a Model()
+    right:  right-hand side model -- must be a Model()
+    op:     callable binary operator (typically operator.add, operator.mul, etc.)
+
+    independent_vars: list of strings or None (default)
+        arguments to func that are independent variables
+    param_names: list of strings or None (default)
+        names of arguments to func that are to be made into parameters
+    missing: None, 'none', 'drop', or 'raise'
+        'none' or None: Do not check for null or missing values (default)
+        'drop': Drop null or missing observations in data.
+            if pandas is installed, pandas.isnull is used, otherwise
+            numpy.isnan is used.
+        'raise': Raise a (more helpful) exception when data contains null
+            or missing values.
+    name: None or string
+        name for the model. When `None` (default) the name is constructed
+        from the names of the component models and the binary operator.
+
+    """
+    _names_collide = "Two models have parameters named {clash}. Use distinct names"
+    _bad_arg   = "CompositeModel: argument {arg} is not a Model"
+    _bad_op    = "CompositeModel: operator {op} is not callable"
+    _known_ops = {operator.add: '+', operator.sub: '-',
+                  operator.mul: '*', operator.truediv: '/'}
+
+    def __init__(self, left, right, op, **kws):
+        if not isinstance(left, Model):
+            raise ValueError(self._bad_arg.format(arg=left))
+        if not isinstance(right, Model):
+            raise ValueError(self._bad_arg.format(arg=right))
+        if not callable(op):
+            raise ValueError(self._bad_op.format(op=op))
+
+        self.left  = left
+        self.right = right
+        self.op    = op
+
+        name_collisions = left.param_names & right.param_names
+        if len(name_collisions) > 0:
+            raise NameError(self._names_collide.format(clash=name_collisions))
+
+        if 'independent_vars' not in kws:
+            kws['independent_vars'] = self.left.independent_vars
+        if 'missing' not in kws:
+            kws['missing'] = self.left.missing
+
+        def _tmp(self, *args, **kws): pass
+        Model.__init__(self, _tmp, **kws)
+
+    def _parse_params(self):
+        self._func_haskeywords = (self.left._func_haskeywords or
+                                  self.right._func_haskeywords)
+        self._func_allargs = (self.left._func_allargs +
+                              self.right._func_allargs)
+        self.def_vals = deepcopy(self.right.def_vals)
+        self.def_vals.update(self.left.def_vals)
+        self.opts = deepcopy(self.right.opts)
+        self.opts.update(self.left.opts)
+
+    def _reprstring(self, long=False):
+        return "(%s %s %s)" % (self.left._reprstring(long=long),
+                               self._known_ops.get(self.op, self.op),
+                               self.right._reprstring(long=long))
+
+    def eval(self, params=None, **kwargs):
+        return self.op(self.left.eval(params=params, **kwargs),
+                       self.right.eval(params=params, **kwargs))
+
+    def eval_components(self, **kwargs):
+        """return ordered dict of name, results for each component"""
+        out = OrderedDict(self.left.eval_components(**kwargs))
+        out.update(self.right.eval_components(**kwargs))
+        return out
+
+    @property
+    def param_names(self):
+        return self.left.param_names | self.right.param_names
+
+    @property
+    def components(self):
+        """return components for composite model"""
+        return self.left.components + self.right.components
 
 
 class ModelFit(Minimizer):
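As a sketch of the new CompositeModel behaviour, two built-in models can be
combined with ordinary operators, and eval_components() returns one result per
component (the prefixes `g1_` and `bkg_` and the array `x` are illustrative):

    from lmfit.models import GaussianModel, LinearModel

    model = GaussianModel(prefix='g1_') + LinearModel(prefix='bkg_')
    params = model.make_params(g1_amplitude=5.0, g1_center=2.0, g1_sigma=0.5,
                               bkg_slope=0.0, bkg_intercept=1.0)
    comps = model.eval_components(params=params, x=x)
    # comps is an OrderedDict, e.g. {'g1_': array(...), 'bkg_': array(...)}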
@@ -558,9 +631,28 @@ class ModelFit(Minimizer):
          evaluate the current model, with the current parameter values,
          with values in kwargs sent to the model function.
 
+    eval_components(**kwargs)
+         evaluate the current model, with the current parameter values,
+         with values in kwargs sent to the model function, and returns
+         an ordered dict with the component model names as keys and the
+         component results as values.
+
    fit_report(modelpars=None, show_correl=True, min_correl=0.1)
          return a fit report.
 
+   plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+            numpoints=None,  data_kws=None, fit_kws=None, init_kws=None,
+            ax_kws=None)
+        Plot the fit results using matplotlib.
+
+   plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
+                  ax_kws=None)
+        Plot the fit residuals using matplotlib.
+
+   plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
+        data_kws=None, fit_kws=None, init_kws=None, ax_res_kws=None,
+        ax_fit_kws=None, fig_kws=None)
+        Plot the fit results and residuals using matplotlib.
     """
     def __init__(self, model, params, data=None, weights=None,
                  method='leastsq', fcn_args=None, fcn_kws=None,
@@ -598,18 +690,285 @@ class ModelFit(Minimizer):
         self.userkws.update(kwargs)
         return self.model.eval(params=self.params, **self.userkws)
 
+    def eval_components(self, **kwargs):
+        self.userkws.update(kwargs)
+        return self.model.eval_components(params=self.params, **self.userkws)
+
     def fit_report(self, modelpars=None, show_correl=True, min_correl=0.1):
         "return fit report"
         stats_report = fit_report(self, modelpars=modelpars,
                                  show_correl=show_correl,
                                  min_correl=min_correl)
         buff = ['[[Model]]']
-        if len(self.model.components)==0:
-            buff.append('    %s' % self.model._reprstring(long=True)[0])
-        else:
-            buff.append(' Composite Model:')
-            for x in self.model._reprstring(long=True):
-                buff.append('    %s' % x)
+        buff.append('    %s' % self.model._reprstring(long=True))
         buff = '\n'.join(buff)
         out = '%s\n%s' % (buff, stats_report)
         return out
+
+    @_ensureMatplotlib
+    def plot_fit(self, ax=None, datafmt='o', fitfmt='-', initfmt='--',
+                 numpoints=None,  data_kws=None, fit_kws=None, init_kws=None,
+                 ax_kws=None):
+        """Plot the fit results using matplotlib.
+
+        The method will plot results of the fit using matplotlib, including:
+        the data points, the initial fit curve and the fitted curve. If the fit
+        model included weights, errorbars will also be plotted.
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes, optional
+            The axes to plot on. The default is None, which means use the
+            current pyplot axis or create one if there is none.
+        datafmt : string, optional
+            matplotlib format string for data points
+        fitfmt : string, optional
+            matplotlib format string for fitted curve
+        initfmt : string, optional
+            matplotlib format string for initial conditions for the fit
+        numpoints : int, optional
+            If provided, the final and initial fit curves are evaluated not
+            only at data points, but refined to contain `numpoints` points in
+            total.
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        init_kws : dictionary, optional
+            keyword arguments passed on to the plot function for the initial
+            conditions of the fit
+        ax_kws : dictionary, optional
+            keyword arguments for a new axis, if there is one being created
+
+        Returns
+        -------
+        matplotlib.axes.Axes
+
+        Notes
+        -----
+        For details about plot format strings and keyword arguments see
+        documentation of matplotlib.axes.Axes.plot.
+
+        If the fit model included weights, then matplotlib.axes.Axes.errorbar
+        is used to plot the data, with yerr set to 1 / self.weights**0.5
+
+        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+
+        See Also
+        --------
+        ModelFit.plot_residuals : Plot the fit residuals using matplotlib.
+        ModelFit.plot : Plot the fit results and residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if init_kws is None:
+            init_kws = {}
+        if ax_kws is None:
+            ax_kws = {}
+
+        if len(self.model.independent_vars) == 1:
+            independent_var = self.model.independent_vars[0]
+        else:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(ax, plt.Axes):
+            ax = plt.gca(**ax_kws)
+
+        x_array = self.userkws[independent_var]
+
+        # make a dense array for x-axis if data is not dense
+        if numpoints is not None and len(self.data) < numpoints:
+            x_array_dense = np.linspace(x_array[0], x_array[-1], numpoints)
+        else:
+            x_array_dense = x_array
+
+        ax.plot(x_array_dense, self.model.eval(self.init_params,
+                **{independent_var: x_array_dense}), initfmt,
+                label='initial for ' + self.model.name, **init_kws)
+        ax.plot(x_array_dense, self.model.eval(self.params,
+                **{independent_var: x_array_dense}), fitfmt,
+                label=self.model.name, **fit_kws)
+
+        if self.weights is not None:
+            ax.errorbar(x_array, self.data, yerr=1/self.weights**0.5,
+                        fmt=datafmt, label='data', **data_kws)
+        else:
+            ax.plot(x_array, self.data, datafmt, label='data', **data_kws)
+
+        ax.set_xlabel(independent_var)
+        ax.set_ylabel('y')
+        ax.legend()
+
+        return ax
+
+    @_ensureMatplotlib
+    def plot_residuals(self, ax=None, datafmt='o', data_kws=None, fit_kws=None,
+                       ax_kws=None):
+        """Plot the fit residuals using matplotlib.
+
+        The method will plot residuals of the fit using matplotlib, including:
+        the data points and the fitted curve (as horizontal line). If the fit
+        model included weights, errorbars will also be plotted.
+
+        Parameters
+        ----------
+        ax : matplotlib.axes.Axes, optional
+            The axes to plot on. The default is None, which means use the
+            current pyplot axis or create one if there is none.
+        datafmt : string, optional
+            matplotlib format string for data points
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        ax_kws : dictionary, optional
+            keyword arguments for a new axis, if there is one being created
+
+        Returns
+        -------
+        matplotlib.axes.Axes
+
+        Notes
+        -----
+        For details about plot format strings and keyword arguments see
+        documentation of matplotlib.axes.Axes.plot.
+
+        If the fit model included weights, then matplotlib.axes.Axes.errorbar
+        is used to plot the data, with yerr set to 1 / self.weights**0.5
+
+        If `ax` is None then matplotlib.pyplot.gca(**ax_kws) is called.
+
+        See Also
+        --------
+        ModelFit.plot_fit : Plot the fit results using matplotlib.
+        ModelFit.plot : Plot the fit results and residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if ax_kws is None:
+            ax_kws = {}
+
+        if len(self.model.independent_vars) == 1:
+            independent_var = self.model.independent_vars[0]
+        else:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(ax, plt.Axes):
+            ax = plt.gca(**ax_kws)
+
+        x_array = self.userkws[independent_var]
+
+        ax.axhline(0, label=self.model.name, **fit_kws)
+
+        if self.weights is not None:
+            ax.errorbar(x_array, self.eval() - self.data,
+                        yerr=1 / self.weights**0.5, fmt=datafmt,
+                        label='residuals', **data_kws)
+        else:
+            ax.plot(x_array, self.eval() - self.data, datafmt,
+                    label='residuals', **data_kws)
+
+        ax.set_ylabel('residuals')
+        ax.legend()
+
+        return ax
+
+    @_ensureMatplotlib
+    def plot(self, datafmt='o', fitfmt='-', initfmt='--', numpoints=None,
+             fig=None, data_kws=None, fit_kws=None, init_kws=None,
+             ax_res_kws=None, ax_fit_kws=None, fig_kws=None):
+        """Plot the fit results and residuals using matplotlib.
+
+        The method will produce a matplotlib figure with both results of the
+        fit and the residuals plotted. If the fit model included weights,
+        errorbars will also be plotted.
+
+        Parameters
+        ----------
+        datafmt : string, optional
+            matplotlib format string for data points
+        fitfmt : string, optional
+            matplotlib format string for fitted curve
+        initfmt : string, optional
+            matplotlib format string for initial conditions for the fit
+        numpoints : int, optional
+            If provided, the final and initial fit curves are evaluated not
+            only at data points, but refined to contain `numpoints` points in
+            total.
+        fig : matplotlib.figure.Figure, optional
+            The figure to plot on. The default is None, which means use the
+            current pyplot figure or create one if there is none.
+        data_kws : dictionary, optional
+            keyword arguments passed on to the plot function for data points
+        fit_kws : dictionary, optional
+            keyword arguments passed on to the plot function for fitted curve
+        init_kws : dictionary, optional
+            keyword arguments passed on to the plot function for the initial
+            conditions of the fit
+        ax_res_kws : dictionary, optional
+            keyword arguments for the axes for the residuals plot
+        ax_fit_kws : dictionary, optional
+            keyword arguments for the axes for the fit plot
+        fig_kws : dictionary, optional
+            keyword arguments for a new figure, if there is one being created
+
+        Returns
+        -------
+        matplotlib.figure.Figure
+
+        Notes
+        -----
+        The method combines ModelFit.plot_fit and ModelFit.plot_residuals.
+
+        If the fit model included weights, then matplotlib.axes.Axes.errorbar
+        is used to plot the data, with yerr set to 1 / self.weights**0.5
+
+        If `fig` is None then matplotlib.pyplot.figure(**fig_kws) is called.
+
+        See Also
+        --------
+        ModelFit.plot_fit : Plot the fit results using matplotlib.
+        ModelFit.plot_residuals : Plot the fit residuals using matplotlib.
+        """
+        if data_kws is None:
+            data_kws = {}
+        if fit_kws is None:
+            fit_kws = {}
+        if init_kws is None:
+            init_kws = {}
+        if ax_res_kws is None:
+            ax_res_kws = {}
+        if ax_fit_kws is None:
+            ax_fit_kws = {}
+        if fig_kws is None:
+            fig_kws = {}
+
+        if len(self.model.independent_vars) != 1:
+            print('Fit can only be plotted if the model function has one '
+                  'independent variable.')
+            return False
+
+        if not isinstance(fig, plt.Figure):
+            fig = plt.figure(**fig_kws)
+
+        gs = plt.GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
+        ax_res = fig.add_subplot(gs[0], **ax_res_kws)
+        ax_fit = fig.add_subplot(gs[1], sharex=ax_res, **ax_fit_kws)
+
+        self.plot_fit(ax=ax_fit, datafmt=datafmt, fitfmt=fitfmt,
+                      initfmt=initfmt, numpoints=numpoints, data_kws=data_kws,
+                      fit_kws=fit_kws, init_kws=init_kws, ax_kws=ax_fit_kws)
+        self.plot_residuals(ax=ax_res, datafmt=datafmt, data_kws=data_kws,
+                            fit_kws=fit_kws, ax_kws=ax_res_kws)
+
+        return fig
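A sketch of the new plotting helpers on a fit result; matplotlib is required,
and `model`, `params`, `x` and `data` are the hypothetical objects from the
earlier sketches:

    result = model.fit(data, params, x=x)
    result.plot_fit()          # data plus initial and best-fit curves
    result.plot_residuals()    # residuals around a horizontal zero line
    fig = result.plot()        # combined figure with both panels
    fig.savefig('fit.png')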
diff --git a/lmfit/parameter.py b/lmfit/parameter.py
index 299818a..3862c66 100644
--- a/lmfit/parameter.py
+++ b/lmfit/parameter.py
@@ -13,8 +13,10 @@ from . import uncertainties
 from .astutils import valid_symbol_name
 
 class Parameters(OrderedDict):
-    """a custom dictionary of Parameters.  All keys must be
-    strings, and valid Python symbol names, and all values
+    """
+    A dictionary of all the Parameters required to specify a fit model.
+
+    All keys must be strings, and valid Python symbol names, and all values
     must be Parameters.
 
     Custom methods:
@@ -44,22 +46,34 @@ class Parameters(OrderedDict):
         return self
 
     def add(self, name, value=None, vary=True, min=None, max=None, expr=None):
-        """convenience function for adding a Parameter:
-        with   p = Parameters()
-        p.add(name, value=XX, ....)
+        """
+        Convenience function for adding a Parameter:
 
-        is equivalent to
+        Example
+        -------
+        p = Parameters()
+        p.add(name, value=XX, ...)
+
+        is equivalent to:
         p[name] = Parameter(name=name, value=XX, ...)
         """
         self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
                                          min=min, max=max, expr=expr))
 
     def add_many(self, *parlist):
-        """convenience function for adding a list of Parameters:
-        Here, you must provide a sequence of tuples, each containing
-        at least the name. The order in each tuple is the following:
+        """
+        Convenience function for adding a list of Parameters.
+
+        Parameters
+        ----------
+        parlist : sequence
+            A sequence of tuples, each containing at least the name. The
+            order in each tuple is the following:
             name, value, vary, min, max, expr
-        with   p = Parameters()
+
+        Example
+        -------
+        p = Parameters()
         p.add_many( (name1, val1, True, None, None, None),
                     (name2, val2, True,  0.0, None, None),
                     (name3, val3, False, None, None, None),
@@ -70,21 +84,58 @@ class Parameters(OrderedDict):
             self.add(*para)
 
     def valuesdict(self):
-        """return on ordered dictionary of name:value pairs for each Parameter.
+        """
+        Returns
+        -------
+        An ordered dictionary of name:value pairs for each Parameter.
         This is distinct from the Parameters itself, as it has values of
-        the Parameeter values, not the full Parameter object """
+        the Parameter values, not the full Parameter objects.
+        """
 
         return OrderedDict(((p.name, p.value) for p in self.values()))
 
 
 class Parameter(object):
-    """A Parameter is the basic Parameter going
-    into Fit Model.  The Parameter holds many attributes:
-    value, vary, max_value, min_value, constraint expression.
-    The value and min/max values will be be set to floats.
+    """
+    A Parameter is an object used to define a Fit Model.
+
+    Attributes
+    ----------
+    name : str
+        Parameter name.
+    value : float
+        The numerical value of the Parameter.
+    vary : bool
+        Whether the Parameter is varied during a fit.
+    min : float
+        Lower bound for value (None = no lower bound).
+    max : float
+        Upper bound for value (None = no upper bound).
+    expr : str
+        An expression specifying constraints for the parameter.
+    stderr : float
+        The estimated standard error for the best-fit value.
+    correl : dict
+        Specifies correlation with the other fitted Parameters after a fit.
+        Of the form `{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}`
     """
     def __init__(self, name=None, value=None, vary=True,
                  min=None, max=None, expr=None):
+        """
+        Parameters
+        ----------
+        name : str, optional
+            Name of the parameter.
+        value : float, optional
+            Numerical Parameter value.
+        vary : bool, optional
+            Whether the Parameter is varied during a fit.
+        min : float, optional
+            Lower bound for value (None = no lower bound).
+        max : float, optional
+            Upper bound for value (None = no upper bound).
+        expr : str, optional
+            Mathematical expression used to constrain the value during the fit.
+        """
         self.name = name
         self._val = value
         self.user_value = value
@@ -100,7 +151,23 @@ class Parameter(object):
         self._init_bounds()
 
     def set(self, value=None, vary=None, min=None, max=None, expr=None):
-        "set value, vary, min, max, expr with keyword args"
+        """
+        Set or update Parameter attributes.
+
+        Parameters
+        ----------
+        value : float, optional
+            Numerical Parameter value.
+        vary : bool, optional
+            Whether the Parameter is varied during a fit.
+        min : float, optional
+            Lower bound for value. To remove a lower bound you must use -np.inf.
+        max : float, optional
+            Upper bound for value. To remove an upper bound you must use np.inf.
+        expr : str, optional
+            Mathematical expression used to constrain the value during the fit.
+            To remove a constraint you must supply an empty string.
+        """
         if value is not None:
             self._val = value
         if vary is not None:
@@ -154,13 +221,10 @@ class Parameter(object):
         return "<Parameter %s>" % ', '.join(s)
 
     def setup_bounds(self):
-        """set up Minuit-style internal/external parameter transformation
+        """
+        Set up Minuit-style internal/external parameter transformation
         of min/max bounds.
 
-        returns internal value for parameter from self.value (which holds
-        the external, user-expected value).   This internal values should
-        actually be used in a fit....
-
         As a side-effect, this also defines the self.from_internal method
         used to re-calculate self.value from the internal value, applying
         the inverse Minuit-style transformation.  This method should be
@@ -168,7 +232,13 @@ class Parameter(object):
         function.
 
         This code borrows heavily from JJ Helmus' leastsqbound.py
-        """
+
+        Returns
+        -------
+        The internal value for the parameter, derived from self.value (which
+        holds the external, user-expected value). This internal value should
+        actually be used in a fit.
+        """
         if self.min in (None, -inf) and self.max in (None, inf):
             self.from_internal = lambda val: val
             _val  = self._val
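For reference, the doubly-bounded branch of this Minuit-style transformation
can be sketched as follows (a simplified illustration of the mapping, not the
exact code in this method):

    import numpy as np

    def to_internal(value, vmin, vmax):
        # external (bounded) value -> unbounded internal value
        return np.arcsin(2.0 * (value - vmin) / (vmax - vmin) - 1.0)

    def from_internal(val, vmin, vmax):
        # internal value -> external value, always inside [vmin, vmax]
        return vmin + (np.sin(val) + 1.0) * (vmax - vmin) / 2.0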
@@ -185,7 +255,10 @@ class Parameter(object):
         return _val
 
     def scale_gradient(self, val):
-        """returns scaling factor for gradient the according to Minuit-style
+        """
+        Returns
+        -------
+        The scaling factor for the gradient, according to the Minuit-style
+        transformation.
         """
         if self.min in (None, -inf) and self.max in (None, inf):
@@ -225,22 +298,27 @@ class Parameter(object):
 
     @property
     def value(self):
-        "get value"
+        "The numerical value of the Parameter, with bounds applied"
         return self._getval()
 
     @value.setter
     def value(self, val):
-        "set value"
+        "Set the numerical Parameter value."
         self._val = val
 
     @property
     def expr(self):
-        "get expression"
+        """
+        The mathematical expression used to constrain the value during the fit.
+        """
         return self._expr
 
     @expr.setter
     def expr(self, val):
-        "set expr"
+        """
+        The mathematical expression used to constrain the value during the fit.
+        To remove a constraint you must supply an empty string.
+        """
         if val == '':
             val = None
         self._expr = val
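A short sketch of the updated Parameters API: add_many() takes (name, value,
vary, min, max, expr) tuples, set() updates attributes in place, and
valuesdict() returns plain name:value pairs:

    from lmfit import Parameters

    p = Parameters()
    p.add_many(('amp',   10.0, True, 0.0,  None, None),
               ('decay',  0.1, True, None, None, None))
    p['amp'].set(max=20.0)    # update a bound in place
    print(p.valuesdict())     # OrderedDict([('amp', 10.0), ('decay', 0.1)])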
diff --git a/tests/test_basicfit.py b/tests/test_basicfit.py
index c579652..97f4f15 100644
--- a/tests/test_basicfit.py
+++ b/tests/test_basicfit.py
@@ -2,6 +2,7 @@ import numpy as np
 from lmfit import minimize, Parameters, Parameter, report_fit
 from lmfit_testutils import assert_paramval, assert_paramattr
 
+
 def test_basic():
     # create data to be fitted
     x = np.linspace(0, 15, 301)
@@ -41,5 +42,6 @@ def test_basic():
     assert_paramval(params['amp'],   5.03, tol=0.05)
     assert_paramval(params['omega'], 2.0, tol=0.05)
 
+
 if __name__ == '__main__':
     test_basic()
diff --git a/tests/test_nose.py b/tests/test_nose.py
index 9925d30..1b92b11 100644
--- a/tests/test_nose.py
+++ b/tests/test_nose.py
@@ -1,9 +1,10 @@
 # -*- coding: utf-8 -*-
 from __future__ import print_function
 from lmfit import minimize, Parameters, Parameter, report_fit, Minimizer
+from lmfit.minimizer import SCALAR_METHODS
 from lmfit.lineshapes import gaussian
 import numpy as np
-pi = np.pi
+from numpy import pi
 import unittest
 import nose
 from nose import SkipTest
@@ -44,7 +45,7 @@ def test_simple():
     params = Parameters()
     params.add('amp',   value= 10,  min=0)
     params.add('decay', value= 0.1)
-    params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
+    params.add('shift', value= 0.0, min=-pi / 2., max=pi / 2)
     params.add('omega', value= 3.0)
 
 
@@ -76,8 +77,8 @@ def test_lbfgsb():
         decay = pars['decay'].value
 
         if abs(shift) > pi/2:
-            shift = shift - np.sign(shift)*pi
-        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
+            shift = shift - np.sign(shift) * pi
+        model = amp * np.sin(shift + x / per) * np.exp(-x * x * decay * decay)
         if data is None:
             return model
         return (model - data)
@@ -232,7 +233,7 @@ def test_peakfit():
     check_paras(fit_params, org_params)
 
 
-class CommonMinimizerTest(object):
+class CommonMinimizerTest(unittest.TestCase):
 
     def setUp(self):
         """
@@ -246,7 +247,6 @@ class CommonMinimizerTest(object):
         p_true.add('decay', value=0.010)
         self.p_true = p_true
 
-
         n = 2500
         xmin = 0.
         xmax = 250.0
@@ -271,39 +271,32 @@ class CommonMinimizerTest(object):
         decay = pars['decay'].value
 
         if abs(shift) > pi/2:
-            shift = shift - np.sign(shift)*pi
+            shift = shift - np.sign(shift) * pi
         model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
         if data is None:
             return model
         return (model - data)
-
-    def test_scalar_minimizer(self):
-        try:
-            from scipy.optimize import minimize as scipy_minimize
-        except ImportError:
-            raise SkipTest
-
-        print(self.minimizer)
-        self.mini.scalar_minimize(method=self.minimizer)
-
-        fit = self.residual(self.fit_params, self.x)
-
-        for name, par in self.fit_params.items():
-            nout = "%s:%s" % (name, ' '*(20-len(name)))
-            print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))
-
-        for para, true_para in zip(self.fit_params.values(),
-                                   self.p_true.values()):
-            check_wo_stderr(para, true_para.value, sig=0.15)
-
-class TestNelder_Mead(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'Nelder-Mead'
-        super(TestNelder_Mead, self).setUp()
-
-    # override default because Nelder-Mead is less precise
-    def test_scalar_minimizer(self):
+
+    def test_diffev_bounds_check(self):
+        # You need finite (min, max) for each parameter if you're using
+        # differential_evolution.
+        self.fit_params['decay'].min = None
+        self.minimizer = 'differential_evolution'
+        np.testing.assert_raises(ValueError, self.scalar_minimizer)
+
+    def test_scalar_minimizers(self):
+        # test all the scalar minimizers
+        for method in SCALAR_METHODS:
+            if method in ['newton', 'dogleg', 'trust-ncg']:
+                continue
+            self.minimizer = SCALAR_METHODS[method]
+            if method == 'nelder':
+                sig = 0.2
+            else:
+                sig = 0.15
+            self.scalar_minimizer(sig=sig)
+
+    def scalar_minimizer(self, sig=0.15):
         try:
             from scipy.optimize import minimize as scipy_minimize
         except ImportError:
@@ -320,56 +313,7 @@ class TestNelder_Mead(CommonMinimizerTest, unittest.TestCase):
 
         for para, true_para in zip(self.fit_params.values(),
                                    self.p_true.values()):
-            check_wo_stderr(para, true_para.value, sig=0.2)
-
-
-class TestL_BFGS_B(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'L-BFGS-B'
-        super(TestL_BFGS_B, self).setUp()
-
-
-class TestTNC(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'TNC'
-        super(TestTNC, self).setUp()
-
-
-class TestCOBYLA(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'COBYLA'
-        super(TestCOBYLA, self).setUp()
-
-
-class TestSLSQP(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'SLSQP'
-        super(TestSLSQP, self).setUp()
-
-
-class TestBFGS(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'BFGS'
-        super(TestBFGS, self).setUp()
-
-
-class TestCG(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'CG'
-        super(TestCG, self).setUp()
-
-
-class TestPowell(CommonMinimizerTest, unittest.TestCase):
-
-    def setUp(self):
-        self.minimizer = 'Powell'
-        super(TestPowell, self).setUp()
+            check_wo_stderr(para, true_para.value, sig=sig)
 
 
 if __name__ == '__main__':

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git


