[lmfit-py] 07/15: Imported Upstream version 0.7.4+dfsg.1
Frédéric-Emmanuel Picca
picca at moszumanska.debian.org
Sat Jul 26 13:18:22 UTC 2014
This is an automated email from the git hooks/post-receive script.
picca pushed a commit to branch master
in repository lmfit-py.
commit 08c6321ded9afe63501906e401a58e7aef25b7de
Author: Picca Frédéric-Emmanuel <picca at debian.org>
Date: Sat Jul 26 12:01:25 2014 +0200
Imported Upstream version 0.7.4+dfsg.1
---
.gitignore | 10 +
.travis.yml | 57 ++
PKG-INFO | 35 --
README => README.md | 0
THANKS.txt | 15 +
doc/ext/ipython_console_highlighting.py | 114 ----
doc/ext/ipython_directive.py | 830 ----------------------------
doc/ext/numpydoc/__init__.py | 1 -
doc/ext/numpydoc/comment_eater.py | 158 ------
doc/ext/numpydoc/compiler_unparse.py | 860 ------------------------------
doc/ext/numpydoc/docscrape.py | 500 -----------------
doc/ext/numpydoc/docscrape_sphinx.py | 227 --------
doc/ext/numpydoc/numpydoc.py | 164 ------
doc/ext/numpydoc/phantom_import.py | 162 ------
doc/ext/numpydoc/plot_directive.py | 619 ---------------------
doc/ext/numpydoc/traitsdoc.py | 140 -----
doc/installation.rst | 16 +-
doc/models1d.rst | 2 +-
doc/parameters.rst | 2 +-
examples/NISTModels.py | 196 +++++++
examples/NIST_STRD/Bennett5.dat | 214 ++++++++
examples/NIST_STRD/BoxBOD.dat | 66 +++
examples/NIST_STRD/Chwirut1.dat | 274 ++++++++++
examples/NIST_STRD/Chwirut2.dat | 114 ++++
examples/NIST_STRD/DanWood.dat | 66 +++
examples/NIST_STRD/ENSO.dat | 228 ++++++++
examples/NIST_STRD/Eckerle4.dat | 95 ++++
examples/NIST_STRD/Gauss1.dat | 310 +++++++++++
examples/NIST_STRD/Gauss2.dat | 310 +++++++++++
examples/NIST_STRD/Gauss3.dat | 310 +++++++++++
examples/NIST_STRD/Hahn1.dat | 296 ++++++++++
examples/NIST_STRD/Kirby2.dat | 211 ++++++++
examples/NIST_STRD/Lanczos1.dat | 84 +++
examples/NIST_STRD/Lanczos2.dat | 84 +++
examples/NIST_STRD/Lanczos3.dat | 84 +++
examples/NIST_STRD/MGH09.dat | 71 +++
examples/NIST_STRD/MGH10.dat | 76 +++
examples/NIST_STRD/MGH17.dat | 93 ++++
examples/NIST_STRD/Misra1a.dat | 74 +++
examples/NIST_STRD/Misra1b.dat | 74 +++
examples/NIST_STRD/Misra1c.dat | 74 +++
examples/NIST_STRD/Misra1d.dat | 74 +++
examples/NIST_STRD/Models | 215 ++++++++
examples/NIST_STRD/Nelson.dat | 188 +++++++
examples/NIST_STRD/Rat42.dat | 69 +++
examples/NIST_STRD/Rat43.dat | 75 +++
examples/NIST_STRD/Roszman1.dat | 85 +++
examples/NIST_STRD/Thurber.dat | 97 ++++
examples/example_anneal.py | 63 +++
examples/example_ci.py | 112 ++++
examples/example_ci2.py | 90 ++++
examples/example_covar.py | 93 ++++
examples/example_derivfunc.py | 85 +++
examples/example_lbfgsb.py | 66 +++
examples/example_peakmodel.py | 33 ++
examples/example_stepmodel.py | 28 +
examples/fit1.py | 63 +++
examples/fit_NIST.py | 163 ++++++
examples/fit_NIST_leastsq.py | 148 +++++
examples/fit_NIST_scipy_lmdif.py | 147 +++++
examples/fit_multi_datasets.py | 69 +++
examples/fit_pvoigt.py | 99 ++++
examples/fit_pvoigt2.py | 87 +++
examples/fit_pvoigt_NelderMead.py | 100 ++++
examples/fit_pvoigt_NelderMead2.py | 85 +++
examples/fit_with_algebraic_constraint.py | 85 +++
examples/fit_with_bounds.py | 62 +++
examples/lmfit-model.ipynb | 567 ++++++++++++++++++++
examples/m1.py | 26 +
examples/model1d_doc1.py | 23 +
examples/model1d_doc2.py | 38 ++
examples/model1d_gauss.dat | 103 ++++
examples/models.py | 128 +++++
examples/peakfit_1.py | 75 +++
examples/simple.py | 46 ++
examples/use_models1d.py | 44 ++
lmfit.egg-info/PKG-INFO | 35 --
lmfit.egg-info/SOURCES.txt | 73 ---
lmfit.egg-info/dependency_links.txt | 1 -
lmfit.egg-info/top_level.txt | 1 -
lmfit/uncertainties/LICENSE.txt | 10 +
lmfit/uncertainties/README | 14 +
setup.cfg | 5 -
upload_wininst.bat | 18 +
use_py26.bat | 2 +
use_py27.bat | 2 +
use_py32.bat | 2 +
87 files changed, 7075 insertions(+), 3935 deletions(-)
diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..5750c74
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+*.pyc
+*~
+*#
+doc/_build
+doc/*.pdf
+build
+dist
+lmfit.egg-info
+sandbox/
+
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..2fac278
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,57 @@
+# Config file for automatic testing at travis-ci.org
+
+language: python
+
+python:
+ - 2.6
+
+matrix:
+ include:
+ - python: 2.7
+ env:
+ - PYTHON=python
+ - PYVER=2.x
+ - python: 3.2
+ env:
+ - PYTHON=python3
+ - PYVER=3.x
+ exclude:
+ - python: 2.6
+
+virtualenv:
+ system_site_packages: true
+
+before_install:
+ - export DISPLAY=:99.0
+ - sh -e /etc/init.d/xvfb start
+
+ - sudo apt-get update
+ - sudo apt-get install $PYTHON-setuptools
+ - sudo apt-get install $PYTHON-numpy
+ - sudo apt-get install $PYTHON-scipy
+ - sudo apt-get install $PYTHON-sphinx
+ - sudo apt-get install $PYTHON-nose
+ - sudo pip -q install --use-mirrors uncertainties asteval
+
+
+ - if [[ $PYVER == '2.x' ]]; then
+ - sudo apt-get install ipython
+ - sudo apt-get install $PYTHON-matplotlib;
+ - fi
+ - if [[ $PYVER == '3.x' ]]; then
+ - sudo apt-get install ipython3
+ - pip install --use-mirrors matplotlib;
+ - fi
+
+
+install:
+ - sudo $PYTHON setup.py install
+
+script:
+ # Execute the unit tests
+ - nosetests tests
+ # Generate the docs
+ - if [[ $PYVER == '2.x' ]]; then
+ - cd doc
+ - make html
+ - fi
diff --git a/PKG-INFO b/PKG-INFO
deleted file mode 100644
index aaac6b9..0000000
--- a/PKG-INFO
+++ /dev/null
@@ -1,35 +0,0 @@
-Metadata-Version: 1.1
-Name: lmfit
-Version: 0.7.4
-Summary: Least-Squares Minimization with Bounds and Constraints
-Home-page: http://lmfit.github.io/lmfit-py/
-Author: LM-Fit Development Team
-Author-email: matt.newville at gmail.com
-License: BSD
-Download-URL: http://lmfit.github.io//lmfit-py/
-Description: A library for least-squares minimization and data fitting in
- Python. Built on top of scipy.optimize, lmfit provides a Parameter object
- which can be set as fixed or free, can have upper and/or lower bounds, or
- can be written in terms of algebraic constraints of other Parameters. The
- user writes a function to be minimized as a function of these Parameters,
- and the scipy.optimize methods are used to find the optimal values for the
- Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
- algorithm, and provides estimated standard errors and correlations between
- varied Parameters. Other minimization methods, including Nelder-Mead's
- downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and contraints can be placed on
- Parameters for all of these methods.
-
- In addition, methods for explicitly calculating confidence intervals are
- provided for exploring minmization problems where the approximation of
- estimating Parameter uncertainties from the covariance matrix is
- questionable.
-Platform: Windows
-Platform: Linux
-Platform: Mac OS X
-Classifier: Intended Audience :: Science/Research
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Topic :: Scientific/Engineering
-Requires: numpy
-Requires: scipy
diff --git a/README b/README.md
similarity index 100%
rename from README
rename to README.md
diff --git a/THANKS.txt b/THANKS.txt
new file mode 100644
index 0000000..53b7e78
--- /dev/null
+++ b/THANKS.txt
@@ -0,0 +1,15 @@
+Many people have contributed to lmfit.
+
+Matthew Newville wrote the original implementation.
+Till Stensitzki wrote the improved estimates of confidence intervals,
+ and contributed many tests, bug fixes, and documentation.
+Daniel B. Allan wrote much of the high level Models, and many
+ improvements to the testing and documentation.
+J. J. Helmus wrote the MINUT bounds for leastsq, originally in
+ leastsqbounds.py, and ported to lmfit.
+E. O. Lebigot wrote the uncertainties package, a version of which is
+ used here.
+
+Additional patches, bug fixes, and suggestions have come from
+ Christohp Deil, Francois Boulogne, Colin Brosseau, nmearl,
+ Gustavo Pasquevich, and Ben Gamari
diff --git a/doc/ext/ipython_console_highlighting.py b/doc/ext/ipython_console_highlighting.py
deleted file mode 100644
index f0a41be..0000000
--- a/doc/ext/ipython_console_highlighting.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""reST directive for syntax-highlighting ipython interactive sessions.
-
-XXX - See what improvements can be made based on the new (as of Sept 2009)
-'pycon' lexer for the python console. At the very least it will give better
-highlighted tracebacks.
-"""
-
-#-----------------------------------------------------------------------------
-# Needed modules
-
-# Standard library
-import re
-
-# Third party
-from pygments.lexer import Lexer, do_insertions
-from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
- PythonTracebackLexer)
-from pygments.token import Comment, Generic
-
-from sphinx import highlighting
-
-#-----------------------------------------------------------------------------
-# Global constants
-line_re = re.compile('.*?\n')
-
-#-----------------------------------------------------------------------------
-# Code begins - classes and functions
-
-class IPythonConsoleLexer(Lexer):
- """
- For IPython console output or doctests, such as:
-
- .. sourcecode:: ipython
-
- In [1]: a = 'foo'
-
- In [2]: a
- Out[2]: 'foo'
-
- In [3]: print a
- foo
-
- In [4]: 1 / 0
-
- Notes:
-
- - Tracebacks are not currently supported.
-
- - It assumes the default IPython prompts, not customized ones.
- """
-
- name = 'IPython console session'
- aliases = ['ipython']
- mimetypes = ['text/x-ipython-console']
- input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)")
- output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)")
- continue_prompt = re.compile(" \.\.\.+:")
- tb_start = re.compile("\-+")
-
- def get_tokens_unprocessed(self, text):
- pylexer = PythonLexer(**self.options)
- tblexer = PythonTracebackLexer(**self.options)
-
- curcode = ''
- insertions = []
- for match in line_re.finditer(text):
- line = match.group()
- input_prompt = self.input_prompt.match(line)
- continue_prompt = self.continue_prompt.match(line.rstrip())
- output_prompt = self.output_prompt.match(line)
- if line.startswith("#"):
- insertions.append((len(curcode),
- [(0, Comment, line)]))
- elif input_prompt is not None:
- insertions.append((len(curcode),
- [(0, Generic.Prompt, input_prompt.group())]))
- curcode += line[input_prompt.end():]
- elif continue_prompt is not None:
- insertions.append((len(curcode),
- [(0, Generic.Prompt, continue_prompt.group())]))
- curcode += line[continue_prompt.end():]
- elif output_prompt is not None:
- # Use the 'error' token for output. We should probably make
- # our own token, but error is typicaly in a bright color like
- # red, so it works fine for our output prompts.
- insertions.append((len(curcode),
- [(0, Generic.Error, output_prompt.group())]))
- curcode += line[output_prompt.end():]
- else:
- if curcode:
- for item in do_insertions(insertions,
- pylexer.get_tokens_unprocessed(curcode)):
- yield item
- curcode = ''
- insertions = []
- yield match.start(), Generic.Output, line
- if curcode:
- for item in do_insertions(insertions,
- pylexer.get_tokens_unprocessed(curcode)):
- yield item
-
-
-def setup(app):
- """Setup as a sphinx extension."""
-
- # This is only a lexer, so adding it below to pygments appears sufficient.
- # But if somebody knows that the right API usage should be to do that via
- # sphinx, by all means fix it here. At least having this setup.py
- # suppresses the sphinx warning we'd get without it.
- pass
-
-#-----------------------------------------------------------------------------
-# Register the extension as a valid pygments lexer
-highlighting.lexers['ipython'] = IPythonConsoleLexer()
diff --git a/doc/ext/ipython_directive.py b/doc/ext/ipython_directive.py
deleted file mode 100644
index 89c1f54..0000000
--- a/doc/ext/ipython_directive.py
+++ /dev/null
@@ -1,830 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Sphinx directive to support embedded IPython code.
-
-This directive allows pasting of entire interactive IPython sessions, prompts
-and all, and their code will actually get re-executed at doc build time, with
-all prompts renumbered sequentially. It also allows you to input code as a pure
-python input by giving the argument python to the directive. The output looks
-like an interactive ipython section.
-
-To enable this directive, simply list it in your Sphinx ``conf.py`` file
-(making sure the directory where you placed it is visible to sphinx, as is
-needed for all Sphinx directives).
-
-By default this directive assumes that your prompts are unchanged IPython ones,
-but this can be customized. The configurable options that can be placed in
-conf.py are
-
-ipython_savefig_dir:
- The directory in which to save the figures. This is relative to the
- Sphinx source directory. The default is `html_static_path`.
-ipython_rgxin:
- The compiled regular expression to denote the start of IPython input
- lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
- shouldn't need to change this.
-ipython_rgxout:
- The compiled regular expression to denote the start of IPython output
- lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
- shouldn't need to change this.
-ipython_promptin:
- The string to represent the IPython input prompt in the generated ReST.
- The default is 'In [%d]:'. This expects that the line numbers are used
- in the prompt.
-ipython_promptout:
-
- The string to represent the IPython prompt in the generated ReST. The
- default is 'Out [%d]:'. This expects that the line numbers are used
- in the prompt.
-
-ToDo
-----
-
-- Turn the ad-hoc test() function into a real test suite.
-- Break up ipython-specific functionality from matplotlib stuff into better
- separated code.
-
-Authors
--------
-
-- John D Hunter: orignal author.
-- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
-- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
-- Skipper Seabold, refactoring, cleanups, pure python addition
-"""
-
-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-# Stdlib
-import cStringIO
-import os
-import re
-import sys
-import tempfile
-import ast
-
-# To keep compatibility with various python versions
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
-
-# Third-party
-import matplotlib
-import sphinx
-from docutils.parsers.rst import directives
-from docutils import nodes
-from sphinx.util.compat import Directive
-
-matplotlib.use('Agg')
-
-# Our own
-from IPython import Config, InteractiveShell
-from IPython.core.profiledir import ProfileDir
-from IPython.utils import io
-
-#-----------------------------------------------------------------------------
-# Globals
-#-----------------------------------------------------------------------------
-# for tokenizing blocks
-COMMENT, INPUT, OUTPUT = range(3)
-
-#-----------------------------------------------------------------------------
-# Functions and class declarations
-#-----------------------------------------------------------------------------
-def block_parser(part, rgxin, rgxout, fmtin, fmtout):
- """
- part is a string of ipython text, comprised of at most one
- input, one ouput, comments, and blank lines. The block parser
- parses the text into a list of::
-
- blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
-
- where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
- data is, depending on the type of token::
-
- COMMENT : the comment string
-
- INPUT: the (DECORATOR, INPUT_LINE, REST) where
- DECORATOR: the input decorator (or None)
- INPUT_LINE: the input as string (possibly multi-line)
- REST : any stdout generated by the input line (not OUTPUT)
-
-
- OUTPUT: the output string, possibly multi-line
- """
-
- block = []
- lines = part.split('\n')
- N = len(lines)
- i = 0
- decorator = None
- while 1:
-
- if i==N:
- # nothing left to parse -- the last line
- break
-
- line = lines[i]
- i += 1
- line_stripped = line.strip()
- if line_stripped.startswith('#'):
- block.append((COMMENT, line))
- continue
-
- if line_stripped.startswith('@'):
- # we're assuming at most one decorator -- may need to
- # rethink
- decorator = line_stripped
- continue
-
- # does this look like an input line?
- matchin = rgxin.match(line)
- if matchin:
- lineno, inputline = int(matchin.group(1)), matchin.group(2)
-
- # the ....: continuation string
- continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
- Nc = len(continuation)
- # input lines can continue on for more than one line, if
- # we have a '\' line continuation char or a function call
- # echo line 'print'. The input line can only be
- # terminated by the end of the block or an output line, so
- # we parse out the rest of the input line if it is
- # multiline as well as any echo text
-
- rest = []
- while i<N:
-
- # look ahead; if the next line is blank, or a comment, or
- # an output line, we're done
-
- nextline = lines[i]
- matchout = rgxout.match(nextline)
- #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
- if matchout or nextline.startswith('#'):
- break
- elif nextline.startswith(continuation):
- inputline += '\n' + nextline[Nc:]
- else:
- rest.append(nextline)
- i+= 1
-
- block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
- continue
-
- # if it looks like an output line grab all the text to the end
- # of the block
- matchout = rgxout.match(line)
- if matchout:
- lineno, output = int(matchout.group(1)), matchout.group(2)
- if i<N-1:
- output = '\n'.join([output] + lines[i:])
-
- block.append((OUTPUT, output))
- break
-
- return block
-
-class EmbeddedSphinxShell(object):
- """An embedded IPython instance to run inside Sphinx"""
-
- def __init__(self):
-
- self.cout = cStringIO.StringIO()
-
-
- # Create config object for IPython
- config = Config()
- config.Global.display_banner = False
- config.Global.exec_lines = ['import numpy as np',
- 'from pylab import *'
- ]
- config.InteractiveShell.autocall = False
- config.InteractiveShell.autoindent = False
- config.InteractiveShell.colors = 'NoColor'
-
- # create a profile so instance history isn't saved
- tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
- profname = 'auto_profile_sphinx_build'
- pdir = os.path.join(tmp_profile_dir,profname)
- profile = ProfileDir.create_profile_dir(pdir)
-
- # Create and initialize ipython, but don't start its mainloop
- IP = InteractiveShell.instance(config=config, profile_dir=profile)
- # io.stdout redirect must be done *after* instantiating InteractiveShell
- io.stdout = self.cout
- io.stderr = self.cout
-
- # For debugging, so we can see normal output, use this:
- #from IPython.utils.io import Tee
- #io.stdout = Tee(self.cout, channel='stdout') # dbg
- #io.stderr = Tee(self.cout, channel='stderr') # dbg
-
- # Store a few parts of IPython we'll need.
- self.IP = IP
- self.user_ns = self.IP.user_ns
- self.user_global_ns = self.IP.user_global_ns
-
- self.input = ''
- self.output = ''
-
- self.is_verbatim = False
- self.is_doctest = False
- self.is_suppress = False
-
- # on the first call to the savefig decorator, we'll import
- # pyplot as plt so we can make a call to the plt.gcf().savefig
- self._pyplot_imported = False
-
- def clear_cout(self):
- self.cout.seek(0)
- self.cout.truncate(0)
-
- def process_input_line(self, line, store_history=True):
- """process the input, capturing stdout"""
- #print "input='%s'"%self.input
- stdout = sys.stdout
- splitter = self.IP.input_splitter
- try:
- sys.stdout = self.cout
- splitter.push(line)
- more = splitter.push_accepts_more()
- if not more:
- source_raw = splitter.source_raw_reset()[1]
- self.IP.run_cell(source_raw, store_history=store_history)
- finally:
- sys.stdout = stdout
-
- def process_image(self, decorator):
- """
- # build out an image directive like
- # .. image:: somefile.png
- # :width 4in
- #
- # from an input like
- # savefig somefile.png width=4in
- """
- savefig_dir = self.savefig_dir
- source_dir = self.source_dir
- saveargs = decorator.split(' ')
- filename = saveargs[1]
- # insert relative path to image file in source
- outfile = os.path.relpath(os.path.join(savefig_dir,filename),
- source_dir)
-
- imagerows = ['.. image:: %s'%outfile]
-
- for kwarg in saveargs[2:]:
- arg, val = kwarg.split('=')
- arg = arg.strip()
- val = val.strip()
- imagerows.append(' :%s: %s'%(arg, val))
-
- image_file = os.path.basename(outfile) # only return file name
- image_directive = '\n'.join(imagerows)
- return image_file, image_directive
-
-
- # Callbacks for each type of token
- def process_input(self, data, input_prompt, lineno):
- """Process data block for INPUT token."""
- decorator, input, rest = data
- image_file = None
- image_directive = None
- #print 'INPUT:', data # dbg
- is_verbatim = decorator=='@verbatim' or self.is_verbatim
- is_doctest = decorator=='@doctest' or self.is_doctest
- is_suppress = decorator=='@suppress' or self.is_suppress
- is_savefig = decorator is not None and \
- decorator.startswith('@savefig')
-
- input_lines = input.split('\n')
- if len(input_lines) > 1:
- if input_lines[-1] != "":
- input_lines.append('') # make sure there's a blank line
- # so splitter buffer gets reset
-
- continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
- Nc = len(continuation)
-
- if is_savefig:
- image_file, image_directive = self.process_image(decorator)
-
- ret = []
- is_semicolon = False
-
- for i, line in enumerate(input_lines):
- if line.endswith(';'):
- is_semicolon = True
-
- if i==0:
- # process the first input line
- if is_verbatim:
- self.process_input_line('')
- self.IP.execution_count += 1 # increment it anyway
- else:
- # only submit the line in non-verbatim mode
- self.process_input_line(line, store_history=True)
- formatted_line = '%s %s'%(input_prompt, line)
- else:
- # process a continuation line
- if not is_verbatim:
- self.process_input_line(line, store_history=True)
-
- formatted_line = '%s %s'%(continuation, line)
-
- if not is_suppress:
- ret.append(formatted_line)
-
- if not is_suppress and len(rest.strip()) and is_verbatim:
- # the "rest" is the standard output of the
- # input, which needs to be added in
- # verbatim mode
- ret.append(rest)
-
- self.cout.seek(0)
- output = self.cout.read()
- if not is_suppress and not is_semicolon:
- ret.append(output)
- elif is_semicolon: # get spacing right
- ret.append('')
-
- self.cout.truncate(0)
- return (ret, input_lines, output, is_doctest, image_file,
- image_directive)
- #print 'OUTPUT', output # dbg
-
- def process_output(self, data, output_prompt,
- input_lines, output, is_doctest, image_file):
- """Process data block for OUTPUT token."""
- if is_doctest:
- submitted = data.strip()
- found = output
- if found is not None:
- found = found.strip()
-
- # XXX - fperez: in 0.11, 'output' never comes with the prompt
- # in it, just the actual output text. So I think all this code
- # can be nuked...
-
- # the above comment does not appear to be accurate... (minrk)
-
- ind = found.find(output_prompt)
- if ind<0:
- e='output prompt="%s" does not match out line=%s' % \
- (output_prompt, found)
- raise RuntimeError(e)
- found = found[len(output_prompt):].strip()
-
- if found!=submitted:
- e = ('doctest failure for input_lines="%s" with '
- 'found_output="%s" and submitted output="%s"' %
- (input_lines, found, submitted) )
- raise RuntimeError(e)
- #print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
-
- def process_comment(self, data):
- """Process data fPblock for COMMENT token."""
- if not self.is_suppress:
- return [data]
-
- def save_image(self, image_file):
- """
- Saves the image file to disk.
- """
- self.ensure_pyplot()
- command = 'plt.gcf().savefig("%s")'%image_file
- #print 'SAVEFIG', command # dbg
- self.process_input_line('bookmark ipy_thisdir', store_history=False)
- self.process_input_line('cd -b ipy_savedir', store_history=False)
- self.process_input_line(command, store_history=False)
- self.process_input_line('cd -b ipy_thisdir', store_history=False)
- self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
- self.clear_cout()
-
-
- def process_block(self, block):
- """
- process block from the block_parser and return a list of processed lines
- """
- ret = []
- output = None
- input_lines = None
- lineno = self.IP.execution_count
-
- input_prompt = self.promptin%lineno
- output_prompt = self.promptout%lineno
- image_file = None
- image_directive = None
-
- for token, data in block:
- if token==COMMENT:
- out_data = self.process_comment(data)
- elif token==INPUT:
- (out_data, input_lines, output, is_doctest, image_file,
- image_directive) = \
- self.process_input(data, input_prompt, lineno)
- elif token==OUTPUT:
- out_data = \
- self.process_output(data, output_prompt,
- input_lines, output, is_doctest,
- image_file)
- if out_data:
- ret.extend(out_data)
-
- # save the image files
- if image_file is not None:
- self.save_image(image_file)
-
- return ret, image_directive
-
- def ensure_pyplot(self):
- if self._pyplot_imported:
- return
- self.process_input_line('import matplotlib.pyplot as plt',
- store_history=False)
-
- def process_pure_python(self, content):
- """
- content is a list of strings. it is unedited directive conent
-
- This runs it line by line in the InteractiveShell, prepends
- prompts as needed capturing stderr and stdout, then returns
- the content as a list as if it were ipython code
- """
- output = []
- savefig = False # keep up with this to clear figure
- multiline = False # to handle line continuation
- multiline_start = None
- fmtin = self.promptin
-
- ct = 0
-
- for lineno, line in enumerate(content):
-
- line_stripped = line.strip()
- if not len(line):
- #output.append(line)
- continue
-
- # handle decorators
- if line_stripped.startswith('@'):
- output.extend([line])
- if 'savefig' in line:
- savefig = True # and need to clear figure
- continue
-
- # handle comments
- if line_stripped.startswith('#'):
- output.extend([line])
- continue
-
- # deal with lines checking for multiline
- continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
- if not multiline:
- modified = u"%s %s" % (fmtin % ct, line_stripped)
- output.append(modified)
- ct += 1
- try:
- ast.parse(line_stripped)
- output.append(u'')
- except Exception: # on a multiline
- multiline = True
- multiline_start = lineno
- else: # still on a multiline
- modified = u'%s %s' % (continuation, line)
- output.append(modified)
- try:
- mod = ast.parse(
- '\n'.join(content[multiline_start:lineno+1]))
- if isinstance(mod.body[0], ast.FunctionDef):
- # check to see if we have the whole function
- for element in mod.body[0].body:
- if isinstance(element, ast.Return):
- multiline = False
- else:
- output.append(u'')
- multiline = False
- except Exception:
- pass
-
- if savefig: # clear figure if plotted
- self.ensure_pyplot()
- self.process_input_line('plt.clf()', store_history=False)
- self.clear_cout()
- savefig = False
-
- return output
-
-class IpythonDirective(Directive):
-
- has_content = True
- required_arguments = 0
- optional_arguments = 4 # python, suppress, verbatim, doctest
- final_argumuent_whitespace = True
- option_spec = { 'python': directives.unchanged,
- 'suppress' : directives.flag,
- 'verbatim' : directives.flag,
- 'doctest' : directives.flag,
- }
-
- shell = EmbeddedSphinxShell()
-
- def get_config_options(self):
- # contains sphinx configuration variables
- config = self.state.document.settings.env.config
-
- # get config variables to set figure output directory
- confdir = self.state.document.settings.env.app.confdir
- savefig_dir = config.ipython_savefig_dir
- source_dir = os.path.dirname(self.state.document.current_source)
- if savefig_dir is None:
- savefig_dir = config.html_static_path
- if isinstance(savefig_dir, list):
- savefig_dir = savefig_dir[0] # safe to assume only one path?
- savefig_dir = os.path.join(confdir, savefig_dir)
-
- # get regex and prompt stuff
- rgxin = config.ipython_rgxin
- rgxout = config.ipython_rgxout
- promptin = config.ipython_promptin
- promptout = config.ipython_promptout
-
- return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
-
- def setup(self):
- # reset the execution count if we haven't processed this doc
- #NOTE: this may be borked if there are multiple seen_doc tmp files
- #check time stamp?
- seen_docs = [i for i in os.listdir(tempfile.tempdir)
- if i.startswith('seen_doc')]
- if seen_docs:
- fname = os.path.join(tempfile.tempdir, seen_docs[0])
- docs = open(fname).read().split('\n')
- if not self.state.document.current_source in docs:
- self.shell.IP.history_manager.reset()
- self.shell.IP.execution_count = 1
- else: # haven't processed any docs yet
- docs = []
-
-
- # get config values
- (savefig_dir, source_dir, rgxin,
- rgxout, promptin, promptout) = self.get_config_options()
-
- # and attach to shell so we don't have to pass them around
- self.shell.rgxin = rgxin
- self.shell.rgxout = rgxout
- self.shell.promptin = promptin
- self.shell.promptout = promptout
- self.shell.savefig_dir = savefig_dir
- self.shell.source_dir = source_dir
-
- # setup bookmark for saving figures directory
-
- self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
- store_history=False)
- self.shell.clear_cout()
-
- # write the filename to a tempfile because it's been "seen" now
- if not self.state.document.current_source in docs:
- fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
- fout = open(fname, 'a')
- fout.write(self.state.document.current_source+'\n')
- fout.close()
-
- return rgxin, rgxout, promptin, promptout
-
-
- def teardown(self):
- # delete last bookmark
- self.shell.process_input_line('bookmark -d ipy_savedir',
- store_history=False)
- self.shell.clear_cout()
-
- def run(self):
- debug = False
-
- #TODO, any reason block_parser can't be a method of embeddable shell
- # then we wouldn't have to carry these around
- rgxin, rgxout, promptin, promptout = self.setup()
-
- options = self.options
- self.shell.is_suppress = 'suppress' in options
- self.shell.is_doctest = 'doctest' in options
- self.shell.is_verbatim = 'verbatim' in options
-
-
- # handle pure python code
- if 'python' in self.arguments:
- content = self.content
- self.content = self.shell.process_pure_python(content)
-
- parts = '\n'.join(self.content).split('\n\n')
-
- lines = ['.. code-block:: ipython','']
- figures = []
-
- for part in parts:
-
- block = block_parser(part, rgxin, rgxout, promptin, promptout)
-
- if len(block):
- rows, figure = self.shell.process_block(block)
- for row in rows:
- lines.extend([' %s'%line for line in row.split('\n')])
-
- if figure is not None:
- figures.append(figure)
-
- #text = '\n'.join(lines)
- #figs = '\n'.join(figures)
-
- for figure in figures:
- lines.append('')
- lines.extend(figure.split('\n'))
- lines.append('')
-
- #print lines
- if len(lines)>2:
- if debug:
- print '\n'.join(lines)
- else: #NOTE: this raises some errors, what's it for?
- #print 'INSERTING %d lines'%len(lines)
- self.state_machine.insert_input(
- lines, self.state_machine.input_lines.source(0))
-
- text = '\n'.join(lines)
- txtnode = nodes.literal_block(text, text)
- txtnode['language'] = 'ipython'
- #imgnode = nodes.image(figs)
-
- # cleanup
- self.teardown()
-
- return []#, imgnode]
-
-# Enable as a proper Sphinx directive
-def setup(app):
- setup.app = app
-
- app.add_directive('ipython', IpythonDirective)
- app.add_config_value('ipython_savefig_dir', None, True)
- app.add_config_value('ipython_rgxin',
- re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
- app.add_config_value('ipython_rgxout',
- re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
- app.add_config_value('ipython_promptin', 'In [%d]:', True)
- app.add_config_value('ipython_promptout', 'Out[%d]:', True)
-
-
-# Simple smoke test, needs to be converted to a proper automatic test.
-def test():
-
- examples = [
- r"""
-In [9]: pwd
-Out[9]: '/home/jdhunter/py4science/book'
-
-In [10]: cd bookdata/
-/home/jdhunter/py4science/book/bookdata
-
-In [2]: from pylab import *
-
-In [2]: ion()
-
-In [3]: im = imread('stinkbug.png')
-
- at savefig mystinkbug.png width=4in
-In [4]: imshow(im)
-Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
-
-""",
- r"""
-
-In [1]: x = 'hello world'
-
-# string methods can be
-# used to alter the string
- at doctest
-In [2]: x.upper()
-Out[2]: 'HELLO WORLD'
-
- at verbatim
-In [3]: x.st<TAB>
-x.startswith x.strip
-""",
- r"""
-
-In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
- .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
-
-In [131]: print url.split('&')
-['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
-
-In [60]: import urllib
-
-""",
- r"""\
-
-In [133]: import numpy.random
-
- at suppress
-In [134]: numpy.random.seed(2358)
-
- at doctest
-In [135]: numpy.random.rand(10,2)
-Out[135]:
-array([[ 0.64524308, 0.59943846],
- [ 0.47102322, 0.8715456 ],
- [ 0.29370834, 0.74776844],
- [ 0.99539577, 0.1313423 ],
- [ 0.16250302, 0.21103583],
- [ 0.81626524, 0.1312433 ],
- [ 0.67338089, 0.72302393],
- [ 0.7566368 , 0.07033696],
- [ 0.22591016, 0.77731835],
- [ 0.0072729 , 0.34273127]])
-
-""",
-
- r"""
-In [106]: print x
-jdh
-
-In [109]: for i in range(10):
- .....: print i
- .....:
- .....:
-0
-1
-2
-3
-4
-5
-6
-7
-8
-9
-""",
-
- r"""
-
-In [144]: from pylab import *
-
-In [145]: ion()
-
-# use a semicolon to suppress the output
- at savefig test_hist.png width=4in
-In [151]: hist(np.random.randn(10000), 100);
-
-
- at savefig test_plot.png width=4in
-In [151]: plot(np.random.randn(10000), 'o');
- """,
-
- r"""
-# use a semicolon to suppress the output
-In [151]: plt.clf()
-
- at savefig plot_simple.png width=4in
-In [151]: plot([1,2,3])
-
- at savefig hist_simple.png width=4in
-In [151]: hist(np.random.randn(10000), 100);
-
-""",
- r"""
-# update the current fig
-In [151]: ylabel('number')
-
-In [152]: title('normal distribution')
-
-
- at savefig hist_with_text.png
-In [153]: grid(True)
-
- """,
- ]
- # skip local-file depending first example:
- examples = examples[1:]
-
- #ipython_directive.DEBUG = True # dbg
- #options = dict(suppress=True) # dbg
- options = dict()
- for example in examples:
- content = example.split('\n')
- ipython_directive('debug', arguments=None, options=options,
- content=content, lineno=0,
- content_offset=None, block_text=None,
- state=None, state_machine=None,
- )
-
-# Run test suite as a script
-if __name__=='__main__':
- if not os.path.isdir('_static'):
- os.mkdir('_static')
- test()
- print 'All OK? Check figures in _static/'
diff --git a/doc/ext/numpydoc/__init__.py b/doc/ext/numpydoc/__init__.py
deleted file mode 100755
index ae9073b..0000000
--- a/doc/ext/numpydoc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from numpydoc import setup
diff --git a/doc/ext/numpydoc/comment_eater.py b/doc/ext/numpydoc/comment_eater.py
deleted file mode 100755
index e11eea9..0000000
--- a/doc/ext/numpydoc/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
- """ A comment block.
- """
- is_comment = True
- def __init__(self, start_lineno, end_lineno, text):
- # int : The first line number in the block. 1-indexed.
- self.start_lineno = start_lineno
- # int : The last line number. Inclusive!
- self.end_lineno = end_lineno
- # str : The text block including '#' character but not any leading spaces.
- self.text = text
-
- def add(self, string, start, end, line):
- """ Add a new comment line.
- """
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
- self.text += string
-
- def __repr__(self):
- return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno, self.text)
-
-
-class NonComment(object):
- """ A non-comment block of code.
- """
- is_comment = False
- def __init__(self, start_lineno, end_lineno):
- self.start_lineno = start_lineno
- self.end_lineno = end_lineno
-
- def add(self, string, start, end, line):
- """ Add lines to the block.
- """
- if string.strip():
- # Only add if not entirely whitespace.
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno)
-
-
-class CommentBlocker(object):
- """ Pull out contiguous comment blocks.
- """
- def __init__(self):
- # Start with a dummy.
- self.current_block = NonComment(0, 0)
-
- # All of the blocks seen so far.
- self.blocks = []
-
- # The index mapping lines of code to their associated comment blocks.
- self.index = {}
-
- def process_file(self, file):
- """ Process a file object.
- """
- for token in tokenize.generate_tokens(file.next):
- self.process_token(*token)
- self.make_index()
-
- def process_token(self, kind, string, start, end, line):
- """ Process a single token.
- """
- if self.current_block.is_comment:
- if kind == tokenize.COMMENT:
- self.current_block.add(string, start, end, line)
- else:
- self.new_noncomment(start[0], end[0])
- else:
- if kind == tokenize.COMMENT:
- self.new_comment(string, start, end, line)
- else:
- self.current_block.add(string, start, end, line)
-
- def new_noncomment(self, start_lineno, end_lineno):
- """ We are transitioning from a noncomment to a comment.
- """
- block = NonComment(start_lineno, end_lineno)
- self.blocks.append(block)
- self.current_block = block
-
- def new_comment(self, string, start, end, line):
- """ Possibly add a new comment.
-
- Only adds a new comment if this comment is the only thing on the line.
- Otherwise, it extends the noncomment block.
- """
- prefix = line[:start[1]]
- if prefix.strip():
- # Oops! Trailing comment, not a comment block.
- self.current_block.add(string, start, end, line)
- else:
- # A comment block.
- block = Comment(start[0], end[0], string)
- self.blocks.append(block)
- self.current_block = block
-
- def make_index(self):
- """ Make the index mapping lines of actual code to their associated
- prefix comments.
- """
- for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
- if not block.is_comment:
- self.index[block.start_lineno] = prev
-
- def search_for_comment(self, lineno, default=None):
- """ Find the comment block just before the given line number.
-
- Returns None (or the specified default) if there is no such block.
- """
- if not self.index:
- self.make_index()
- block = self.index.get(lineno, None)
- text = getattr(block, 'text', default)
- return text
-
-
-def strip_comment_marker(text):
- """ Strip # markers at the front of a block of comment text.
- """
- lines = []
- for line in text.splitlines():
- lines.append(line.lstrip('#'))
- text = textwrap.dedent('\n'.join(lines))
- return text
-
-
-def get_class_traits(klass):
- """ Yield all of the documentation for trait definitions on a class object.
- """
- # FIXME: gracefully handle errors here or in the caller?
- source = inspect.getsource(klass)
- cb = CommentBlocker()
- cb.process_file(StringIO(source))
- mod_ast = compiler.parse(source)
- class_ast = mod_ast.node.nodes[0]
- for node in class_ast.code.nodes:
- # FIXME: handle other kinds of assignments?
- if isinstance(node, compiler.ast.Assign):
- name = node.nodes[0].name
- rhs = unparse(node.expr).strip()
- doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
- yield name, rhs, doc
-
diff --git a/doc/ext/numpydoc/compiler_unparse.py b/doc/ext/numpydoc/compiler_unparse.py
deleted file mode 100755
index ffcf51b..0000000
--- a/doc/ext/numpydoc/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
- The unparse method takes a compiler.ast tree and transforms it back into
- valid python code. It is incomplete and currently only works for
- import statements, function calls, function definitions, assignments, and
- basic expressions.
-
- Inspired by python-2.5-svn/Demo/parser/unparse.py
-
- fixme: We may want to move to using _ast trees because the compiler for
- them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
- s = cStringIO.StringIO()
- UnparseCompilerAst(ast, s, single_line_functions)
- return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
- 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
- """ Methods in this class recursively traverse an AST and
- output source code for the abstract syntax; original formatting
- is disregarged.
- """
-
- #########################################################################
- # object interface.
- #########################################################################
-
- def __init__(self, tree, file = sys.stdout, single_line_functions=False):
- """ Unparser(tree, file=sys.stdout) -> None.
-
- Print the source for tree to file.
- """
- self.f = file
- self._single_func = single_line_functions
- self._do_indent = True
- self._indent = 0
- self._dispatch(tree)
- self._write("\n")
- self.f.flush()
-
- #########################################################################
- # Unparser private interface.
- #########################################################################
-
- ### format, output, and dispatch methods ################################
-
- def _fill(self, text = ""):
- "Indent a piece of text, according to the current indentation level"
- if self._do_indent:
- self._write("\n"+" "*self._indent + text)
- else:
- self._write(text)
-
- def _write(self, text):
- "Append a piece of text to the current line."
- self.f.write(text)
-
- def _enter(self):
- "Print ':', and increase the indentation."
- self._write(": ")
- self._indent += 1
-
- def _leave(self):
- "Decrease the indentation level."
- self._indent -= 1
-
- def _dispatch(self, tree):
- "_dispatcher function, _dispatching tree type T to method _T."
- if isinstance(tree, list):
- for t in tree:
- self._dispatch(t)
- return
- meth = getattr(self, "_"+tree.__class__.__name__)
- if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
- return
- meth(tree)
-
-
- #########################################################################
- # compiler.ast unparsing methods.
- #
- # There should be one method per concrete grammar type. They are
- # organized in alphabetical order.
- #########################################################################
-
- def _Add(self, t):
- self.__binary_op(t, '+')
-
- def _And(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") and (")
- self._write(")")
-
- def _AssAttr(self, t):
- """ Handle assigning an attribute of an object
- """
- self._dispatch(t.expr)
- self._write('.'+t.attrname)
-
- def _Assign(self, t):
- """ Expression Assignment such as "a = 1".
-
- This only handles assignment in expressions. Keyword assignment
- is handled separately.
- """
- self._fill()
- for target in t.nodes:
- self._dispatch(target)
- self._write(" = ")
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write('; ')
-
- def _AssName(self, t):
- """ Name on left hand side of expression.
-
- Treat just like a name on the right side of an expression.
- """
- self._Name(t)
-
- def _AssTuple(self, t):
- """ Tuple on left hand side of an expression.
- """
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- def _AugAssign(self, t):
- """ +=,-=,*=,/=,**=, etc. operations
- """
-
- self._fill()
- self._dispatch(t.node)
- self._write(' '+t.op+' ')
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write(';')
-
- def _Bitand(self, t):
- """ Bit and operation.
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" & ")
-
- def _Bitor(self, t):
- """ Bit or operation
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" | ")
-
- def _CallFunc(self, t):
- """ Function call.
- """
- self._dispatch(t.node)
- self._write("(")
- comma = False
- for e in t.args:
- if comma: self._write(", ")
- else: comma = True
- self._dispatch(e)
- if t.star_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("*")
- self._dispatch(t.star_args)
- if t.dstar_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("**")
- self._dispatch(t.dstar_args)
- self._write(")")
-
- def _Compare(self, t):
- self._dispatch(t.expr)
- for op, expr in t.ops:
- self._write(" " + op + " ")
- self._dispatch(expr)
-
- def _Const(self, t):
- """ A constant value such as an integer value, 3, or a string, "hello".
- """
- self._dispatch(t.value)
-
- def _Decorators(self, t):
- """ Handle function decorators (eg. @has_units)
- """
- for node in t.nodes:
- self._dispatch(node)
-
- def _Dict(self, t):
- self._write("{")
- for i, (k, v) in enumerate(t.items):
- self._dispatch(k)
- self._write(": ")
- self._dispatch(v)
- if i < len(t.items)-1:
- self._write(", ")
- self._write("}")
-
- def _Discard(self, t):
- """ Node for when return value is ignored such as in "foo(a)".
- """
- self._fill()
- self._dispatch(t.expr)
-
- def _Div(self, t):
- self.__binary_op(t, '/')
-
- def _Ellipsis(self, t):
- self._write("...")
-
- def _From(self, t):
- """ Handle "from xyz import foo, bar as baz".
- """
- # fixme: Are From and ImportFrom handled differently?
- self._fill("from ")
- self._write(t.modname)
- self._write(" import ")
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Function(self, t):
- """ Handle function definitions
- """
- if t.decorators is not None:
- self._fill("@")
- self._dispatch(t.decorators)
- self._fill("def "+t.name + "(")
- defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
- for i, arg in enumerate(zip(t.argnames, defaults)):
- self._write(arg[0])
- if arg[1] is not None:
- self._write('=')
- self._dispatch(arg[1])
- if i < len(t.argnames)-1:
- self._write(', ')
- self._write(")")
- if self._single_func:
- self._do_indent = False
- self._enter()
- self._dispatch(t.code)
- self._leave()
- self._do_indent = True
-
- def _Getattr(self, t):
- """ Handle getting an attribute of an object
- """
- if isinstance(t.expr, (Div, Mul, Sub, Add)):
- self._write('(')
- self._dispatch(t.expr)
- self._write(')')
- else:
- self._dispatch(t.expr)
-
- self._write('.'+t.attrname)
-
- def _If(self, t):
- self._fill()
-
- for i, (compare,code) in enumerate(t.tests):
- if i == 0:
- self._write("if ")
- else:
- self._write("elif ")
- self._dispatch(compare)
- self._enter()
- self._fill()
- self._dispatch(code)
- self._leave()
- self._write("\n")
-
- if t.else_ is not None:
- self._write("else")
- self._enter()
- self._fill()
- self._dispatch(t.else_)
- self._leave()
- self._write("\n")
-
- def _IfExp(self, t):
- self._dispatch(t.then)
- self._write(" if ")
- self._dispatch(t.test)
-
- if t.else_ is not None:
- self._write(" else (")
- self._dispatch(t.else_)
- self._write(")")
-
- def _Import(self, t):
- """ Handle "import xyz.foo".
- """
- self._fill("import ")
-
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Keyword(self, t):
- """ Keyword value assignment within function calls and definitions.
- """
- self._write(t.name)
- self._write("=")
- self._dispatch(t.expr)
-
- def _List(self, t):
- self._write("[")
- for i,node in enumerate(t.nodes):
- self._dispatch(node)
- if i < len(t.nodes)-1:
- self._write(", ")
- self._write("]")
-
- def _Module(self, t):
- if t.doc is not None:
- self._dispatch(t.doc)
- self._dispatch(t.node)
-
- def _Mul(self, t):
- self.__binary_op(t, '*')
-
- def _Name(self, t):
- self._write(t.name)
-
- def _NoneType(self, t):
- self._write("None")
-
- def _Not(self, t):
- self._write('not (')
- self._dispatch(t.expr)
- self._write(')')
-
- def _Or(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") or (")
- self._write(")")
-
- def _Pass(self, t):
- self._write("pass\n")
-
- def _Printnl(self, t):
- self._fill("print ")
- if t.dest:
- self._write(">> ")
- self._dispatch(t.dest)
- self._write(", ")
- comma = False
- for node in t.nodes:
- if comma: self._write(', ')
- else: comma = True
- self._dispatch(node)
-
- def _Power(self, t):
- self.__binary_op(t, '**')
-
- def _Return(self, t):
- self._fill("return ")
- if t.value:
- if isinstance(t.value, Tuple):
- text = ', '.join([ name.name for name in t.value.asList() ])
- self._write(text)
- else:
- self._dispatch(t.value)
- if not self._do_indent:
- self._write('; ')
-
- def _Slice(self, t):
- self._dispatch(t.expr)
- self._write("[")
- if t.lower:
- self._dispatch(t.lower)
- self._write(":")
- if t.upper:
- self._dispatch(t.upper)
- #if t.step:
- # self._write(":")
- # self._dispatch(t.step)
- self._write("]")
-
- def _Sliceobj(self, t):
- for i, node in enumerate(t.nodes):
- if i != 0:
- self._write(":")
- if not (isinstance(node, Const) and node.value is None):
- self._dispatch(node)
-
- def _Stmt(self, tree):
- for node in tree.nodes:
- self._dispatch(node)
-
- def _Sub(self, t):
- self.__binary_op(t, '-')
-
- def _Subscript(self, t):
- self._dispatch(t.expr)
- self._write("[")
- for i, value in enumerate(t.subs):
- if i != 0:
- self._write(",")
- self._dispatch(value)
- self._write("]")
-
- def _TryExcept(self, t):
- self._fill("try")
- self._enter()
- self._dispatch(t.body)
- self._leave()
-
- for handler in t.handlers:
- self._fill('except ')
- self._dispatch(handler[0])
- if handler[1] is not None:
- self._write(', ')
- self._dispatch(handler[1])
- self._enter()
- self._dispatch(handler[2])
- self._leave()
-
- if t.else_:
- self._fill("else")
- self._enter()
- self._dispatch(t.else_)
- self._leave()
-
- def _Tuple(self, t):
-
- if not t.nodes:
- # Empty tuple.
- self._write("()")
- else:
- self._write("(")
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- self._write(")")
-
- def _UnaryAdd(self, t):
- self._write("+")
- self._dispatch(t.expr)
-
- def _UnarySub(self, t):
- self._write("-")
- self._dispatch(t.expr)
-
- def _With(self, t):
- self._fill('with ')
- self._dispatch(t.expr)
- if t.vars:
- self._write(' as ')
- self._dispatch(t.vars.name)
- self._enter()
- self._dispatch(t.body)
- self._leave()
- self._write('\n')
-
- def _int(self, t):
- self._write(repr(t))
-
- def __binary_op(self, t, symbol):
- # Check if parenthesis are needed on left side and then dispatch
- has_paren = False
- left_class = str(t.left.__class__)
- if (left_class in op_precedence.keys() and
- op_precedence[left_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.left)
- if has_paren:
- self._write(')')
- # Write the appropriate symbol for operator
- self._write(symbol)
- # Check if parenthesis are needed on the right side and then dispatch
- has_paren = False
- right_class = str(t.right.__class__)
- if (right_class in op_precedence.keys() and
- op_precedence[right_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.right)
- if has_paren:
- self._write(')')
-
- def _float(self, t):
- # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
- # We prefer str here.
- self._write(str(t))
-
- def _str(self, t):
- self._write(repr(t))
-
- def _tuple(self, t):
- self._write(str(t))
-
- #########################################################################
- # These are the methods from the _ast modules unparse.
- #
- # As our needs to handle more advanced code increase, we may want to
- # modify some of the methods below so that they work for compiler.ast.
- #########################################################################
-
-# # stmt
-# def _Expr(self, tree):
-# self._fill()
-# self._dispatch(tree.value)
-#
-# def _Import(self, t):
-# self._fill("import ")
-# first = True
-# for a in t.names:
-# if first:
-# first = False
-# else:
-# self._write(", ")
-# self._write(a.name)
-# if a.asname:
-# self._write(" as "+a.asname)
-#
-## def _ImportFrom(self, t):
-## self._fill("from ")
-## self._write(t.module)
-## self._write(" import ")
-## for i, a in enumerate(t.names):
-## if i == 0:
-## self._write(", ")
-## self._write(a.name)
-## if a.asname:
-## self._write(" as "+a.asname)
-## # XXX(jpe) what is level for?
-##
-#
-# def _Break(self, t):
-# self._fill("break")
-#
-# def _Continue(self, t):
-# self._fill("continue")
-#
-# def _Delete(self, t):
-# self._fill("del ")
-# self._dispatch(t.targets)
-#
-# def _Assert(self, t):
-# self._fill("assert ")
-# self._dispatch(t.test)
-# if t.msg:
-# self._write(", ")
-# self._dispatch(t.msg)
-#
-# def _Exec(self, t):
-# self._fill("exec ")
-# self._dispatch(t.body)
-# if t.globals:
-# self._write(" in ")
-# self._dispatch(t.globals)
-# if t.locals:
-# self._write(", ")
-# self._dispatch(t.locals)
-#
-# def _Print(self, t):
-# self._fill("print ")
-# do_comma = False
-# if t.dest:
-# self._write(">>")
-# self._dispatch(t.dest)
-# do_comma = True
-# for e in t.values:
-# if do_comma:self._write(", ")
-# else:do_comma=True
-# self._dispatch(e)
-# if not t.nl:
-# self._write(",")
-#
-# def _Global(self, t):
-# self._fill("global")
-# for i, n in enumerate(t.names):
-# if i != 0:
-# self._write(",")
-# self._write(" " + n)
-#
-# def _Yield(self, t):
-# self._fill("yield")
-# if t.value:
-# self._write(" (")
-# self._dispatch(t.value)
-# self._write(")")
-#
-# def _Raise(self, t):
-# self._fill('raise ')
-# if t.type:
-# self._dispatch(t.type)
-# if t.inst:
-# self._write(", ")
-# self._dispatch(t.inst)
-# if t.tback:
-# self._write(", ")
-# self._dispatch(t.tback)
-#
-#
-# def _TryFinally(self, t):
-# self._fill("try")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# self._fill("finally")
-# self._enter()
-# self._dispatch(t.finalbody)
-# self._leave()
-#
-# def _excepthandler(self, t):
-# self._fill("except ")
-# if t.type:
-# self._dispatch(t.type)
-# if t.name:
-# self._write(", ")
-# self._dispatch(t.name)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _ClassDef(self, t):
-# self._write("\n")
-# self._fill("class "+t.name)
-# if t.bases:
-# self._write("(")
-# for a in t.bases:
-# self._dispatch(a)
-# self._write(", ")
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _FunctionDef(self, t):
-# self._write("\n")
-# for deco in t.decorators:
-# self._fill("@")
-# self._dispatch(deco)
-# self._fill("def "+t.name + "(")
-# self._dispatch(t.args)
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _For(self, t):
-# self._fill("for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# def _While(self, t):
-# self._fill("while ")
-# self._dispatch(t.test)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# # expr
-# def _Str(self, tree):
-# self._write(repr(tree.s))
-##
-# def _Repr(self, t):
-# self._write("`")
-# self._dispatch(t.value)
-# self._write("`")
-#
-# def _Num(self, t):
-# self._write(repr(t.n))
-#
-# def _ListComp(self, t):
-# self._write("[")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write("]")
-#
-# def _GeneratorExp(self, t):
-# self._write("(")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write(")")
-#
-# def _comprehension(self, t):
-# self._write(" for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# for if_clause in t.ifs:
-# self._write(" if ")
-# self._dispatch(if_clause)
-#
-# def _IfExp(self, t):
-# self._dispatch(t.body)
-# self._write(" if ")
-# self._dispatch(t.test)
-# if t.orelse:
-# self._write(" else ")
-# self._dispatch(t.orelse)
-#
-# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-# def _UnaryOp(self, t):
-# self._write(self.unop[t.op.__class__.__name__])
-# self._write("(")
-# self._dispatch(t.operand)
-# self._write(")")
-#
-# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-# "FloorDiv":"//", "Pow": "**"}
-# def _BinOp(self, t):
-# self._write("(")
-# self._dispatch(t.left)
-# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-# self._dispatch(t.right)
-# self._write(")")
-#
-# boolops = {_ast.And: 'and', _ast.Or: 'or'}
-# def _BoolOp(self, t):
-# self._write("(")
-# self._dispatch(t.values[0])
-# for v in t.values[1:]:
-# self._write(" %s " % self.boolops[t.op.__class__])
-# self._dispatch(v)
-# self._write(")")
-#
-# def _Attribute(self,t):
-# self._dispatch(t.value)
-# self._write(".")
-# self._write(t.attr)
-#
-## def _Call(self, t):
-## self._dispatch(t.func)
-## self._write("(")
-## comma = False
-## for e in t.args:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## for e in t.keywords:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## if t.starargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("*")
-## self._dispatch(t.starargs)
-## if t.kwargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("**")
-## self._dispatch(t.kwargs)
-## self._write(")")
-#
-# # slice
-# def _Index(self, t):
-# self._dispatch(t.value)
-#
-# def _ExtSlice(self, t):
-# for i, d in enumerate(t.dims):
-# if i != 0:
-# self._write(': ')
-# self._dispatch(d)
-#
-# # others
-# def _arguments(self, t):
-# first = True
-# nonDef = len(t.args)-len(t.defaults)
-# for a in t.args[0:nonDef]:
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a)
-# for a,d in zip(t.args[nonDef:], t.defaults):
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a),
-# self._write("=")
-# self._dispatch(d)
-# if t.vararg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("*"+t.vararg)
-# if t.kwarg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("**"+t.kwarg)
-#
-## def _keyword(self, t):
-## self._write(t.arg)
-## self._write("=")
-## self._dispatch(t.value)
-#
-# def _Lambda(self, t):
-# self._write("lambda ")
-# self._dispatch(t.args)
-# self._write(": ")
-# self._dispatch(t.body)
-
-
-
diff --git a/doc/ext/numpydoc/docscrape.py b/doc/ext/numpydoc/docscrape.py
deleted file mode 100755
index 615ea11..0000000
--- a/doc/ext/numpydoc/docscrape.py
+++ /dev/null
@@ -1,500 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-
-import inspect
-import textwrap
-import re
-import pydoc
-from StringIO import StringIO
-from warnings import warn
-
-class Reader(object):
- """A line-based string reader.
-
- """
- def __init__(self, data):
- """
- Parameters
- ----------
- data : str
- String with lines separated by '\n'.
-
- """
- if isinstance(data,list):
- self._str = data
- else:
- self._str = data.split('\n') # store string as list of lines
-
- self.reset()
-
- def __getitem__(self, n):
- return self._str[n]
-
- def reset(self):
- self._l = 0 # current line nr
-
- def read(self):
- if not self.eof():
- out = self[self._l]
- self._l += 1
- return out
- else:
- return ''
-
- def seek_next_non_empty_line(self):
- for l in self[self._l:]:
- if l.strip():
- break
- else:
- self._l += 1
-
- def eof(self):
- return self._l >= len(self._str)
-
- def read_to_condition(self, condition_func):
- start = self._l
- for line in self[start:]:
- if condition_func(line):
- return self[start:self._l]
- self._l += 1
- if self.eof():
- return self[start:self._l+1]
- return []
-
- def read_to_next_empty_line(self):
- self.seek_next_non_empty_line()
- def is_empty(line):
- return not line.strip()
- return self.read_to_condition(is_empty)
-
- def read_to_next_unindented_line(self):
- def is_unindented(line):
- return (line.strip() and (len(line.lstrip()) == len(line)))
- return self.read_to_condition(is_unindented)
-
- def peek(self,n=0):
- if self._l + n < len(self._str):
- return self[self._l + n]
- else:
- return ''
-
- def is_empty(self):
- return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
- def __init__(self, docstring, config={}):
- docstring = textwrap.dedent(docstring).split('\n')
-
- self._doc = Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': [''],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Attributes': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'Warnings': [],
- 'References': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def __getitem__(self,key):
- return self._parsed_data[key]
-
- def __setitem__(self,key,val):
- if not self._parsed_data.has_key(key):
- warn("Unknown section %s" % key)
- else:
- self._parsed_data[key] = val
-
- def _is_at_section(self):
- self._doc.seek_next_non_empty_line()
-
- if self._doc.eof():
- return False
-
- l1 = self._doc.peek().strip() # e.g. Parameters
-
- if l1.startswith('.. index::'):
- return True
-
- l2 = self._doc.peek(1).strip() # ---------- or ==========
- return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
- def _strip(self,doc):
- i = 0
- j = 0
- for i,line in enumerate(doc):
- if line.strip(): break
-
- for j,line in enumerate(doc[::-1]):
- if line.strip(): break
-
- return doc[i:len(doc)-j]
-
- def _read_to_next_section(self):
- section = self._doc.read_to_next_empty_line()
-
- while not self._is_at_section() and not self._doc.eof():
- if not self._doc.peek(-1).strip(): # previous line was empty
- section += ['']
-
- section += self._doc.read_to_next_empty_line()
-
- return section
-
- def _read_sections(self):
- while not self._doc.eof():
- data = self._read_to_next_section()
- name = data[0].strip()
-
- if name.startswith('..'): # index section
- yield name, data[1:]
- elif len(data) < 2:
- yield StopIteration
- else:
- yield name, self._strip(data[2:])
-
- def _parse_param_list(self,content):
- r = Reader(content)
- params = []
- while not r.eof():
- header = r.read().strip()
- if ' : ' in header:
- arg_name, arg_type = header.split(' : ')[:2]
- else:
- arg_name, arg_type = header, ''
-
- desc = r.read_to_next_unindented_line()
- desc = dedent_lines(desc)
-
- params.append((arg_name,arg_type,desc))
-
- return params
-
-
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
- def _parse_see_also(self, content):
- """
- func_name : Descriptive text
- continued text
- another_func_name : Descriptive text
- func_name1, func_name2, :meth:`func_name`, func_name3
-
- """
- items = []
-
- def parse_item_name(text):
- """Match ':role:`name`' or 'name'"""
- m = self._name_rgx.match(text)
- if m:
- g = m.groups()
- if g[1] is None:
- return g[3], None
- else:
- return g[2], g[1]
- raise ValueError("%s is not a item name" % text)
-
- def push_item(name, rest):
- if not name:
- return
- name, role = parse_item_name(name)
- items.append((name, list(rest), role))
- del rest[:]
-
- current_func = None
- rest = []
-
- for line in content:
- if not line.strip(): continue
-
- m = self._name_rgx.match(line)
- if m and line[m.end():].strip().startswith(':'):
- push_item(current_func, rest)
- current_func, line = line[:m.end()], line[m.end():]
- rest = [line.split(':', 1)[1].strip()]
- if not rest[0]:
- rest = []
- elif not line.startswith(' '):
- push_item(current_func, rest)
- current_func = None
- if ',' in line:
- for func in line.split(','):
- if func.strip():
- push_item(func, [])
- elif line.strip():
- current_func = line
- elif current_func is not None:
- rest.append(line.strip())
- push_item(current_func, rest)
- return items
-
- def _parse_index(self, section, content):
- """
- .. index: default
- :refguide: something, else, and more
-
- """
- def strip_each_in(lst):
- return [s.strip() for s in lst]
-
- out = {}
- section = section.split('::')
- if len(section) > 1:
- out['default'] = strip_each_in(section[1].split(','))[0]
- for line in content:
- line = line.split(':')
- if len(line) > 2:
- out[line[1]] = strip_each_in(line[2].split(','))
- return out
-
- def _parse_summary(self):
- """Grab signature (if given) and summary"""
- if self._is_at_section():
- return
-
- summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
- self['Signature'] = summary_str
- if not self._is_at_section():
- self['Summary'] = self._doc.read_to_next_empty_line()
- else:
- self['Summary'] = summary
-
- if not self._is_at_section():
- self['Extended Summary'] = self._read_to_next_section()
-
- def _parse(self):
- self._doc.reset()
- self._parse_summary()
-
- for (section,content) in self._read_sections():
- if not section.startswith('..'):
- section = ' '.join([s.capitalize() for s in section.split(' ')])
- if section in ('Parameters', 'Returns', 'Raises', 'Warns',
- 'Other Parameters', 'Attributes', 'Methods'):
- self[section] = self._parse_param_list(content)
- elif section.startswith('.. index::'):
- self['index'] = self._parse_index(section, content)
- elif section == 'See Also':
- self['See Also'] = self._parse_see_also(content)
- else:
- self[section] = content
-
- # string conversion routines
-
- def _str_header(self, name, symbol='-'):
- return [name, len(name)*symbol]
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- if self['Signature']:
- return [self['Signature'].replace('*','\*')] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- if self['Summary']:
- return self['Summary'] + ['']
- else:
- return []
-
- def _str_extended_summary(self):
- if self['Extended Summary']:
- return self['Extended Summary'] + ['']
- else:
- return []
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- for param,param_type,desc in self[name]:
- out += ['%s : %s' % (param, param_type)]
- out += self._str_indent(desc)
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += self[name]
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- if not self['See Also']: return []
- out = []
- out += self._str_header("See Also")
- last_had_desc = True
- for func, desc, role in self['See Also']:
- if role:
- link = ':%s:`%s`' % (role, func)
- elif func_role:
- link = ':%s:`%s`' % (func_role, func)
- else:
- link = "`%s`_" % func
- if desc or last_had_desc:
- out += ['']
- out += [link]
- else:
- out[-1] += ", %s" % link
- if desc:
- out += self._str_indent([' '.join(desc)])
- last_had_desc = True
- else:
- last_had_desc = False
- out += ['']
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.iteritems():
- if section == 'default':
- continue
- out += [' :%s: %s' % (section, ', '.join(references))]
- return out
-
- def __str__(self, func_role=''):
- out = []
- out += self._str_signature()
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_section('Warnings')
- out += self._str_see_also(func_role)
- for s in ('Notes','References','Examples'):
- out += self._str_section(s)
- for param_list in ('Attributes', 'Methods'):
- out += self._str_param_list(param_list)
- out += self._str_index()
- return '\n'.join(out)
-
-
-def indent(str,indent=4):
- indent_str = ' '*indent
- if str is None:
- return indent_str
- lines = str.split('\n')
- return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
- """Deindent a list of lines maximally"""
- return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
- return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
- def __init__(self, func, role='func', doc=None, config={}):
- self._f = func
- self._role = role # e.g. "func" or "meth"
-
- if doc is None:
- if func is None:
- raise ValueError("No function or docstring given")
- doc = inspect.getdoc(func) or ''
- NumpyDocString.__init__(self, doc)
-
- if not self['Signature'] and func is not None:
- func, func_name = self.get_func()
- try:
- # try to read signature
- argspec = inspect.getargspec(func)
- argspec = inspect.formatargspec(*argspec)
- argspec = argspec.replace('*','\*')
- signature = '%s%s' % (func_name, argspec)
- except TypeError, e:
- signature = '%s()' % func_name
- self['Signature'] = signature
-
- def get_func(self):
- func_name = getattr(self._f, '__name__', self.__class__.__name__)
- if inspect.isclass(self._f):
- func = getattr(self._f, '__call__', self._f.__init__)
- else:
- func = self._f
- return func, func_name
-
- def __str__(self):
- out = ''
-
- func, func_name = self.get_func()
- signature = self['Signature'].replace('*', '\*')
-
- roles = {'func': 'function',
- 'meth': 'method'}
-
- if self._role:
- if not roles.has_key(self._role):
- print "Warning: invalid role %s" % self._role
- out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
- func_name)
-
- out += super(FunctionDoc, self).__str__(func_role=self._role)
- return out
-
-
-class ClassDoc(NumpyDocString):
- def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
- config={}):
- if not inspect.isclass(cls) and cls is not None:
- raise ValueError("Expected a class or None, but got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
-
- if doc is None:
- if cls is None:
- raise ValueError("No class or documentation string given")
- doc = pydoc.getdoc(cls)
-
- NumpyDocString.__init__(self, doc)
-
- if config.get('show_class_members', True):
- if not self['Methods']:
- self['Methods'] = [(name, '', '')
- for name in sorted(self.methods)]
- if not self['Attributes']:
- self['Attributes'] = [(name, '', '')
- for name in sorted(self.properties)]
-
- @property
- def methods(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and callable(func)]
-
- @property
- def properties(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and func is None]
diff --git a/doc/ext/numpydoc/docscrape_sphinx.py b/doc/ext/numpydoc/docscrape_sphinx.py
deleted file mode 100755
index e44e770..0000000
--- a/doc/ext/numpydoc/docscrape_sphinx.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import re, inspect, textwrap, pydoc
-import sphinx
-from docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-class SphinxDocString(NumpyDocString):
- def __init__(self, docstring, config={}):
- self.use_plots = config.get('use_plots', False)
- NumpyDocString.__init__(self, docstring, config=config)
-
- # string conversion routines
- def _str_header(self, name, symbol='`'):
- return ['.. rubric:: ' + name, '']
-
- def _str_field_list(self, name):
- return [':' + name + ':']
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- return ['']
- if self['Signature']:
- return ['``%s``' % self['Signature']] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- return self['Summary'] + ['']
-
- def _str_extended_summary(self):
- return self['Extended Summary'] + ['']
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_field_list(name)
- out += ['']
- for param,param_type,desc in self[name]:
- out += self._str_indent(['**%s** : %s' % (param.strip(),
- param_type)])
- out += ['']
- out += self._str_indent(desc,8)
- out += ['']
- return out
-
- @property
- def _obj(self):
- if hasattr(self, '_cls'):
- return self._cls
- elif hasattr(self, '_f'):
- return self._f
- return None
-
- def _str_member_list(self, name):
- """
- Generate a member listing, autosummary:: table where possible,
- and a table where not.
-
- """
- out = []
- if self[name]:
- out += ['.. rubric:: %s' % name, '']
- prefix = getattr(self, '_name', '')
-
- if prefix:
- prefix = '~%s.' % prefix
-
- autosum = []
- others = []
- for param, param_type, desc in self[name]:
- param = param.strip()
- if not self._obj or hasattr(self._obj, param):
- autosum += [" %s%s" % (prefix, param)]
- else:
- others.append((param, param_type, desc))
-
- if autosum:
- out += ['.. autosummary::', ' :toctree:', '']
- out += autosum
-
- if others:
- maxlen_0 = max([len(x[0]) for x in others])
- maxlen_1 = max([len(x[1]) for x in others])
- hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
- fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
- n_indent = maxlen_0 + maxlen_1 + 4
- out += [hdr]
- for param, param_type, desc in others:
- out += [fmt % (param.strip(), param_type)]
- out += self._str_indent(desc, n_indent)
- out += [hdr]
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += ['']
- content = textwrap.dedent("\n".join(self[name])).split("\n")
- out += content
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- out = []
- if self['See Also']:
- see_also = super(SphinxDocString, self)._str_see_also(func_role)
- out = ['.. seealso::', '']
- out += self._str_indent(see_also[2:])
- return out
-
- def _str_warnings(self):
- out = []
- if self['Warnings']:
- out = ['.. warning::', '']
- out += self._str_indent(self['Warnings'])
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- if len(idx) == 0:
- return out
-
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.iteritems():
- if section == 'default':
- continue
- elif section == 'refguide':
- out += [' single: %s' % (', '.join(references))]
- else:
- out += [' %s: %s' % (section, ','.join(references))]
- return out
-
- def _str_references(self):
- out = []
- if self['References']:
- out += self._str_header('References')
- if isinstance(self['References'], str):
- self['References'] = [self['References']]
- out.extend(self['References'])
- out += ['']
- # Latex collects all references to a separate bibliography,
- # so we need to insert links to it
- if sphinx.__version__ >= "0.6":
- out += ['.. only:: latex','']
- else:
- out += ['.. latexonly::','']
- items = []
- for line in self['References']:
- m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
- if m:
- items.append(m.group(1))
- out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
- return out
-
- def _str_examples(self):
- examples_str = "\n".join(self['Examples'])
-
- if (self.use_plots and 'import matplotlib' in examples_str
- and 'plot::' not in examples_str):
- out = []
- out += self._str_header('Examples')
- out += ['.. plot::', '']
- out += self._str_indent(self['Examples'])
- out += ['']
- return out
- else:
- return self._str_section('Examples')
-
- def __str__(self, indent=0, func_role="obj"):
- out = []
- out += self._str_signature()
- out += self._str_index() + ['']
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_warnings()
- out += self._str_see_also(func_role)
- out += self._str_section('Notes')
- out += self._str_references()
- out += self._str_examples()
- for param_list in ('Attributes', 'Methods'):
- out += self._str_member_list(param_list)
- out = self._str_indent(out,indent)
- return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
- def __init__(self, obj, doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
- FunctionDoc.__init__(self, obj, doc=doc, config=config)
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
- def __init__(self, obj, doc=None, func_doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
- ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
-
-class SphinxObjDoc(SphinxDocString):
- def __init__(self, obj, doc=None, config={}):
- self._f = obj
- SphinxDocString.__init__(self, doc, config=config)
-
-def get_doc_object(obj, what=None, doc=None, config={}):
- if what is None:
- if inspect.isclass(obj):
- what = 'class'
- elif inspect.ismodule(obj):
- what = 'module'
- elif callable(obj):
- what = 'function'
- else:
- what = 'object'
- if what == 'class':
- return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
- config=config)
- elif what in ('function', 'method'):
- return SphinxFunctionDoc(obj, doc=doc, config=config)
- else:
- if doc is None:
- doc = pydoc.getdoc(obj)
- return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/ext/numpydoc/numpydoc.py b/doc/ext/numpydoc/numpydoc.py
deleted file mode 100755
index aa39005..0000000
--- a/doc/ext/numpydoc/numpydoc.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""
-========
-numpydoc
-========
-
-Sphinx extension that handles docstrings in the Numpy standard format. [1]
-
-It will:
-
-- Convert Parameters etc. sections to field lists.
-- Convert See Also section to a See also entry.
-- Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
-
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
-
-"""
-
-import os, re, pydoc
-from docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
-import inspect
-
-def mangle_docstrings(app, what, name, obj, options, lines,
- reference_offset=[0]):
-
- cfg = dict(use_plots=app.config.numpydoc_use_plots,
- show_class_members=app.config.numpydoc_show_class_members)
-
- if what == 'module':
- # Strip top title
- title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
- re.I|re.S)
- lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
- else:
- doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
- lines[:] = unicode(doc).split(u"\n")
-
- if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
- obj.__name__:
- if hasattr(obj, '__module__'):
- v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
- else:
- v = dict(full_name=obj.__name__)
- lines += [u'', u'.. htmlonly::', '']
- lines += [u' %s' % x for x in
- (app.config.numpydoc_edit_link % v).split("\n")]
-
- # replace reference numbers so that there are no duplicates
- references = []
- for line in lines:
- line = line.strip()
- m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
- if m:
- references.append(m.group(1))
-
- # start renaming from the longest string, to avoid overwriting parts
- references.sort(key=lambda x: -len(x))
- if references:
- for i, line in enumerate(lines):
- for r in references:
- if re.match(ur'^\d+$', r):
- new_r = u"R%d" % (reference_offset[0] + int(r))
- else:
- new_r = u"%s%d" % (r, reference_offset[0])
- lines[i] = lines[i].replace(u'[%s]_' % r,
- u'[%s]_' % new_r)
- lines[i] = lines[i].replace(u'.. [%s]' % r,
- u'.. [%s]' % new_r)
-
- reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj, options, sig, retann):
- # Do not try to inspect classes that don't define `__init__`
- if (inspect.isclass(obj) and
- (not hasattr(obj, '__init__') or
- 'initializes x; see ' in pydoc.getdoc(obj.__init__))):
- return '', ''
-
- if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
- if not hasattr(obj, '__doc__'): return
-
- doc = SphinxDocString(pydoc.getdoc(obj))
- if doc['Signature']:
- sig = re.sub(u"^[^(]*", u"", doc['Signature'])
- return sig, u''
-
-def setup(app, get_doc_object_=get_doc_object):
- global get_doc_object
- get_doc_object = get_doc_object_
-
- app.connect('autodoc-process-docstring', mangle_docstrings)
- app.connect('autodoc-process-signature', mangle_signature)
- app.add_config_value('numpydoc_edit_link', None, False)
- app.add_config_value('numpydoc_use_plots', None, False)
- app.add_config_value('numpydoc_show_class_members', True, True)
-
- # Extra mangling domains
- app.add_domain(NumpyPythonDomain)
- app.add_domain(NumpyCDomain)
-
-#------------------------------------------------------------------------------
-# Docstring-mangling domains
-#------------------------------------------------------------------------------
-
-from docutils.statemachine import ViewList
-from sphinx.domains.c import CDomain
-from sphinx.domains.python import PythonDomain
-
-class ManglingDomainBase(object):
- directive_mangling_map = {}
-
- def __init__(self, *a, **kw):
- super(ManglingDomainBase, self).__init__(*a, **kw)
- self.wrap_mangling_directives()
-
- def wrap_mangling_directives(self):
- for name, objtype in self.directive_mangling_map.items():
- self.directives[name] = wrap_mangling_directive(
- self.directives[name], objtype)
-
-class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
- name = 'np'
- directive_mangling_map = {
- 'function': 'function',
- 'class': 'class',
- 'exception': 'class',
- 'method': 'function',
- 'classmethod': 'function',
- 'staticmethod': 'function',
- 'attribute': 'attribute',
- }
-
-class NumpyCDomain(ManglingDomainBase, CDomain):
- name = 'np-c'
- directive_mangling_map = {
- 'function': 'function',
- 'member': 'attribute',
- 'macro': 'function',
- 'type': 'class',
- 'var': 'object',
- }
-
-def wrap_mangling_directive(base_directive, objtype):
- class directive(base_directive):
- def run(self):
- env = self.state.document.settings.env
-
- name = None
- if self.arguments:
- m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
- name = m.group(2).strip()
-
- if not name:
- name = self.arguments[0]
-
- lines = list(self.content)
- mangle_docstrings(env.app, objtype, name, None, None, lines)
- self.content = ViewList(lines, self.content.parent)
-
- return base_directive.run(self)
-
- return directive
-
diff --git a/doc/ext/numpydoc/phantom_import.py b/doc/ext/numpydoc/phantom_import.py
deleted file mode 100755
index c77eeb5..0000000
--- a/doc/ext/numpydoc/phantom_import.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-==============
-phantom_import
-==============
-
-Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
-extensions to use docstrings loaded from an XML file.
-
-This extension loads an XML file in the Pydocweb format [1] and
-creates a dummy module that contains the specified docstrings. This
-can be used to get the current docstrings from a Pydocweb instance
-without needing to rebuild the documented module.
-
-.. [1] http://code.google.com/p/pydocweb
-
-"""
-import imp, sys, compiler, types, os, inspect, re
-
-def setup(app):
- app.connect('builder-inited', initialize)
- app.add_config_value('phantom_import_file', None, True)
-
-def initialize(app):
- fn = app.config.phantom_import_file
- if (fn and os.path.isfile(fn)):
- print "[numpydoc] Phantom importing modules from", fn, "..."
- import_phantom_module(fn)
-
-#------------------------------------------------------------------------------
-# Creating 'phantom' modules from an XML description
-#------------------------------------------------------------------------------
-def import_phantom_module(xml_file):
- """
- Insert a fake Python module to sys.modules, based on a XML file.
-
- The XML file is expected to conform to Pydocweb DTD. The fake
- module will contain dummy objects, which guarantee the following:
-
- - Docstrings are correct.
- - Class inheritance relationships are correct (if present in XML).
- - Function argspec is *NOT* correct (even if present in XML).
- Instead, the function signature is prepended to the function docstring.
- - Class attributes are *NOT* correct; instead, they are dummy objects.
-
- Parameters
- ----------
- xml_file : str
- Name of an XML file to read
-
- """
- import lxml.etree as etree
-
- object_cache = {}
-
- tree = etree.parse(xml_file)
- root = tree.getroot()
-
- # Sort items so that
- # - Base classes come before classes inherited from them
- # - Modules come before their contents
- all_nodes = dict([(n.attrib['id'], n) for n in root])
-
- def _get_bases(node, recurse=False):
- bases = [x.attrib['ref'] for x in node.findall('base')]
- if recurse:
- j = 0
- while True:
- try:
- b = bases[j]
- except IndexError: break
- if b in all_nodes:
- bases.extend(_get_bases(all_nodes[b]))
- j += 1
- return bases
-
- type_index = ['module', 'class', 'callable', 'object']
-
- def base_cmp(a, b):
- x = cmp(type_index.index(a.tag), type_index.index(b.tag))
- if x != 0: return x
-
- if a.tag == 'class' and b.tag == 'class':
- a_bases = _get_bases(a, recurse=True)
- b_bases = _get_bases(b, recurse=True)
- x = cmp(len(a_bases), len(b_bases))
- if x != 0: return x
- if a.attrib['id'] in b_bases: return -1
- if b.attrib['id'] in a_bases: return 1
-
- return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
-
- nodes = root.getchildren()
- nodes.sort(base_cmp)
-
- # Create phantom items
- for node in nodes:
- name = node.attrib['id']
- doc = (node.text or '').decode('string-escape') + "\n"
- if doc == "\n": doc = ""
-
- # create parent, if missing
- parent = name
- while True:
- parent = '.'.join(parent.split('.')[:-1])
- if not parent: break
- if parent in object_cache: break
- obj = imp.new_module(parent)
- object_cache[parent] = obj
- sys.modules[parent] = obj
-
- # create object
- if node.tag == 'module':
- obj = imp.new_module(name)
- obj.__doc__ = doc
- sys.modules[name] = obj
- elif node.tag == 'class':
- bases = [object_cache[b] for b in _get_bases(node)
- if b in object_cache]
- bases.append(object)
- init = lambda self: None
- init.__doc__ = doc
- obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
- obj.__name__ = name.split('.')[-1]
- elif node.tag == 'callable':
- funcname = node.attrib['id'].split('.')[-1]
- argspec = node.attrib.get('argspec')
- if argspec:
- argspec = re.sub('^[^(]*', '', argspec)
- doc = "%s%s\n\n%s" % (funcname, argspec, doc)
- obj = lambda: 0
- obj.__argspec_is_invalid_ = True
- obj.func_name = funcname
- obj.__name__ = name
- obj.__doc__ = doc
- if inspect.isclass(object_cache[parent]):
- obj.__objclass__ = object_cache[parent]
- else:
- class Dummy(object): pass
- obj = Dummy()
- obj.__name__ = name
- obj.__doc__ = doc
- if inspect.isclass(object_cache[parent]):
- obj.__get__ = lambda: None
- object_cache[name] = obj
-
- if parent:
- if inspect.ismodule(object_cache[parent]):
- obj.__module__ = parent
- setattr(object_cache[parent], name.split('.')[-1], obj)
-
- # Populate items
- for node in root:
- obj = object_cache.get(node.attrib['id'])
- if obj is None: continue
- for ref in node.findall('ref'):
- if node.tag == 'class':
- if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
- setattr(obj, ref.attrib['name'],
- object_cache.get(ref.attrib['ref']))
- else:
- setattr(obj, ref.attrib['name'],
- object_cache.get(ref.attrib['ref']))
diff --git a/doc/ext/numpydoc/plot_directive.py b/doc/ext/numpydoc/plot_directive.py
deleted file mode 100755
index 545d2e3..0000000
--- a/doc/ext/numpydoc/plot_directive.py
+++ /dev/null
@@ -1,619 +0,0 @@
-"""
-A special directive for generating a matplotlib plot.
-
-.. warning::
-
- This is a hacked version of plot_directive.py from Matplotlib.
- It's very much subject to change!
-
-
-Usage
------
-
-Can be used like this::
-
- .. plot:: examples/example.py
-
- .. plot::
-
- import matplotlib.pyplot as plt
- plt.plot([1,2,3], [4,5,6])
-
- .. plot::
-
- A plotting example:
-
- >>> import matplotlib.pyplot as plt
- >>> plt.plot([1,2,3], [4,5,6])
-
-The content is interpreted as doctest formatted if it has a line starting
-with ``>>>``.
-
-The ``plot`` directive supports the options
-
- format : {'python', 'doctest'}
- Specify the format of the input
-
- include-source : bool
- Whether to display the source code. Default can be changed in conf.py
-
-and the ``image`` directive options ``alt``, ``height``, ``width``,
-``scale``, ``align``, ``class``.
-
-Configuration options
----------------------
-
-The plot directive has the following configuration options:
-
- plot_include_source
- Default value for the include-source option
-
- plot_pre_code
- Code that should be executed before each plot.
-
- plot_basedir
- Base directory, to which plot:: file names are relative to.
- (If None or empty, file names are relative to the directoly where
- the file containing the directive is.)
-
- plot_formats
- File formats to generate. List of tuples or strings::
-
- [(suffix, dpi), suffix, ...]
-
- that determine the file format and the DPI. For entries whose
- DPI was omitted, sensible defaults are chosen.
-
- plot_html_show_formats
- Whether to show links to the files in HTML.
-
-TODO
-----
-
-* Refactor Latex output; now it's plain images, but it would be nice
- to make them appear side-by-side, or in floats.
-
-"""
-
-import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback
-import sphinx
-
-import warnings
-warnings.warn("A plot_directive module is also available under "
- "matplotlib.sphinxext; expect this numpydoc.plot_directive "
- "module to be deprecated after relevant features have been "
- "integrated there.",
- FutureWarning, stacklevel=2)
-
-
-#------------------------------------------------------------------------------
-# Registration hook
-#------------------------------------------------------------------------------
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_config_value('plot_pre_code', '', True)
- app.add_config_value('plot_include_source', False, True)
- app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
- app.add_config_value('plot_basedir', None, True)
- app.add_config_value('plot_html_show_formats', True, True)
-
- app.add_directive('plot', plot_directive, True, (0, 1, False),
- **plot_directive_options)
-
-#------------------------------------------------------------------------------
-# plot:: directive
-#------------------------------------------------------------------------------
-from docutils.parsers.rst import directives
-from docutils import nodes
-
-def plot_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- return run(arguments, content, options, state_machine, state, lineno)
-plot_directive.__doc__ = __doc__
-
-def _option_boolean(arg):
- if not arg or not arg.strip():
- # no argument given, assume used as a flag
- return True
- elif arg.strip().lower() in ('no', '0', 'false'):
- return False
- elif arg.strip().lower() in ('yes', '1', 'true'):
- return True
- else:
- raise ValueError('"%s" unknown boolean' % arg)
-
-def _option_format(arg):
- return directives.choice(arg, ('python', 'lisp'))
-
-def _option_align(arg):
- return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
- "right"))
-
-plot_directive_options = {'alt': directives.unchanged,
- 'height': directives.length_or_unitless,
- 'width': directives.length_or_percentage_or_unitless,
- 'scale': directives.nonnegative_int,
- 'align': _option_align,
- 'class': directives.class_option,
- 'include-source': _option_boolean,
- 'format': _option_format,
- }
-
-#------------------------------------------------------------------------------
-# Generating output
-#------------------------------------------------------------------------------
-
-from docutils import nodes, utils
-
-try:
- # Sphinx depends on either Jinja or Jinja2
- import jinja2
- def format_template(template, **kw):
- return jinja2.Template(template).render(**kw)
-except ImportError:
- import jinja
- def format_template(template, **kw):
- return jinja.from_string(template, **kw)
-
-TEMPLATE = """
-{{ source_code }}
-
-{{ only_html }}
-
- {% if source_link or (html_show_formats and not multi_image) %}
- (
- {%- if source_link -%}
- `Source code <{{ source_link }}>`__
- {%- endif -%}
- {%- if html_show_formats and not multi_image -%}
- {%- for img in images -%}
- {%- for fmt in img.formats -%}
- {%- if source_link or not loop.first -%}, {% endif -%}
- `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
- {%- endfor -%}
- {%- endfor -%}
- {%- endif -%}
- )
- {% endif %}
-
- {% for img in images %}
- .. figure:: {{ build_dir }}/{{ img.basename }}.png
- {%- for option in options %}
- {{ option }}
- {% endfor %}
-
- {% if html_show_formats and multi_image -%}
- (
- {%- for fmt in img.formats -%}
- {%- if not loop.first -%}, {% endif -%}
- `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
- {%- endfor -%}
- )
- {%- endif -%}
- {% endfor %}
-
-{{ only_latex }}
-
- {% for img in images %}
- .. image:: {{ build_dir }}/{{ img.basename }}.pdf
- {% endfor %}
-
-"""
-
-class ImageFile(object):
- def __init__(self, basename, dirname):
- self.basename = basename
- self.dirname = dirname
- self.formats = []
-
- def filename(self, format):
- return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
-
- def filenames(self):
- return [self.filename(fmt) for fmt in self.formats]
-
-def run(arguments, content, options, state_machine, state, lineno):
- if arguments and content:
- raise RuntimeError("plot:: directive can't have both args and content")
-
- document = state_machine.document
- config = document.settings.env.config
-
- options.setdefault('include-source', config.plot_include_source)
-
- # determine input
- rst_file = document.attributes['source']
- rst_dir = os.path.dirname(rst_file)
-
- if arguments:
- if not config.plot_basedir:
- source_file_name = os.path.join(rst_dir,
- directives.uri(arguments[0]))
- else:
- source_file_name = os.path.join(setup.confdir, config.plot_basedir,
- directives.uri(arguments[0]))
- code = open(source_file_name, 'r').read()
- output_base = os.path.basename(source_file_name)
- else:
- source_file_name = rst_file
- code = textwrap.dedent("\n".join(map(str, content)))
- counter = document.attributes.get('_plot_counter', 0) + 1
- document.attributes['_plot_counter'] = counter
- base, ext = os.path.splitext(os.path.basename(source_file_name))
- output_base = '%s-%d.py' % (base, counter)
-
- base, source_ext = os.path.splitext(output_base)
- if source_ext in ('.py', '.rst', '.txt'):
- output_base = base
- else:
- source_ext = ''
-
- # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
- output_base = output_base.replace('.', '-')
-
- # is it in doctest format?
- is_doctest = contains_doctest(code)
- if options.has_key('format'):
- if options['format'] == 'python':
- is_doctest = False
- else:
- is_doctest = True
-
- # determine output directory name fragment
- source_rel_name = relpath(source_file_name, setup.confdir)
- source_rel_dir = os.path.dirname(source_rel_name)
- while source_rel_dir.startswith(os.path.sep):
- source_rel_dir = source_rel_dir[1:]
-
- # build_dir: where to place output files (temporarily)
- build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
- 'plot_directive',
- source_rel_dir)
- if not os.path.exists(build_dir):
- os.makedirs(build_dir)
-
- # output_dir: final location in the builder's directory
- dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
- source_rel_dir))
-
- # how to link to files from the RST file
- dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
- source_rel_dir).replace(os.path.sep, '/')
- build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
- source_link = dest_dir_link + '/' + output_base + source_ext
-
- # make figures
- try:
- results = makefig(code, source_file_name, build_dir, output_base,
- config)
- errors = []
- except PlotError, err:
- reporter = state.memo.reporter
- sm = reporter.system_message(
- 2, "Exception occurred in plotting %s: %s" % (output_base, err),
- line=lineno)
- results = [(code, [])]
- errors = [sm]
-
- # generate output restructuredtext
- total_lines = []
- for j, (code_piece, images) in enumerate(results):
- if options['include-source']:
- if is_doctest:
- lines = ['']
- lines += [row.rstrip() for row in code_piece.split('\n')]
- else:
- lines = ['.. code-block:: python', '']
- lines += [' %s' % row.rstrip()
- for row in code_piece.split('\n')]
- source_code = "\n".join(lines)
- else:
- source_code = ""
-
- opts = [':%s: %s' % (key, val) for key, val in options.items()
- if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
-
- only_html = ".. only:: html"
- only_latex = ".. only:: latex"
-
- if j == 0:
- src_link = source_link
- else:
- src_link = None
-
- result = format_template(
- TEMPLATE,
- dest_dir=dest_dir_link,
- build_dir=build_dir_link,
- source_link=src_link,
- multi_image=len(images) > 1,
- only_html=only_html,
- only_latex=only_latex,
- options=opts,
- images=images,
- source_code=source_code,
- html_show_formats=config.plot_html_show_formats)
-
- total_lines.extend(result.split("\n"))
- total_lines.extend("\n")
-
- if total_lines:
- state_machine.insert_input(total_lines, source=source_file_name)
-
- # copy image files to builder's output directory
- if not os.path.exists(dest_dir):
- os.makedirs(dest_dir)
-
- for code_piece, images in results:
- for img in images:
- for fn in img.filenames():
- shutil.copyfile(fn, os.path.join(dest_dir,
- os.path.basename(fn)))
-
- # copy script (if necessary)
- if source_file_name == rst_file:
- target_name = os.path.join(dest_dir, output_base + source_ext)
- f = open(target_name, 'w')
- f.write(unescape_doctest(code))
- f.close()
-
- return errors
-
-
-#------------------------------------------------------------------------------
-# Run code and capture figures
-#------------------------------------------------------------------------------
-
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.image as image
-from matplotlib import _pylab_helpers
-
-import exceptions
-
-def contains_doctest(text):
- try:
- # check if it's valid Python as-is
- compile(text, '<string>', 'exec')
- return False
- except SyntaxError:
- pass
- r = re.compile(r'^\s*>>>', re.M)
- m = r.search(text)
- return bool(m)
-
-def unescape_doctest(text):
- """
- Extract code from a piece of text, which contains either Python code
- or doctests.
-
- """
- if not contains_doctest(text):
- return text
-
- code = ""
- for line in text.split("\n"):
- m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
- if m:
- code += m.group(2) + "\n"
- elif line.strip():
- code += "# " + line.strip() + "\n"
- else:
- code += "\n"
- return code
-
-def split_code_at_show(text):
- """
- Split code at plt.show()
-
- """
-
- parts = []
- is_doctest = contains_doctest(text)
-
- part = []
- for line in text.split("\n"):
- if (not is_doctest and line.strip() == 'plt.show()') or \
- (is_doctest and line.strip() == '>>> plt.show()'):
- part.append(line)
- parts.append("\n".join(part))
- part = []
- else:
- part.append(line)
- if "\n".join(part).strip():
- parts.append("\n".join(part))
- return parts
-
-class PlotError(RuntimeError):
- pass
-
-def run_code(code, code_path, ns=None):
- # Change the working directory to the directory of the example, so
- # it can get at its data files, if any.
- pwd = os.getcwd()
- old_sys_path = list(sys.path)
- if code_path is not None:
- dirname = os.path.abspath(os.path.dirname(code_path))
- os.chdir(dirname)
- sys.path.insert(0, dirname)
-
- # Redirect stdout
- stdout = sys.stdout
- sys.stdout = cStringIO.StringIO()
-
- # Reset sys.argv
- old_sys_argv = sys.argv
- sys.argv = [code_path]
-
- try:
- try:
- code = unescape_doctest(code)
- if ns is None:
- ns = {}
- if not ns:
- exec setup.config.plot_pre_code in ns
- exec code in ns
- except (Exception, SystemExit), err:
- raise PlotError(traceback.format_exc())
- finally:
- os.chdir(pwd)
- sys.argv = old_sys_argv
- sys.path[:] = old_sys_path
- sys.stdout = stdout
- return ns
-
-
-#------------------------------------------------------------------------------
-# Generating figures
-#------------------------------------------------------------------------------
-
-def out_of_date(original, derived):
- """
- Returns True if derivative is out-of-date wrt original,
- both of which are full file paths.
- """
- return (not os.path.exists(derived)
- or os.stat(derived).st_mtime < os.stat(original).st_mtime)
-
-
-def makefig(code, code_path, output_dir, output_base, config):
- """
- Run a pyplot script *code* and save the images under *output_dir*
- with file names derived from *output_base*
-
- """
-
- # -- Parse format list
- default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
- formats = []
- for fmt in config.plot_formats:
- if isinstance(fmt, str):
- formats.append((fmt, default_dpi.get(fmt, 80)))
- elif type(fmt) in (tuple, list) and len(fmt)==2:
- formats.append((str(fmt[0]), int(fmt[1])))
- else:
- raise PlotError('invalid image format "%r" in plot_formats' % fmt)
-
- # -- Try to determine if all images already exist
-
- code_pieces = split_code_at_show(code)
-
- # Look for single-figure output files first
- all_exists = True
- img = ImageFile(output_base, output_dir)
- for format, dpi in formats:
- if out_of_date(code_path, img.filename(format)):
- all_exists = False
- break
- img.formats.append(format)
-
- if all_exists:
- return [(code, [img])]
-
- # Then look for multi-figure output files
- results = []
- all_exists = True
- for i, code_piece in enumerate(code_pieces):
- images = []
- for j in xrange(1000):
- img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
- for format, dpi in formats:
- if out_of_date(code_path, img.filename(format)):
- all_exists = False
- break
- img.formats.append(format)
-
- # assume that if we have one, we have them all
- if not all_exists:
- all_exists = (j > 0)
- break
- images.append(img)
- if not all_exists:
- break
- results.append((code_piece, images))
-
- if all_exists:
- return results
-
- # -- We didn't find the files, so build them
-
- results = []
- ns = {}
-
- for i, code_piece in enumerate(code_pieces):
- # Clear between runs
- plt.close('all')
-
- # Run code
- run_code(code_piece, code_path, ns)
-
- # Collect images
- images = []
- fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
- for j, figman in enumerate(fig_managers):
- if len(fig_managers) == 1 and len(code_pieces) == 1:
- img = ImageFile(output_base, output_dir)
- else:
- img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
- output_dir)
- images.append(img)
- for format, dpi in formats:
- try:
- figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
- except exceptions.BaseException, err:
- raise PlotError(traceback.format_exc())
- img.formats.append(format)
-
- # Results
- results.append((code_piece, images))
-
- return results
-
-
-#------------------------------------------------------------------------------
-# Relative pathnames
-#------------------------------------------------------------------------------
-
-try:
- from os.path import relpath
-except ImportError:
- def relpath(target, base=os.curdir):
- """
- Return a relative path to the target from either the current
- dir or an optional base dir. Base can be a directory
- specified either as absolute or relative to current dir.
- """
-
- if not os.path.exists(target):
- raise OSError, 'Target does not exist: '+target
-
- if not os.path.isdir(base):
- raise OSError, 'Base is not a directory or does not exist: '+base
-
- base_list = (os.path.abspath(base)).split(os.sep)
- target_list = (os.path.abspath(target)).split(os.sep)
-
- # On the windows platform the target may be on a completely
- # different drive from the base.
- if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
- raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
-
- # Starting from the filepath root, work out how much of the
- # filepath is shared by base and target.
- for i in range(min(len(base_list), len(target_list))):
- if base_list[i] <> target_list[i]: break
- else:
- # If we broke out of the loop, i is pointing to the first
- # differing path elements. If we didn't break out of the
- # loop, i is pointing to identical path elements.
- # Increment i so that in all cases it points to the first
- # differing path elements.
- i+=1
-
- rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
- return os.path.join(*rel_list)
diff --git a/doc/ext/numpydoc/traitsdoc.py b/doc/ext/numpydoc/traitsdoc.py
deleted file mode 100755
index 0fcf2c1..0000000
--- a/doc/ext/numpydoc/traitsdoc.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-=========
-traitsdoc
-=========
-
-Sphinx extension that handles docstrings in the Numpy standard format, [1]
-and support Traits [2].
-
-This extension can be used as a replacement for ``numpydoc`` when support
-for Traits is required.
-
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
-.. [2] http://code.enthought.com/projects/traits/
-
-"""
-
-import inspect
-import os
-import pydoc
-
-import docscrape
-import docscrape_sphinx
-from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
-
-import numpydoc
-
-import comment_eater
-
-class SphinxTraitsDoc(SphinxClassDoc):
- def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
- if not inspect.isclass(cls):
- raise ValueError("Initialise using a class. Got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
- self._name = cls.__name__
- self._func_doc = func_doc
-
- docstring = pydoc.getdoc(cls)
- docstring = docstring.split('\n')
-
- # De-indent paragraph
- try:
- indent = min(len(s) - len(s.lstrip()) for s in docstring
- if s.strip())
- except ValueError:
- indent = 0
-
- for n,line in enumerate(docstring):
- docstring[n] = docstring[n][indent:]
-
- self._doc = docscrape.Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': '',
- 'Description': [],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Traits': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'References': '',
- 'Example': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def _str_summary(self):
- return self['Summary'] + ['']
-
- def _str_extended_summary(self):
- return self['Description'] + self['Extended Summary'] + ['']
-
- def __str__(self, indent=0, func_role="func"):
- out = []
- out += self._str_signature()
- out += self._str_index() + ['']
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Traits', 'Methods',
- 'Returns','Raises'):
- out += self._str_param_list(param_list)
- out += self._str_see_also("obj")
- out += self._str_section('Notes')
- out += self._str_references()
- out += self._str_section('Example')
- out += self._str_section('Examples')
- out = self._str_indent(out,indent)
- return '\n'.join(out)
-
-def looks_like_issubclass(obj, classname):
- """ Return True if the object has a class or superclass with the given class
- name.
-
- Ignores old-style classes.
- """
- t = obj
- if t.__name__ == classname:
- return True
- for klass in t.__mro__:
- if klass.__name__ == classname:
- return True
- return False
-
-def get_doc_object(obj, what=None, config=None):
- if what is None:
- if inspect.isclass(obj):
- what = 'class'
- elif inspect.ismodule(obj):
- what = 'module'
- elif callable(obj):
- what = 'function'
- else:
- what = 'object'
- if what == 'class':
- doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
- if looks_like_issubclass(obj, 'HasTraits'):
- for name, trait, comment in comment_eater.get_class_traits(obj):
- # Exclude private traits.
- if not name.startswith('_'):
- doc['Traits'].append((name, trait, comment.splitlines()))
- return doc
- elif what in ('function', 'method'):
- return SphinxFunctionDoc(obj, '', config=config)
- else:
- return SphinxDocString(pydoc.getdoc(obj), config=config)
-
-def setup(app):
- # init numpydoc
- numpydoc.setup(app, get_doc_object)
-
diff --git a/doc/installation.rst b/doc/installation.rst
index 1d6840d..ff1cb24 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -24,10 +24,10 @@ Downloads
The latest stable version is available from PyPI:
-.. _lmfit-0.7.4.tar.gz (PyPI): http://pypi.python.org/packages/source/l/lmfit/lmfit-0.7.4.tar.gz
-.. _lmfit-0.7.4.win32-py2.6.exe (PyPI): http://pypi.python.org/packages/2.6/l/lmfit/lmfit-0.7.4.win32-py2.6.exe
-.. _lmfit-0.7.4.win32-py2.7.exe (PyPI): http://pypi.python.org/packages/2.7/l/lmfit/lmfit-0.7.4.win32-py2.7.exe
-.. _lmfit-0.7.4.win32-py3.2.exe (PyPI): http://pypi.python.org/packages/3.2/l/lmfit/lmfit-0.7.4.win32-py3.2.exe
+.. _lmfit-0.7.2.tar.gz (PyPI): http://pypi.python.org/packages/source/l/lmfit/lmfit-0.7.2.tar.gz
+.. _lmfit-0.7.2.win32-py2.6.exe (PyPI): http://pypi.python.org/packages/2.6/l/lmfit/lmfit-0.7.2.win32-py2.6.exe
+.. _lmfit-0.7.2.win32-py2.7.exe (PyPI): http://pypi.python.org/packages/2.7/l/lmfit/lmfit-0.7.2.win32-py2.7.exe
+.. _lmfit-0.7.2.win32-py3.2.exe (PyPI): http://pypi.python.org/packages/3.2/l/lmfit/lmfit-0.7.2.win32-py3.2.exe
.. _lmfit github repository: http://github.com/lmfit/lmfit-py
.. _lmfit at pypi: http://pypi.python.org/pypi/lmfit/
@@ -36,13 +36,13 @@ The latest stable version is available from PyPI:
+----------------------+------------------+--------------------------------------------+
| Download Option | Python Versions | Location |
+======================+==================+============================================+
-| Source Kit | 2.6, 2.7, 3.2 | - `lmfit-0.7.4.tar.gz (PyPI)`_ |
+| Source Kit | 2.6, 2.7, 3.2 | - `lmfit-0.7.2.tar.gz (PyPI)`_ |
+----------------------+------------------+--------------------------------------------+
-| Win32 Installer | 2.6 | - `lmfit-0.7.4.win32-py2.6.exe (PyPI)`_ |
+| Win32 Installer | 2.6 | - `lmfit-0.7.2.win32-py2.6.exe (PyPI)`_ |
+----------------------+------------------+--------------------------------------------+
-| Win32 Installer | 2.7 | - `lmfit-0.7.4.win32-py2.7.exe (PyPI)`_ |
+| Win32 Installer | 2.7 | - `lmfit-0.7.2.win32-py2.7.exe (PyPI)`_ |
+----------------------+------------------+--------------------------------------------+
-| Win32 Installer | 3.2 | - `lmfit-0.7.4.win32-py3.2.exe (PyPI)`_ |
+| Win32 Installer | 3.2 | - `lmfit-0.7.2.win32-py3.2.exe (PyPI)`_ |
+----------------------+------------------+--------------------------------------------+
| Development Version | all | use `lmfit github repository`_ |
+----------------------+------------------+--------------------------------------------+
diff --git a/doc/models1d.rst b/doc/models1d.rst
index 7040c0e..0bc95b0 100644
--- a/doc/models1d.rst
+++ b/doc/models1d.rst
@@ -19,7 +19,7 @@ Example
Let's start with a very simple example. We'll read data from a simple
datafile, and fit it to a Gaussian peak. A script to do this could be:
-.. literalinclude:: ../examples/model1d_doc1.py
+.. literalinclude:: ../tests/model1d_doc1.py
First, we read in the data for 'x' and 'y', then build a Gaussian model.
This 'model' contains all the Parameters for a Gaussian line shape. We
diff --git a/doc/parameters.rst b/doc/parameters.rst
index c65258e..91fae30 100644
--- a/doc/parameters.rst
+++ b/doc/parameters.rst
@@ -240,6 +240,6 @@ Simple Example
Putting it all together, a simple example of using a dictionary of
:class:`Parameter` objects and :func:`minimize` might look like this:
-.. literalinclude:: ../examples/simple.py
+.. literalinclude:: ../tests/simple.py
diff --git a/examples/NISTModels.py b/examples/NISTModels.py
new file mode 100644
index 0000000..03f6850
--- /dev/null
+++ b/examples/NISTModels.py
@@ -0,0 +1,196 @@
+import os
+from numpy import exp, log, log10, sin, cos, arctan, array
+from lmfit import Parameters
+NIST_DIR = 'NIST_STRD'
+
+def read_params(params):
+ if isinstance(params, Parameters):
+ return [par.value for par in params.values()]
+ else:
+ return params
+
+def Bennet5(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] * (b[1]+x)**(-1/b[2])
+
+def BoxBOD(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*(1-exp(-b[1]*x))
+
+def Chwirut(b, x, y=0):
+ b = read_params(b)
+ return y - exp(-b[0]*x)/(b[1]+b[2]*x)
+
+def DanWood(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*x**b[1]
+
+def ENSO(b, x, y=0):
+ b = read_params(b)
+ pi = 3.141592653589793238462643383279
+
+ return y - b[0] + (b[1]*cos( 2*pi*x/12 ) + b[2]*sin( 2*pi*x/12 ) +
+ b[4]*cos( 2*pi*x/b[3] ) + b[5]*sin( 2*pi*x/b[3] ) +
+ b[7]*cos( 2*pi*x/b[6] ) + b[8]*sin( 2*pi*x/b[6] ) )
+
+def Eckerle4(b, x, y=0):
+ b = read_params(b)
+ return y - (b[0]/b[1]) * exp(-0.5*((x-b[2])/b[1])**2)
+
+def Gauss(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*exp( -b[1]*x ) + (b[2]*exp( -(x-b[3])**2 / b[4]**2 ) +
+ b[5]*exp( -(x-b[6])**2 / b[7]**2 ) )
+
+def Hahn1(b, x, y=0):
+ b = read_params(b)
+ return y - ((b[0]+b[1]*x+b[2]*x**2+b[3]*x**3) /
+ (1+b[4]*x+b[5]*x**2+b[6]*x**3) )
+
+def Kirby(b, x, y=0):
+ b = read_params(b)
+ return y - (b[0] + b[1]*x + b[2]*x**2) / (1 + b[3]*x + b[4]*x**2)
+
+def Lanczos(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*exp(-b[1]*x) + b[2]*exp(-b[3]*x) + b[4]*exp(-b[5]*x)
+
+def MGH09(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*(x**2+x*b[1]) / (x**2+x*b[2]+b[3])
+
+def MGH10(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] * exp( b[1]/(x+b[2]) )
+
+def MGH17(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] + b[1]*exp(-x*b[3]) + b[2]*exp(-x*b[4])
+
+def Misra1a(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*(1-exp(-b[1]*x))
+
+def Misra1b(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] * (1-(1+b[1]*x/2)**(-2))
+
+def Misra1c(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] * (1-(1+2*b[1]*x)**(-.5))
+
+def Misra1d(b, x, y=0):
+ b = read_params(b)
+ return y - b[0]*b[1]*x*((1+b[1]*x)**(-1))
+
+def Nelson(b, x, y=None):
+ b = read_params(b)
+ x1 = x[:,0]
+ x2 = x[:,1]
+ if y is None:
+ return - exp(b[0] - b[1]*x1 * exp(-b[2]*x2))
+ return log(y) - (b[0] - b[1]*x1 * exp(-b[2]*x2) )
+
+def Rat42(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] / (1+exp(b[1]-b[2]*x))
+
+def Rat43(b, x, y=0):
+ b = read_params(b)
+ return y - b[0] / ((1+exp(b[1]-b[2]*x))**(1/b[3]))
+
+def Roszman1(b, x, y=0):
+ b = read_params(b)
+ pi = 3.141592653589793238462643383279
+ return y - b[0] - b[1]*x - arctan(b[2]/(x-b[3]))/pi
+
+def Thurber(b, x, y=0):
+ b = read_params(b)
+ return y - ( (b[0] + b[1]*x + b[2]*x**2 + b[3]*x**3) /
+ (1 + b[4]*x + b[5]*x**2 + b[6]*x**3) )
+
+# Model name fcn, #fitting params, dim of x
+Models = {'Bennett5': (Bennet5, 3, 1),
+ 'BoxBOD': (BoxBOD, 2, 1),
+ 'Chwirut1': (Chwirut, 3, 1),
+ 'Chwirut2': (Chwirut, 3, 1),
+ 'DanWood': (DanWood, 2, 1),
+ 'ENSO': (ENSO, 9, 1),
+ 'Eckerle4': (Eckerle4, 3, 1),
+ 'Gauss1': (Gauss, 8, 1),
+ 'Gauss2': (Gauss, 8, 1),
+ 'Gauss3': (Gauss, 8, 1),
+ 'Hahn1': (Hahn1, 7, 1),
+ 'Kirby2': (Kirby, 5, 1),
+ 'Lanczos1': (Lanczos, 6, 1),
+ 'Lanczos2': (Lanczos, 6, 1),
+ 'Lanczos3': (Lanczos, 6, 1),
+ 'MGH09': (MGH09, 4, 1),
+ 'MGH10': (MGH10, 3, 1),
+ 'MGH17': (MGH17, 5, 1),
+ 'Misra1a': (Misra1a, 2, 1),
+ 'Misra1b' : (Misra1b, 2, 1),
+ 'Misra1c' : (Misra1c, 2, 1),
+ 'Misra1d' : (Misra1d, 2, 1),
+ 'Nelson': (Nelson, 3, 2),
+ 'Rat42': (Rat42, 3, 1),
+ 'Rat43': (Rat43, 4, 1),
+ 'Roszman1': (Roszman1, 4, 1),
+ 'Thurber': (Thurber, 7, 1) }
+
+def ReadNistData(dataset):
+ """NIST STRD data is in a simple, fixed format with
+ line numbers being significant!
+ """
+ finp = open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r')
+ lines = [l[:-1] for l in finp.readlines()]
+ finp.close()
+ ModelLines = lines[30:39]
+ ParamLines = lines[40:58]
+ DataLines = lines[60:]
+
+ words = ModelLines[1].strip().split()
+ nparams = int(words[0])
+
+ start1 = [0]*nparams
+ start2 = [0]*nparams
+ certval = [0]*nparams
+ certerr = [0]*nparams
+ for i, text in enumerate(ParamLines[:nparams]):
+ [s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
+ start1[i] = s1
+ start2[i] = s2
+ certval[i] = val
+ certerr[i] = err
+
+ #
+ for t in ParamLines[nparams:]:
+ t = t.strip()
+ if ':' not in t:
+ continue
+ val = float(t.split(':')[1])
+ if t.startswith('Residual Sum of Squares'):
+ sum_squares = val
+ elif t.startswith('Residual Standard Deviation'):
+ std_dev = val
+ elif t.startswith('Degrees of Freedom'):
+ nfree = int(val)
+ elif t.startswith('Number of Observations'):
+ ndata = int(val)
+
+ y, x = [], []
+ for d in DataLines:
+ vals = [float(i) for i in d.strip().split()]
+ y.append(vals[0])
+ if len(vals) > 2:
+ x.append(vals[1:])
+ else:
+ x.append(vals[1])
+
+ y = array(y)
+ x = array(x)
+ out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
+ 'nfree': nfree, 'start1': start1, 'start2': start2,
+ 'sum_squares': sum_squares, 'std_dev': std_dev,
+ 'cert_values': certval, 'cert_stderr': certerr }
+ return out
diff --git a/examples/NIST_STRD/Bennett5.dat b/examples/NIST_STRD/Bennett5.dat
new file mode 100644
index 0000000..51335f4
--- /dev/null
+++ b/examples/NIST_STRD/Bennett5.dat
@@ -0,0 +1,214 @@
+NIST/ITL StRD
+Dataset Name: Bennett5 (Bennett5.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 214)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ superconductivity magnetization modeling. The
+ response variable is magnetism, and the predictor
+ variable is the log of time in minutes.
+
+Reference: Bennett, L., L. Swartzendruber, and H. Brown,
+ NIST (1994).
+ Superconductivity Magnetization Modeling.
+
+
+
+
+
+
+Data: 1 Response Variable (y = magnetism)
+ 1 Predictor Variable (x = log[time])
+ 154 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 * (b2+x)**(-1/b3) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = -2000 -1500 -2.5235058043E+03 2.9715175411E+02
+ b2 = 50 45 4.6736564644E+01 1.2448871856E+00
+ b3 = 0.8 0.85 9.3218483193E-01 2.0272299378E-02
+
+Residual Sum of Squares: 5.2404744073E-04
+Residual Standard Deviation: 1.8629312528E-03
+Degrees of Freedom: 151
+Number of Observations: 154
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ -34.834702E0 7.447168E0
+ -34.393200E0 8.102586E0
+ -34.152901E0 8.452547E0
+ -33.979099E0 8.711278E0
+ -33.845901E0 8.916774E0
+ -33.732899E0 9.087155E0
+ -33.640301E0 9.232590E0
+ -33.559200E0 9.359535E0
+ -33.486801E0 9.472166E0
+ -33.423100E0 9.573384E0
+ -33.365101E0 9.665293E0
+ -33.313000E0 9.749461E0
+ -33.260899E0 9.827092E0
+ -33.217400E0 9.899128E0
+ -33.176899E0 9.966321E0
+ -33.139198E0 10.029280E0
+ -33.101601E0 10.088510E0
+ -33.066799E0 10.144430E0
+ -33.035000E0 10.197380E0
+ -33.003101E0 10.247670E0
+ -32.971298E0 10.295560E0
+ -32.942299E0 10.341250E0
+ -32.916302E0 10.384950E0
+ -32.890202E0 10.426820E0
+ -32.864101E0 10.467000E0
+ -32.841000E0 10.505640E0
+ -32.817799E0 10.542830E0
+ -32.797501E0 10.578690E0
+ -32.774300E0 10.613310E0
+ -32.757000E0 10.646780E0
+ -32.733799E0 10.679150E0
+ -32.716400E0 10.710520E0
+ -32.699100E0 10.740920E0
+ -32.678799E0 10.770440E0
+ -32.661400E0 10.799100E0
+ -32.644001E0 10.826970E0
+ -32.626701E0 10.854080E0
+ -32.612202E0 10.880470E0
+ -32.597698E0 10.906190E0
+ -32.583199E0 10.931260E0
+ -32.568699E0 10.955720E0
+ -32.554298E0 10.979590E0
+ -32.539799E0 11.002910E0
+ -32.525299E0 11.025700E0
+ -32.510799E0 11.047980E0
+ -32.499199E0 11.069770E0
+ -32.487598E0 11.091100E0
+ -32.473202E0 11.111980E0
+ -32.461601E0 11.132440E0
+ -32.435501E0 11.152480E0
+ -32.435501E0 11.172130E0
+ -32.426800E0 11.191410E0
+ -32.412300E0 11.210310E0
+ -32.400799E0 11.228870E0
+ -32.392101E0 11.247090E0
+ -32.380501E0 11.264980E0
+ -32.366001E0 11.282560E0
+ -32.357300E0 11.299840E0
+ -32.348598E0 11.316820E0
+ -32.339901E0 11.333520E0
+ -32.328400E0 11.349940E0
+ -32.319698E0 11.366100E0
+ -32.311001E0 11.382000E0
+ -32.299400E0 11.397660E0
+ -32.290699E0 11.413070E0
+ -32.282001E0 11.428240E0
+ -32.273300E0 11.443200E0
+ -32.264599E0 11.457930E0
+ -32.256001E0 11.472440E0
+ -32.247299E0 11.486750E0
+ -32.238602E0 11.500860E0
+ -32.229900E0 11.514770E0
+ -32.224098E0 11.528490E0
+ -32.215401E0 11.542020E0
+ -32.203800E0 11.555380E0
+ -32.198002E0 11.568550E0
+ -32.189400E0 11.581560E0
+ -32.183601E0 11.594420E0
+ -32.174900E0 11.607121E0
+ -32.169102E0 11.619640E0
+ -32.163300E0 11.632000E0
+ -32.154598E0 11.644210E0
+ -32.145901E0 11.656280E0
+ -32.140099E0 11.668200E0
+ -32.131401E0 11.679980E0
+ -32.125599E0 11.691620E0
+ -32.119801E0 11.703130E0
+ -32.111198E0 11.714510E0
+ -32.105400E0 11.725760E0
+ -32.096699E0 11.736880E0
+ -32.090900E0 11.747890E0
+ -32.088001E0 11.758780E0
+ -32.079300E0 11.769550E0
+ -32.073502E0 11.780200E0
+ -32.067699E0 11.790730E0
+ -32.061901E0 11.801160E0
+ -32.056099E0 11.811480E0
+ -32.050301E0 11.821700E0
+ -32.044498E0 11.831810E0
+ -32.038799E0 11.841820E0
+ -32.033001E0 11.851730E0
+ -32.027199E0 11.861550E0
+ -32.024300E0 11.871270E0
+ -32.018501E0 11.880890E0
+ -32.012699E0 11.890420E0
+ -32.004002E0 11.899870E0
+ -32.001099E0 11.909220E0
+ -31.995300E0 11.918490E0
+ -31.989500E0 11.927680E0
+ -31.983700E0 11.936780E0
+ -31.977900E0 11.945790E0
+ -31.972099E0 11.954730E0
+ -31.969299E0 11.963590E0
+ -31.963501E0 11.972370E0
+ -31.957701E0 11.981070E0
+ -31.951900E0 11.989700E0
+ -31.946100E0 11.998260E0
+ -31.940300E0 12.006740E0
+ -31.937401E0 12.015150E0
+ -31.931601E0 12.023490E0
+ -31.925800E0 12.031760E0
+ -31.922899E0 12.039970E0
+ -31.917101E0 12.048100E0
+ -31.911301E0 12.056170E0
+ -31.908400E0 12.064180E0
+ -31.902599E0 12.072120E0
+ -31.896900E0 12.080010E0
+ -31.893999E0 12.087820E0
+ -31.888201E0 12.095580E0
+ -31.885300E0 12.103280E0
+ -31.882401E0 12.110920E0
+ -31.876600E0 12.118500E0
+ -31.873699E0 12.126030E0
+ -31.867901E0 12.133500E0
+ -31.862101E0 12.140910E0
+ -31.859200E0 12.148270E0
+ -31.856300E0 12.155570E0
+ -31.850500E0 12.162830E0
+ -31.844700E0 12.170030E0
+ -31.841801E0 12.177170E0
+ -31.838900E0 12.184270E0
+ -31.833099E0 12.191320E0
+ -31.830200E0 12.198320E0
+ -31.827299E0 12.205270E0
+ -31.821600E0 12.212170E0
+ -31.818701E0 12.219030E0
+ -31.812901E0 12.225840E0
+ -31.809999E0 12.232600E0
+ -31.807100E0 12.239320E0
+ -31.801300E0 12.245990E0
+ -31.798401E0 12.252620E0
+ -31.795500E0 12.259200E0
+ -31.789700E0 12.265750E0
+ -31.786800E0 12.272240E0
diff --git a/examples/NIST_STRD/BoxBOD.dat b/examples/NIST_STRD/BoxBOD.dat
new file mode 100644
index 0000000..49163c7
--- /dev/null
+++ b/examples/NIST_STRD/BoxBOD.dat
@@ -0,0 +1,66 @@
+NIST/ITL StRD
+Dataset Name: BoxBOD (BoxBOD.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 66)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are described in detail in Box, Hunter and
+ Hunter (1978). The response variable is biochemical
+ oxygen demand (BOD) in mg/l, and the predictor
+ variable is incubation time in days.
+
+
+Reference: Box, G. P., W. G. Hunter, and J. S. Hunter (1978).
+ Statistics for Experimenters.
+ New York, NY: Wiley, pp. 483-487.
+
+
+
+
+
+Data: 1 Response (y = biochemical oxygen demand)
+ 1 Predictor (x = incubation time)
+ 6 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*(1-exp[-b2*x]) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 100 2.1380940889E+02 1.2354515176E+01
+ b2 = 1 0.75 5.4723748542E-01 1.0455993237E-01
+
+Residual Sum of Squares: 1.1680088766E+03
+Residual Standard Deviation: 1.7088072423E+01
+Degrees of Freedom: 4
+Number of Observations: 6
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 109 1
+ 149 2
+ 149 3
+ 191 5
+ 213 7
+ 224 10
diff --git a/examples/NIST_STRD/Chwirut1.dat b/examples/NIST_STRD/Chwirut1.dat
new file mode 100644
index 0000000..5e72e4e
--- /dev/null
+++ b/examples/NIST_STRD/Chwirut1.dat
@@ -0,0 +1,274 @@
+NIST/ITL StRD
+Dataset Name: Chwirut1 (Chwirut1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 274)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ ultrasonic calibration. The response variable is
+ ultrasonic response, and the predictor variable is
+ metal distance.
+
+Reference: Chwirut, D., NIST (197?).
+ Ultrasonic Reference Block Study.
+
+
+
+
+
+
+
+Data: 1 Response Variable (y = ultrasonic response)
+ 1 Predictor Variable (x = metal distance)
+ 214 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = exp[-b1*x]/(b2+b3*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.15 1.9027818370E-01 2.1938557035E-02
+ b2 = 0.01 0.008 6.1314004477E-03 3.4500025051E-04
+ b3 = 0.02 0.010 1.0530908399E-02 7.9281847748E-04
+
+Residual Sum of Squares: 2.3844771393E+03
+Residual Standard Deviation: 3.3616721320E+00
+Degrees of Freedom: 211
+Number of Observations: 214
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 92.9000E0 0.5000E0
+ 78.7000E0 0.6250E0
+ 64.2000E0 0.7500E0
+ 64.9000E0 0.8750E0
+ 57.1000E0 1.0000E0
+ 43.3000E0 1.2500E0
+ 31.1000E0 1.7500E0
+ 23.6000E0 2.2500E0
+ 31.0500E0 1.7500E0
+ 23.7750E0 2.2500E0
+ 17.7375E0 2.7500E0
+ 13.8000E0 3.2500E0
+ 11.5875E0 3.7500E0
+ 9.4125E0 4.2500E0
+ 7.7250E0 4.7500E0
+ 7.3500E0 5.2500E0
+ 8.0250E0 5.7500E0
+ 90.6000E0 0.5000E0
+ 76.9000E0 0.6250E0
+ 71.6000E0 0.7500E0
+ 63.6000E0 0.8750E0
+ 54.0000E0 1.0000E0
+ 39.2000E0 1.2500E0
+ 29.3000E0 1.7500E0
+ 21.4000E0 2.2500E0
+ 29.1750E0 1.7500E0
+ 22.1250E0 2.2500E0
+ 17.5125E0 2.7500E0
+ 14.2500E0 3.2500E0
+ 9.4500E0 3.7500E0
+ 9.1500E0 4.2500E0
+ 7.9125E0 4.7500E0
+ 8.4750E0 5.2500E0
+ 6.1125E0 5.7500E0
+ 80.0000E0 0.5000E0
+ 79.0000E0 0.6250E0
+ 63.8000E0 0.7500E0
+ 57.2000E0 0.8750E0
+ 53.2000E0 1.0000E0
+ 42.5000E0 1.2500E0
+ 26.8000E0 1.7500E0
+ 20.4000E0 2.2500E0
+ 26.8500E0 1.7500E0
+ 21.0000E0 2.2500E0
+ 16.4625E0 2.7500E0
+ 12.5250E0 3.2500E0
+ 10.5375E0 3.7500E0
+ 8.5875E0 4.2500E0
+ 7.1250E0 4.7500E0
+ 6.1125E0 5.2500E0
+ 5.9625E0 5.7500E0
+ 74.1000E0 0.5000E0
+ 67.3000E0 0.6250E0
+ 60.8000E0 0.7500E0
+ 55.5000E0 0.8750E0
+ 50.3000E0 1.0000E0
+ 41.0000E0 1.2500E0
+ 29.4000E0 1.7500E0
+ 20.4000E0 2.2500E0
+ 29.3625E0 1.7500E0
+ 21.1500E0 2.2500E0
+ 16.7625E0 2.7500E0
+ 13.2000E0 3.2500E0
+ 10.8750E0 3.7500E0
+ 8.1750E0 4.2500E0
+ 7.3500E0 4.7500E0
+ 5.9625E0 5.2500E0
+ 5.6250E0 5.7500E0
+ 81.5000E0 .5000E0
+ 62.4000E0 .7500E0
+ 32.5000E0 1.5000E0
+ 12.4100E0 3.0000E0
+ 13.1200E0 3.0000E0
+ 15.5600E0 3.0000E0
+ 5.6300E0 6.0000E0
+ 78.0000E0 .5000E0
+ 59.9000E0 .7500E0
+ 33.2000E0 1.5000E0
+ 13.8400E0 3.0000E0
+ 12.7500E0 3.0000E0
+ 14.6200E0 3.0000E0
+ 3.9400E0 6.0000E0
+ 76.8000E0 .5000E0
+ 61.0000E0 .7500E0
+ 32.9000E0 1.5000E0
+ 13.8700E0 3.0000E0
+ 11.8100E0 3.0000E0
+ 13.3100E0 3.0000E0
+ 5.4400E0 6.0000E0
+ 78.0000E0 .5000E0
+ 63.5000E0 .7500E0
+ 33.8000E0 1.5000E0
+ 12.5600E0 3.0000E0
+ 5.6300E0 6.0000E0
+ 12.7500E0 3.0000E0
+ 13.1200E0 3.0000E0
+ 5.4400E0 6.0000E0
+ 76.8000E0 .5000E0
+ 60.0000E0 .7500E0
+ 47.8000E0 1.0000E0
+ 32.0000E0 1.5000E0
+ 22.2000E0 2.0000E0
+ 22.5700E0 2.0000E0
+ 18.8200E0 2.5000E0
+ 13.9500E0 3.0000E0
+ 11.2500E0 4.0000E0
+ 9.0000E0 5.0000E0
+ 6.6700E0 6.0000E0
+ 75.8000E0 .5000E0
+ 62.0000E0 .7500E0
+ 48.8000E0 1.0000E0
+ 35.2000E0 1.5000E0
+ 20.0000E0 2.0000E0
+ 20.3200E0 2.0000E0
+ 19.3100E0 2.5000E0
+ 12.7500E0 3.0000E0
+ 10.4200E0 4.0000E0
+ 7.3100E0 5.0000E0
+ 7.4200E0 6.0000E0
+ 70.5000E0 .5000E0
+ 59.5000E0 .7500E0
+ 48.5000E0 1.0000E0
+ 35.8000E0 1.5000E0
+ 21.0000E0 2.0000E0
+ 21.6700E0 2.0000E0
+ 21.0000E0 2.5000E0
+ 15.6400E0 3.0000E0
+ 8.1700E0 4.0000E0
+ 8.5500E0 5.0000E0
+ 10.1200E0 6.0000E0
+ 78.0000E0 .5000E0
+ 66.0000E0 .6250E0
+ 62.0000E0 .7500E0
+ 58.0000E0 .8750E0
+ 47.7000E0 1.0000E0
+ 37.8000E0 1.2500E0
+ 20.2000E0 2.2500E0
+ 21.0700E0 2.2500E0
+ 13.8700E0 2.7500E0
+ 9.6700E0 3.2500E0
+ 7.7600E0 3.7500E0
+ 5.4400E0 4.2500E0
+ 4.8700E0 4.7500E0
+ 4.0100E0 5.2500E0
+ 3.7500E0 5.7500E0
+ 24.1900E0 3.0000E0
+ 25.7600E0 3.0000E0
+ 18.0700E0 3.0000E0
+ 11.8100E0 3.0000E0
+ 12.0700E0 3.0000E0
+ 16.1200E0 3.0000E0
+ 70.8000E0 .5000E0
+ 54.7000E0 .7500E0
+ 48.0000E0 1.0000E0
+ 39.8000E0 1.5000E0
+ 29.8000E0 2.0000E0
+ 23.7000E0 2.5000E0
+ 29.6200E0 2.0000E0
+ 23.8100E0 2.5000E0
+ 17.7000E0 3.0000E0
+ 11.5500E0 4.0000E0
+ 12.0700E0 5.0000E0
+ 8.7400E0 6.0000E0
+ 80.7000E0 .5000E0
+ 61.3000E0 .7500E0
+ 47.5000E0 1.0000E0
+ 29.0000E0 1.5000E0
+ 24.0000E0 2.0000E0
+ 17.7000E0 2.5000E0
+ 24.5600E0 2.0000E0
+ 18.6700E0 2.5000E0
+ 16.2400E0 3.0000E0
+ 8.7400E0 4.0000E0
+ 7.8700E0 5.0000E0
+ 8.5100E0 6.0000E0
+ 66.7000E0 .5000E0
+ 59.2000E0 .7500E0
+ 40.8000E0 1.0000E0
+ 30.7000E0 1.5000E0
+ 25.7000E0 2.0000E0
+ 16.3000E0 2.5000E0
+ 25.9900E0 2.0000E0
+ 16.9500E0 2.5000E0
+ 13.3500E0 3.0000E0
+ 8.6200E0 4.0000E0
+ 7.2000E0 5.0000E0
+ 6.6400E0 6.0000E0
+ 13.6900E0 3.0000E0
+ 81.0000E0 .5000E0
+ 64.5000E0 .7500E0
+ 35.5000E0 1.5000E0
+ 13.3100E0 3.0000E0
+ 4.8700E0 6.0000E0
+ 12.9400E0 3.0000E0
+ 5.0600E0 6.0000E0
+ 15.1900E0 3.0000E0
+ 14.6200E0 3.0000E0
+ 15.6400E0 3.0000E0
+ 25.5000E0 1.7500E0
+ 25.9500E0 1.7500E0
+ 81.7000E0 .5000E0
+ 61.6000E0 .7500E0
+ 29.8000E0 1.7500E0
+ 29.8100E0 1.7500E0
+ 17.1700E0 2.7500E0
+ 10.3900E0 3.7500E0
+ 28.4000E0 1.7500E0
+ 28.6900E0 1.7500E0
+ 81.3000E0 .5000E0
+ 60.9000E0 .7500E0
+ 16.6500E0 2.7500E0
+ 10.0500E0 3.7500E0
+ 28.9000E0 1.7500E0
+ 28.9500E0 1.7500E0
diff --git a/examples/NIST_STRD/Chwirut2.dat b/examples/NIST_STRD/Chwirut2.dat
new file mode 100644
index 0000000..0651faa
--- /dev/null
+++ b/examples/NIST_STRD/Chwirut2.dat
@@ -0,0 +1,114 @@
+NIST/ITL StRD
+Dataset Name: Chwirut2 (Chwirut2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 114)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ ultrasonic calibration. The response variable is
+ ultrasonic response, and the predictor variable is
+ metal distance.
+
+
+
+Reference: Chwirut, D., NIST (197?).
+ Ultrasonic Reference Block Study.
+
+
+
+
+
+Data: 1 Response (y = ultrasonic response)
+ 1 Predictor (x = metal distance)
+ 54 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = exp(-b1*x)/(b2+b3*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.15 1.6657666537E-01 3.8303286810E-02
+ b2 = 0.01 0.008 5.1653291286E-03 6.6621605126E-04
+ b3 = 0.02 0.010 1.2150007096E-02 1.5304234767E-03
+
+Residual Sum of Squares: 5.1304802941E+02
+Residual Standard Deviation: 3.1717133040E+00
+Degrees of Freedom: 51
+Number of Observations: 54
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 92.9000E0 0.500E0
+ 57.1000E0 1.000E0
+ 31.0500E0 1.750E0
+ 11.5875E0 3.750E0
+ 8.0250E0 5.750E0
+ 63.6000E0 0.875E0
+ 21.4000E0 2.250E0
+ 14.2500E0 3.250E0
+ 8.4750E0 5.250E0
+ 63.8000E0 0.750E0
+ 26.8000E0 1.750E0
+ 16.4625E0 2.750E0
+ 7.1250E0 4.750E0
+ 67.3000E0 0.625E0
+ 41.0000E0 1.250E0
+ 21.1500E0 2.250E0
+ 8.1750E0 4.250E0
+ 81.5000E0 .500E0
+ 13.1200E0 3.000E0
+ 59.9000E0 .750E0
+ 14.6200E0 3.000E0
+ 32.9000E0 1.500E0
+ 5.4400E0 6.000E0
+ 12.5600E0 3.000E0
+ 5.4400E0 6.000E0
+ 32.0000E0 1.500E0
+ 13.9500E0 3.000E0
+ 75.8000E0 .500E0
+ 20.0000E0 2.000E0
+ 10.4200E0 4.000E0
+ 59.5000E0 .750E0
+ 21.6700E0 2.000E0
+ 8.5500E0 5.000E0
+ 62.0000E0 .750E0
+ 20.2000E0 2.250E0
+ 7.7600E0 3.750E0
+ 3.7500E0 5.750E0
+ 11.8100E0 3.000E0
+ 54.7000E0 .750E0
+ 23.7000E0 2.500E0
+ 11.5500E0 4.000E0
+ 61.3000E0 .750E0
+ 17.7000E0 2.500E0
+ 8.7400E0 4.000E0
+ 59.2000E0 .750E0
+ 16.3000E0 2.500E0
+ 8.6200E0 4.000E0
+ 81.0000E0 .500E0
+ 4.8700E0 6.000E0
+ 14.6200E0 3.000E0
+ 81.7000E0 .500E0
+ 17.1700E0 2.750E0
+ 81.3000E0 .500E0
+ 28.9000E0 1.750E0
diff --git a/examples/NIST_STRD/DanWood.dat b/examples/NIST_STRD/DanWood.dat
new file mode 100644
index 0000000..317f6a7
--- /dev/null
+++ b/examples/NIST_STRD/DanWood.dat
@@ -0,0 +1,66 @@
+NIST/ITL StRD
+Dataset Name: DanWood (DanWood.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 66)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data and model are described in Daniel and Wood
+ (1980), and originally published in E.S.Keeping,
+ "Introduction to Statistical Inference," Van Nostrand
+ Company, Princeton, NJ, 1962, p. 354. The response
+ variable is energy radieted from a carbon filament
+ lamp per cm**2 per second, and the predictor variable
+ is the absolute temperature of the filament in 1000
+ degrees Kelvin.
+
+Reference: Daniel, C. and F. S. Wood (1980).
+ Fitting Equations to Data, Second Edition.
+ New York, NY: John Wiley and Sons, pp. 428-431.
+
+
+Data: 1 Response Variable (y = energy)
+ 1 Predictor Variable (x = temperature)
+ 6 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*x**b2 + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 0.7 7.6886226176E-01 1.8281973860E-02
+ b2 = 5 4 3.8604055871E+00 5.1726610913E-02
+
+Residual Sum of Squares: 4.3173084083E-03
+Residual Standard Deviation: 3.2853114039E-02
+Degrees of Freedom: 4
+Number of Observations: 6
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 2.138E0 1.309E0
+ 3.421E0 1.471E0
+ 3.597E0 1.490E0
+ 4.340E0 1.565E0
+ 4.882E0 1.611E0
+ 5.660E0 1.680E0
diff --git a/examples/NIST_STRD/ENSO.dat b/examples/NIST_STRD/ENSO.dat
new file mode 100644
index 0000000..efe5cd8
--- /dev/null
+++ b/examples/NIST_STRD/ENSO.dat
@@ -0,0 +1,228 @@
+NIST/ITL StRD
+Dataset Name: ENSO (ENSO.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 49)
+ Certified Values (lines 41 to 54)
+ Data (lines 61 to 228)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are monthly averaged atmospheric pressure
+ differences between Easter Island and Darwin,
+ Australia. This difference drives the trade winds in
+ the southern hemisphere. Fourier analysis of the data
+ reveals 3 significant cycles. The annual cycle is the
+ strongest, but cycles with periods of approximately 44
+ and 26 months are also present. These cycles
+ correspond to the El Nino and the Southern Oscillation.
+ Arguments to the SIN and COS functions are in radians.
+
+Reference: Kahaner, D., C. Moler, and S. Nash, (1989).
+ Numerical Methods and Software.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 441-445.
+
+Data: 1 Response (y = atmospheric pressure)
+ 1 Predictor (x = time)
+ 168 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 9 Parameters (b1 to b9)
+
+ y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 )
+ + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
+ + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 ) + e
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 11.0 10.0 1.0510749193E+01 1.7488832467E-01
+ b2 = 3.0 3.0 3.0762128085E+00 2.4310052139E-01
+ b3 = 0.5 0.5 5.3280138227E-01 2.4354686618E-01
+ b4 = 40.0 44.0 4.4311088700E+01 9.4408025976E-01
+ b5 = -0.7 -1.5 -1.6231428586E+00 2.8078369611E-01
+ b6 = -1.3 0.5 5.2554493756E-01 4.8073701119E-01
+ b7 = 25.0 26.0 2.6887614440E+01 4.1612939130E-01
+ b8 = -0.3 -0.1 2.1232288488E-01 5.1460022911E-01
+ b9 = 1.4 1.5 1.4966870418E+00 2.5434468893E-01
+
+Residual Sum of Squares: 7.8853978668E+02
+Residual Standard Deviation: 2.2269642403E+00
+Degrees of Freedom: 159
+Number of Observations: 168
+
+
+
+
+
+Data: y x
+ 12.90000 1.000000
+ 11.30000 2.000000
+ 10.60000 3.000000
+ 11.20000 4.000000
+ 10.90000 5.000000
+ 7.500000 6.000000
+ 7.700000 7.000000
+ 11.70000 8.000000
+ 12.90000 9.000000
+ 14.30000 10.000000
+ 10.90000 11.00000
+ 13.70000 12.00000
+ 17.10000 13.00000
+ 14.00000 14.00000
+ 15.30000 15.00000
+ 8.500000 16.00000
+ 5.700000 17.00000
+ 5.500000 18.00000
+ 7.600000 19.00000
+ 8.600000 20.00000
+ 7.300000 21.00000
+ 7.600000 22.00000
+ 12.70000 23.00000
+ 11.00000 24.00000
+ 12.70000 25.00000
+ 12.90000 26.00000
+ 13.00000 27.00000
+ 10.90000 28.00000
+ 10.400000 29.00000
+ 10.200000 30.00000
+ 8.000000 31.00000
+ 10.90000 32.00000
+ 13.60000 33.00000
+ 10.500000 34.00000
+ 9.200000 35.00000
+ 12.40000 36.00000
+ 12.70000 37.00000
+ 13.30000 38.00000
+ 10.100000 39.00000
+ 7.800000 40.00000
+ 4.800000 41.00000
+ 3.000000 42.00000
+ 2.500000 43.00000
+ 6.300000 44.00000
+ 9.700000 45.00000
+ 11.60000 46.00000
+ 8.600000 47.00000
+ 12.40000 48.00000
+ 10.500000 49.00000
+ 13.30000 50.00000
+ 10.400000 51.00000
+ 8.100000 52.00000
+ 3.700000 53.00000
+ 10.70000 54.00000
+ 5.100000 55.00000
+ 10.400000 56.00000
+ 10.90000 57.00000
+ 11.70000 58.00000
+ 11.40000 59.00000
+ 13.70000 60.00000
+ 14.10000 61.00000
+ 14.00000 62.00000
+ 12.50000 63.00000
+ 6.300000 64.00000
+ 9.600000 65.00000
+ 11.70000 66.00000
+ 5.000000 67.00000
+ 10.80000 68.00000
+ 12.70000 69.00000
+ 10.80000 70.00000
+ 11.80000 71.00000
+ 12.60000 72.00000
+ 15.70000 73.00000
+ 12.60000 74.00000
+ 14.80000 75.00000
+ 7.800000 76.00000
+ 7.100000 77.00000
+ 11.20000 78.00000
+ 8.100000 79.00000
+ 6.400000 80.00000
+ 5.200000 81.00000
+ 12.00000 82.00000
+ 10.200000 83.00000
+ 12.70000 84.00000
+ 10.200000 85.00000
+ 14.70000 86.00000
+ 12.20000 87.00000
+ 7.100000 88.00000
+ 5.700000 89.00000
+ 6.700000 90.00000
+ 3.900000 91.00000
+ 8.500000 92.00000
+ 8.300000 93.00000
+ 10.80000 94.00000
+ 16.70000 95.00000
+ 12.60000 96.00000
+ 12.50000 97.00000
+ 12.50000 98.00000
+ 9.800000 99.00000
+ 7.200000 100.00000
+ 4.100000 101.00000
+ 10.60000 102.00000
+ 10.100000 103.00000
+ 10.100000 104.00000
+ 11.90000 105.00000
+ 13.60000 106.0000
+ 16.30000 107.0000
+ 17.60000 108.0000
+ 15.50000 109.0000
+ 16.00000 110.0000
+ 15.20000 111.0000
+ 11.20000 112.0000
+ 14.30000 113.0000
+ 14.50000 114.0000
+ 8.500000 115.0000
+ 12.00000 116.0000
+ 12.70000 117.0000
+ 11.30000 118.0000
+ 14.50000 119.0000
+ 15.10000 120.0000
+ 10.400000 121.0000
+ 11.50000 122.0000
+ 13.40000 123.0000
+ 7.500000 124.0000
+ 0.6000000 125.0000
+ 0.3000000 126.0000
+ 5.500000 127.0000
+ 5.000000 128.0000
+ 4.600000 129.0000
+ 8.200000 130.0000
+ 9.900000 131.0000
+ 9.200000 132.0000
+ 12.50000 133.0000
+ 10.90000 134.0000
+ 9.900000 135.0000
+ 8.900000 136.0000
+ 7.600000 137.0000
+ 9.500000 138.0000
+ 8.400000 139.0000
+ 10.70000 140.0000
+ 13.60000 141.0000
+ 13.70000 142.0000
+ 13.70000 143.0000
+ 16.50000 144.0000
+ 16.80000 145.0000
+ 17.10000 146.0000
+ 15.40000 147.0000
+ 9.500000 148.0000
+ 6.100000 149.0000
+ 10.100000 150.0000
+ 9.300000 151.0000
+ 5.300000 152.0000
+ 11.20000 153.0000
+ 16.60000 154.0000
+ 15.60000 155.0000
+ 12.00000 156.0000
+ 11.50000 157.0000
+ 8.600000 158.0000
+ 13.80000 159.0000
+ 8.700000 160.0000
+ 8.600000 161.0000
+ 8.600000 162.0000
+ 8.700000 163.0000
+ 12.80000 164.0000
+ 13.20000 165.0000
+ 14.00000 166.0000
+ 13.40000 167.0000
+ 14.80000 168.0000
diff --git a/examples/NIST_STRD/Eckerle4.dat b/examples/NIST_STRD/Eckerle4.dat
new file mode 100644
index 0000000..dd54f5a
--- /dev/null
+++ b/examples/NIST_STRD/Eckerle4.dat
@@ -0,0 +1,95 @@
+NIST/ITL StRD
+Dataset Name: Eckerle4 (Eckerle4.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 95)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ circular interference transmittance. The response
+ variable is transmittance, and the predictor variable
+ is wavelength.
+
+
+Reference: Eckerle, K., NIST (197?).
+ Circular Interference Transmittance Study.
+
+
+
+
+
+
+Data: 1 Response Variable (y = transmittance)
+ 1 Predictor Variable (x = wavelength)
+ 35 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1 1.5 1.5543827178E+00 1.5408051163E-02
+ b2 = 10 5 4.0888321754E+00 4.6803020753E-02
+ b3 = 500 450 4.5154121844E+02 4.6800518816E-02
+
+Residual Sum of Squares: 1.4635887487E-03
+Residual Standard Deviation: 6.7629245447E-03
+Degrees of Freedom: 32
+Number of Observations: 35
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.0001575E0 400.000000E0
+ 0.0001699E0 405.000000E0
+ 0.0002350E0 410.000000E0
+ 0.0003102E0 415.000000E0
+ 0.0004917E0 420.000000E0
+ 0.0008710E0 425.000000E0
+ 0.0017418E0 430.000000E0
+ 0.0046400E0 435.000000E0
+ 0.0065895E0 436.500000E0
+ 0.0097302E0 438.000000E0
+ 0.0149002E0 439.500000E0
+ 0.0237310E0 441.000000E0
+ 0.0401683E0 442.500000E0
+ 0.0712559E0 444.000000E0
+ 0.1264458E0 445.500000E0
+ 0.2073413E0 447.000000E0
+ 0.2902366E0 448.500000E0
+ 0.3445623E0 450.000000E0
+ 0.3698049E0 451.500000E0
+ 0.3668534E0 453.000000E0
+ 0.3106727E0 454.500000E0
+ 0.2078154E0 456.000000E0
+ 0.1164354E0 457.500000E0
+ 0.0616764E0 459.000000E0
+ 0.0337200E0 460.500000E0
+ 0.0194023E0 462.000000E0
+ 0.0117831E0 463.500000E0
+ 0.0074357E0 465.000000E0
+ 0.0022732E0 470.000000E0
+ 0.0008800E0 475.000000E0
+ 0.0004579E0 480.000000E0
+ 0.0002345E0 485.000000E0
+ 0.0001586E0 490.000000E0
+ 0.0001143E0 495.000000E0
+ 0.0000710E0 500.000000E0
diff --git a/examples/NIST_STRD/Gauss1.dat b/examples/NIST_STRD/Gauss1.dat
new file mode 100644
index 0000000..89c389e
--- /dev/null
+++ b/examples/NIST_STRD/Gauss1.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss1 (Gauss1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two well-separated Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 97.0 94.0 9.8778210871E+01 5.7527312730E-01
+ b2 = 0.009 0.0105 1.0497276517E-02 1.1406289017E-04
+ b3 = 100.0 99.0 1.0048990633E+02 5.8831775752E-01
+ b4 = 65.0 63.0 6.7481111276E+01 1.0460593412E-01
+ b5 = 20.0 25.0 2.3129773360E+01 1.7439951146E-01
+ b6 = 70.0 71.0 7.1994503004E+01 6.2622793913E-01
+ b7 = 178.0 180.0 1.7899805021E+02 1.2436988217E-01
+ b8 = 16.5 20.0 1.8389389025E+01 2.0134312832E-01
+
+Residual Sum of Squares: 1.3158222432E+03
+Residual Standard Deviation: 2.3317980180E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.62227 1.000000
+ 97.80724 2.000000
+ 96.62247 3.000000
+ 92.59022 4.000000
+ 91.23869 5.000000
+ 95.32704 6.000000
+ 90.35040 7.000000
+ 89.46235 8.000000
+ 91.72520 9.000000
+ 89.86916 10.000000
+ 86.88076 11.00000
+ 85.94360 12.00000
+ 87.60686 13.00000
+ 86.25839 14.00000
+ 80.74976 15.00000
+ 83.03551 16.00000
+ 88.25837 17.00000
+ 82.01316 18.00000
+ 82.74098 19.00000
+ 83.30034 20.00000
+ 81.27850 21.00000
+ 81.85506 22.00000
+ 80.75195 23.00000
+ 80.09573 24.00000
+ 81.07633 25.00000
+ 78.81542 26.00000
+ 78.38596 27.00000
+ 79.93386 28.00000
+ 79.48474 29.00000
+ 79.95942 30.00000
+ 76.10691 31.00000
+ 78.39830 32.00000
+ 81.43060 33.00000
+ 82.48867 34.00000
+ 81.65462 35.00000
+ 80.84323 36.00000
+ 88.68663 37.00000
+ 84.74438 38.00000
+ 86.83934 39.00000
+ 85.97739 40.00000
+ 91.28509 41.00000
+ 97.22411 42.00000
+ 93.51733 43.00000
+ 94.10159 44.00000
+ 101.91760 45.00000
+ 98.43134 46.00000
+ 110.4214 47.00000
+ 107.6628 48.00000
+ 111.7288 49.00000
+ 116.5115 50.00000
+ 120.7609 51.00000
+ 123.9553 52.00000
+ 124.2437 53.00000
+ 130.7996 54.00000
+ 133.2960 55.00000
+ 130.7788 56.00000
+ 132.0565 57.00000
+ 138.6584 58.00000
+ 142.9252 59.00000
+ 142.7215 60.00000
+ 144.1249 61.00000
+ 147.4377 62.00000
+ 148.2647 63.00000
+ 152.0519 64.00000
+ 147.3863 65.00000
+ 149.2074 66.00000
+ 148.9537 67.00000
+ 144.5876 68.00000
+ 148.1226 69.00000
+ 148.0144 70.00000
+ 143.8893 71.00000
+ 140.9088 72.00000
+ 143.4434 73.00000
+ 139.3938 74.00000
+ 135.9878 75.00000
+ 136.3927 76.00000
+ 126.7262 77.00000
+ 124.4487 78.00000
+ 122.8647 79.00000
+ 113.8557 80.00000
+ 113.7037 81.00000
+ 106.8407 82.00000
+ 107.0034 83.00000
+ 102.46290 84.00000
+ 96.09296 85.00000
+ 94.57555 86.00000
+ 86.98824 87.00000
+ 84.90154 88.00000
+ 81.18023 89.00000
+ 76.40117 90.00000
+ 67.09200 91.00000
+ 72.67155 92.00000
+ 68.10848 93.00000
+ 67.99088 94.00000
+ 63.34094 95.00000
+ 60.55253 96.00000
+ 56.18687 97.00000
+ 53.64482 98.00000
+ 53.70307 99.00000
+ 48.07893 100.00000
+ 42.21258 101.00000
+ 45.65181 102.00000
+ 41.69728 103.00000
+ 41.24946 104.00000
+ 39.21349 105.00000
+ 37.71696 106.0000
+ 36.68395 107.0000
+ 37.30393 108.0000
+ 37.43277 109.0000
+ 37.45012 110.0000
+ 32.64648 111.0000
+ 31.84347 112.0000
+ 31.39951 113.0000
+ 26.68912 114.0000
+ 32.25323 115.0000
+ 27.61008 116.0000
+ 33.58649 117.0000
+ 28.10714 118.0000
+ 30.26428 119.0000
+ 28.01648 120.0000
+ 29.11021 121.0000
+ 23.02099 122.0000
+ 25.65091 123.0000
+ 28.50295 124.0000
+ 25.23701 125.0000
+ 26.13828 126.0000
+ 33.53260 127.0000
+ 29.25195 128.0000
+ 27.09847 129.0000
+ 26.52999 130.0000
+ 25.52401 131.0000
+ 26.69218 132.0000
+ 24.55269 133.0000
+ 27.71763 134.0000
+ 25.20297 135.0000
+ 25.61483 136.0000
+ 25.06893 137.0000
+ 27.63930 138.0000
+ 24.94851 139.0000
+ 25.86806 140.0000
+ 22.48183 141.0000
+ 26.90045 142.0000
+ 25.39919 143.0000
+ 17.90614 144.0000
+ 23.76039 145.0000
+ 25.89689 146.0000
+ 27.64231 147.0000
+ 22.86101 148.0000
+ 26.47003 149.0000
+ 23.72888 150.0000
+ 27.54334 151.0000
+ 30.52683 152.0000
+ 28.07261 153.0000
+ 34.92815 154.0000
+ 28.29194 155.0000
+ 34.19161 156.0000
+ 35.41207 157.0000
+ 37.09336 158.0000
+ 40.98330 159.0000
+ 39.53923 160.0000
+ 47.80123 161.0000
+ 47.46305 162.0000
+ 51.04166 163.0000
+ 54.58065 164.0000
+ 57.53001 165.0000
+ 61.42089 166.0000
+ 62.79032 167.0000
+ 68.51455 168.0000
+ 70.23053 169.0000
+ 74.42776 170.0000
+ 76.59911 171.0000
+ 81.62053 172.0000
+ 83.42208 173.0000
+ 79.17451 174.0000
+ 88.56985 175.0000
+ 85.66525 176.0000
+ 86.55502 177.0000
+ 90.65907 178.0000
+ 84.27290 179.0000
+ 85.72220 180.0000
+ 83.10702 181.0000
+ 82.16884 182.0000
+ 80.42568 183.0000
+ 78.15692 184.0000
+ 79.79691 185.0000
+ 77.84378 186.0000
+ 74.50327 187.0000
+ 71.57289 188.0000
+ 65.88031 189.0000
+ 65.01385 190.0000
+ 60.19582 191.0000
+ 59.66726 192.0000
+ 52.95478 193.0000
+ 53.87792 194.0000
+ 44.91274 195.0000
+ 41.09909 196.0000
+ 41.68018 197.0000
+ 34.53379 198.0000
+ 34.86419 199.0000
+ 33.14787 200.0000
+ 29.58864 201.0000
+ 27.29462 202.0000
+ 21.91439 203.0000
+ 19.08159 204.0000
+ 24.90290 205.0000
+ 19.82341 206.0000
+ 16.75551 207.0000
+ 18.24558 208.0000
+ 17.23549 209.0000
+ 16.34934 210.0000
+ 13.71285 211.0000
+ 14.75676 212.0000
+ 13.97169 213.0000
+ 12.42867 214.0000
+ 14.35519 215.0000
+ 7.703309 216.0000
+ 10.234410 217.0000
+ 11.78315 218.0000
+ 13.87768 219.0000
+ 4.535700 220.0000
+ 10.059280 221.0000
+ 8.424824 222.0000
+ 10.533120 223.0000
+ 9.602255 224.0000
+ 7.877514 225.0000
+ 6.258121 226.0000
+ 8.899865 227.0000
+ 7.877754 228.0000
+ 12.51191 229.0000
+ 10.66205 230.0000
+ 6.035400 231.0000
+ 6.790655 232.0000
+ 8.783535 233.0000
+ 4.600288 234.0000
+ 8.400915 235.0000
+ 7.216561 236.0000
+ 10.017410 237.0000
+ 7.331278 238.0000
+ 6.527863 239.0000
+ 2.842001 240.0000
+ 10.325070 241.0000
+ 4.790995 242.0000
+ 8.377101 243.0000
+ 6.264445 244.0000
+ 2.706213 245.0000
+ 8.362329 246.0000
+ 8.983658 247.0000
+ 3.362571 248.0000
+ 1.182746 249.0000
+ 4.875359 250.0000
diff --git a/examples/NIST_STRD/Gauss2.dat b/examples/NIST_STRD/Gauss2.dat
new file mode 100644
index 0000000..ff185d1
--- /dev/null
+++ b/examples/NIST_STRD/Gauss2.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss2 (Gauss2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two slightly-blended Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 96.0 98.0 9.9018328406E+01 5.3748766879E-01
+ b2 = 0.009 0.0105 1.0994945399E-02 1.3335306766E-04
+ b3 = 103.0 103.0 1.0188022528E+02 5.9217315772E-01
+ b4 = 106.0 105.0 1.0703095519E+02 1.5006798316E-01
+ b5 = 18.0 20.0 2.3578584029E+01 2.2695595067E-01
+ b6 = 72.0 73.0 7.2045589471E+01 6.1721965884E-01
+ b7 = 151.0 150.0 1.5327010194E+02 1.9466674341E-01
+ b8 = 18.0 20.0 1.9525972636E+01 2.6416549393E-01
+
+Residual Sum of Squares: 1.2475282092E+03
+Residual Standard Deviation: 2.2704790782E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.58776 1.000000
+ 97.76344 2.000000
+ 96.56705 3.000000
+ 92.52037 4.000000
+ 91.15097 5.000000
+ 95.21728 6.000000
+ 90.21355 7.000000
+ 89.29235 8.000000
+ 91.51479 9.000000
+ 89.60966 10.000000
+ 86.56187 11.00000
+ 85.55316 12.00000
+ 87.13054 13.00000
+ 85.67940 14.00000
+ 80.04851 15.00000
+ 82.18925 16.00000
+ 87.24081 17.00000
+ 80.79407 18.00000
+ 81.28570 19.00000
+ 81.56940 20.00000
+ 79.22715 21.00000
+ 79.43275 22.00000
+ 77.90195 23.00000
+ 76.75468 24.00000
+ 77.17377 25.00000
+ 74.27348 26.00000
+ 73.11900 27.00000
+ 73.84826 28.00000
+ 72.47870 29.00000
+ 71.92292 30.00000
+ 66.92176 31.00000
+ 67.93835 32.00000
+ 69.56207 33.00000
+ 69.07066 34.00000
+ 66.53983 35.00000
+ 63.87883 36.00000
+ 69.71537 37.00000
+ 63.60588 38.00000
+ 63.37154 39.00000
+ 60.01835 40.00000
+ 62.67481 41.00000
+ 65.80666 42.00000
+ 59.14304 43.00000
+ 56.62951 44.00000
+ 61.21785 45.00000
+ 54.38790 46.00000
+ 62.93443 47.00000
+ 56.65144 48.00000
+ 57.13362 49.00000
+ 58.29689 50.00000
+ 58.91744 51.00000
+ 58.50172 52.00000
+ 55.22885 53.00000
+ 58.30375 54.00000
+ 57.43237 55.00000
+ 51.69407 56.00000
+ 49.93132 57.00000
+ 53.70760 58.00000
+ 55.39712 59.00000
+ 52.89709 60.00000
+ 52.31649 61.00000
+ 53.98720 62.00000
+ 53.54158 63.00000
+ 56.45046 64.00000
+ 51.32276 65.00000
+ 53.11676 66.00000
+ 53.28631 67.00000
+ 49.80555 68.00000
+ 54.69564 69.00000
+ 56.41627 70.00000
+ 54.59362 71.00000
+ 54.38520 72.00000
+ 60.15354 73.00000
+ 59.78773 74.00000
+ 60.49995 75.00000
+ 65.43885 76.00000
+ 60.70001 77.00000
+ 63.71865 78.00000
+ 67.77139 79.00000
+ 64.70934 80.00000
+ 70.78193 81.00000
+ 70.38651 82.00000
+ 77.22359 83.00000
+ 79.52665 84.00000
+ 80.13077 85.00000
+ 85.67823 86.00000
+ 85.20647 87.00000
+ 90.24548 88.00000
+ 93.61953 89.00000
+ 95.86509 90.00000
+ 93.46992 91.00000
+ 105.8137 92.00000
+ 107.8269 93.00000
+ 114.0607 94.00000
+ 115.5019 95.00000
+ 118.5110 96.00000
+ 119.6177 97.00000
+ 122.1940 98.00000
+ 126.9903 99.00000
+ 125.7005 100.00000
+ 123.7447 101.00000
+ 130.6543 102.00000
+ 129.7168 103.00000
+ 131.8240 104.00000
+ 131.8759 105.00000
+ 131.9994 106.0000
+ 132.1221 107.0000
+ 133.4414 108.0000
+ 133.8252 109.0000
+ 133.6695 110.0000
+ 128.2851 111.0000
+ 126.5182 112.0000
+ 124.7550 113.0000
+ 118.4016 114.0000
+ 122.0334 115.0000
+ 115.2059 116.0000
+ 118.7856 117.0000
+ 110.7387 118.0000
+ 110.2003 119.0000
+ 105.17290 120.0000
+ 103.44720 121.0000
+ 94.54280 122.0000
+ 94.40526 123.0000
+ 94.57964 124.0000
+ 88.76605 125.0000
+ 87.28747 126.0000
+ 92.50443 127.0000
+ 86.27997 128.0000
+ 82.44307 129.0000
+ 80.47367 130.0000
+ 78.36608 131.0000
+ 78.74307 132.0000
+ 76.12786 133.0000
+ 79.13108 134.0000
+ 76.76062 135.0000
+ 77.60769 136.0000
+ 77.76633 137.0000
+ 81.28220 138.0000
+ 79.74307 139.0000
+ 81.97964 140.0000
+ 80.02952 141.0000
+ 85.95232 142.0000
+ 85.96838 143.0000
+ 79.94789 144.0000
+ 87.17023 145.0000
+ 90.50992 146.0000
+ 93.23373 147.0000
+ 89.14803 148.0000
+ 93.11492 149.0000
+ 90.34337 150.0000
+ 93.69421 151.0000
+ 95.74256 152.0000
+ 91.85105 153.0000
+ 96.74503 154.0000
+ 87.60996 155.0000
+ 90.47012 156.0000
+ 88.11690 157.0000
+ 85.70673 158.0000
+ 85.01361 159.0000
+ 78.53040 160.0000
+ 81.34148 161.0000
+ 75.19295 162.0000
+ 72.66115 163.0000
+ 69.85504 164.0000
+ 66.29476 165.0000
+ 63.58502 166.0000
+ 58.33847 167.0000
+ 57.50766 168.0000
+ 52.80498 169.0000
+ 50.79319 170.0000
+ 47.03490 171.0000
+ 46.47090 172.0000
+ 43.09016 173.0000
+ 34.11531 174.0000
+ 39.28235 175.0000
+ 32.68386 176.0000
+ 30.44056 177.0000
+ 31.98932 178.0000
+ 23.63330 179.0000
+ 23.69643 180.0000
+ 20.26812 181.0000
+ 19.07074 182.0000
+ 17.59544 183.0000
+ 16.08785 184.0000
+ 18.94267 185.0000
+ 18.61354 186.0000
+ 17.25800 187.0000
+ 16.62285 188.0000
+ 13.48367 189.0000
+ 15.37647 190.0000
+ 13.47208 191.0000
+ 15.96188 192.0000
+ 12.32547 193.0000
+ 16.33880 194.0000
+ 10.438330 195.0000
+ 9.628715 196.0000
+ 13.12268 197.0000
+ 8.772417 198.0000
+ 11.76143 199.0000
+ 12.55020 200.0000
+ 11.33108 201.0000
+ 11.20493 202.0000
+ 7.816916 203.0000
+ 6.800675 204.0000
+ 14.26581 205.0000
+ 10.66285 206.0000
+ 8.911574 207.0000
+ 11.56733 208.0000
+ 11.58207 209.0000
+ 11.59071 210.0000
+ 9.730134 211.0000
+ 11.44237 212.0000
+ 11.22912 213.0000
+ 10.172130 214.0000
+ 12.50905 215.0000
+ 6.201493 216.0000
+ 9.019605 217.0000
+ 10.80607 218.0000
+ 13.09625 219.0000
+ 3.914271 220.0000
+ 9.567886 221.0000
+ 8.038448 222.0000
+ 10.231040 223.0000
+ 9.367410 224.0000
+ 7.695971 225.0000
+ 6.118575 226.0000
+ 8.793207 227.0000
+ 7.796692 228.0000
+ 12.45065 229.0000
+ 10.61601 230.0000
+ 6.001003 231.0000
+ 6.765098 232.0000
+ 8.764653 233.0000
+ 4.586418 234.0000
+ 8.390783 235.0000
+ 7.209202 236.0000
+ 10.012090 237.0000
+ 7.327461 238.0000
+ 6.525136 239.0000
+ 2.840065 240.0000
+ 10.323710 241.0000
+ 4.790035 242.0000
+ 8.376431 243.0000
+ 6.263980 244.0000
+ 2.705892 245.0000
+ 8.362109 246.0000
+ 8.983507 247.0000
+ 3.362469 248.0000
+ 1.182678 249.0000
+ 4.875312 250.0000
diff --git a/examples/NIST_STRD/Gauss3.dat b/examples/NIST_STRD/Gauss3.dat
new file mode 100644
index 0000000..0f880b0
--- /dev/null
+++ b/examples/NIST_STRD/Gauss3.dat
@@ -0,0 +1,310 @@
+NIST/ITL StRD
+Dataset Name: Gauss3 (Gauss3.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 48)
+ Certified Values (lines 41 to 53)
+ Data (lines 61 to 310)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: The data are two strongly-blended Gaussians on a
+ decaying exponential baseline plus normally
+ distributed zero-mean noise with variance = 6.25.
+
+Reference: Rust, B., NIST (1996).
+
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 250 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 8 Parameters (b1 to b8)
+
+ y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+ + b6*exp( -(x-b7)**2 / b8**2 ) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 94.9 96.0 9.8940368970E+01 5.3005192833E-01
+ b2 = 0.009 0.0096 1.0945879335E-02 1.2554058911E-04
+ b3 = 90.1 80.0 1.0069553078E+02 8.1256587317E-01
+ b4 = 113.0 110.0 1.1163619459E+02 3.5317859757E-01
+ b5 = 20.0 25.0 2.3300500029E+01 3.6584783023E-01
+ b6 = 73.8 74.0 7.3705031418E+01 1.2091239082E+00
+ b7 = 140.0 139.0 1.4776164251E+02 4.0488183351E-01
+ b8 = 20.0 25.0 1.9668221230E+01 3.7806634336E-01
+
+Residual Sum of Squares: 1.2444846360E+03
+Residual Standard Deviation: 2.2677077625E+00
+Degrees of Freedom: 242
+Number of Observations: 250
+
+
+
+
+
+
+Data: y x
+ 97.58776 1.000000
+ 97.76344 2.000000
+ 96.56705 3.000000
+ 92.52037 4.000000
+ 91.15097 5.000000
+ 95.21728 6.000000
+ 90.21355 7.000000
+ 89.29235 8.000000
+ 91.51479 9.000000
+ 89.60965 10.000000
+ 86.56187 11.00000
+ 85.55315 12.00000
+ 87.13053 13.00000
+ 85.67938 14.00000
+ 80.04849 15.00000
+ 82.18922 16.00000
+ 87.24078 17.00000
+ 80.79401 18.00000
+ 81.28564 19.00000
+ 81.56932 20.00000
+ 79.22703 21.00000
+ 79.43259 22.00000
+ 77.90174 23.00000
+ 76.75438 24.00000
+ 77.17338 25.00000
+ 74.27296 26.00000
+ 73.11830 27.00000
+ 73.84732 28.00000
+ 72.47746 29.00000
+ 71.92128 30.00000
+ 66.91962 31.00000
+ 67.93554 32.00000
+ 69.55841 33.00000
+ 69.06592 34.00000
+ 66.53371 35.00000
+ 63.87094 36.00000
+ 69.70526 37.00000
+ 63.59295 38.00000
+ 63.35509 39.00000
+ 59.99747 40.00000
+ 62.64843 41.00000
+ 65.77345 42.00000
+ 59.10141 43.00000
+ 56.57750 44.00000
+ 61.15313 45.00000
+ 54.30767 46.00000
+ 62.83535 47.00000
+ 56.52957 48.00000
+ 56.98427 49.00000
+ 58.11459 50.00000
+ 58.69576 51.00000
+ 58.23322 52.00000
+ 54.90490 53.00000
+ 57.91442 54.00000
+ 56.96629 55.00000
+ 51.13831 56.00000
+ 49.27123 57.00000
+ 52.92668 58.00000
+ 54.47693 59.00000
+ 51.81710 60.00000
+ 51.05401 61.00000
+ 52.51731 62.00000
+ 51.83710 63.00000
+ 54.48196 64.00000
+ 49.05859 65.00000
+ 50.52315 66.00000
+ 50.32755 67.00000
+ 46.44419 68.00000
+ 50.89281 69.00000
+ 52.13203 70.00000
+ 49.78741 71.00000
+ 49.01637 72.00000
+ 54.18198 73.00000
+ 53.17456 74.00000
+ 53.20827 75.00000
+ 57.43459 76.00000
+ 51.95282 77.00000
+ 54.20282 78.00000
+ 57.46687 79.00000
+ 53.60268 80.00000
+ 58.86728 81.00000
+ 57.66652 82.00000
+ 63.71034 83.00000
+ 65.24244 84.00000
+ 65.10878 85.00000
+ 69.96313 86.00000
+ 68.85475 87.00000
+ 73.32574 88.00000
+ 76.21241 89.00000
+ 78.06311 90.00000
+ 75.37701 91.00000
+ 87.54449 92.00000
+ 89.50588 93.00000
+ 95.82098 94.00000
+ 97.48390 95.00000
+ 100.86070 96.00000
+ 102.48510 97.00000
+ 105.7311 98.00000
+ 111.3489 99.00000
+ 111.0305 100.00000
+ 110.1920 101.00000
+ 118.3581 102.00000
+ 118.8086 103.00000
+ 122.4249 104.00000
+ 124.0953 105.00000
+ 125.9337 106.0000
+ 127.8533 107.0000
+ 131.0361 108.0000
+ 133.3343 109.0000
+ 135.1278 110.0000
+ 131.7113 111.0000
+ 131.9151 112.0000
+ 132.1107 113.0000
+ 127.6898 114.0000
+ 133.2148 115.0000
+ 128.2296 116.0000
+ 133.5902 117.0000
+ 127.2539 118.0000
+ 128.3482 119.0000
+ 124.8694 120.0000
+ 124.6031 121.0000
+ 117.0648 122.0000
+ 118.1966 123.0000
+ 119.5408 124.0000
+ 114.7946 125.0000
+ 114.2780 126.0000
+ 120.3484 127.0000
+ 114.8647 128.0000
+ 111.6514 129.0000
+ 110.1826 130.0000
+ 108.4461 131.0000
+ 109.0571 132.0000
+ 106.5308 133.0000
+ 109.4691 134.0000
+ 106.8709 135.0000
+ 107.3192 136.0000
+ 106.9000 137.0000
+ 109.6526 138.0000
+ 107.1602 139.0000
+ 108.2509 140.0000
+ 104.96310 141.0000
+ 109.3601 142.0000
+ 107.6696 143.0000
+ 99.77286 144.0000
+ 104.96440 145.0000
+ 106.1376 146.0000
+ 106.5816 147.0000
+ 100.12860 148.0000
+ 101.66910 149.0000
+ 96.44254 150.0000
+ 97.34169 151.0000
+ 96.97412 152.0000
+ 90.73460 153.0000
+ 93.37949 154.0000
+ 82.12331 155.0000
+ 83.01657 156.0000
+ 78.87360 157.0000
+ 74.86971 158.0000
+ 72.79341 159.0000
+ 65.14744 160.0000
+ 67.02127 161.0000
+ 60.16136 162.0000
+ 57.13996 163.0000
+ 54.05769 164.0000
+ 50.42265 165.0000
+ 47.82430 166.0000
+ 42.85748 167.0000
+ 42.45495 168.0000
+ 38.30808 169.0000
+ 36.95794 170.0000
+ 33.94543 171.0000
+ 34.19017 172.0000
+ 31.66097 173.0000
+ 23.56172 174.0000
+ 29.61143 175.0000
+ 23.88765 176.0000
+ 22.49812 177.0000
+ 24.86901 178.0000
+ 17.29481 179.0000
+ 18.09291 180.0000
+ 15.34813 181.0000
+ 14.77997 182.0000
+ 13.87832 183.0000
+ 12.88891 184.0000
+ 16.20763 185.0000
+ 16.29024 186.0000
+ 15.29712 187.0000
+ 14.97839 188.0000
+ 12.11330 189.0000
+ 14.24168 190.0000
+ 12.53824 191.0000
+ 15.19818 192.0000
+ 11.70478 193.0000
+ 15.83745 194.0000
+ 10.035850 195.0000
+ 9.307574 196.0000
+ 12.86800 197.0000
+ 8.571671 198.0000
+ 11.60415 199.0000
+ 12.42772 200.0000
+ 11.23627 201.0000
+ 11.13198 202.0000
+ 7.761117 203.0000
+ 6.758250 204.0000
+ 14.23375 205.0000
+ 10.63876 206.0000
+ 8.893581 207.0000
+ 11.55398 208.0000
+ 11.57221 209.0000
+ 11.58347 210.0000
+ 9.724857 211.0000
+ 11.43854 212.0000
+ 11.22636 213.0000
+ 10.170150 214.0000
+ 12.50765 215.0000
+ 6.200494 216.0000
+ 9.018902 217.0000
+ 10.80557 218.0000
+ 13.09591 219.0000
+ 3.914033 220.0000
+ 9.567723 221.0000
+ 8.038338 222.0000
+ 10.230960 223.0000
+ 9.367358 224.0000
+ 7.695937 225.0000
+ 6.118552 226.0000
+ 8.793192 227.0000
+ 7.796682 228.0000
+ 12.45064 229.0000
+ 10.61601 230.0000
+ 6.001000 231.0000
+ 6.765096 232.0000
+ 8.764652 233.0000
+ 4.586417 234.0000
+ 8.390782 235.0000
+ 7.209201 236.0000
+ 10.012090 237.0000
+ 7.327461 238.0000
+ 6.525136 239.0000
+ 2.840065 240.0000
+ 10.323710 241.0000
+ 4.790035 242.0000
+ 8.376431 243.0000
+ 6.263980 244.0000
+ 2.705892 245.0000
+ 8.362109 246.0000
+ 8.983507 247.0000
+ 3.362469 248.0000
+ 1.182678 249.0000
+ 4.875312 250.0000
diff --git a/examples/NIST_STRD/Hahn1.dat b/examples/NIST_STRD/Hahn1.dat
new file mode 100644
index 0000000..0e493a4
--- /dev/null
+++ b/examples/NIST_STRD/Hahn1.dat
@@ -0,0 +1,296 @@
+NIST/ITL StRD
+Dataset Name: Hahn1 (Hahn1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 47)
+ Certified Values (lines 41 to 52)
+ Data (lines 61 to 296)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ the thermal expansion of copper. The response
+ variable is the coefficient of thermal expansion, and
+ the predictor variable is temperature in degrees
+ kelvin.
+
+
+Reference: Hahn, T., NIST (197?).
+ Copper Thermal Expansion Study.
+
+
+
+
+
+Data: 1 Response (y = coefficient of thermal expansion)
+ 1 Predictor (x = temperature, degrees kelvin)
+ 236 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Rational Class (cubic/cubic)
+ 7 Parameters (b1 to b7)
+
+ y = (b1+b2*x+b3*x**2+b4*x**3) /
+ (1+b5*x+b6*x**2+b7*x**3) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 10 1 1.0776351733E+00 1.7070154742E-01
+ b2 = -1 -0.1 -1.2269296921E-01 1.2000289189E-02
+ b3 = 0.05 0.005 4.0863750610E-03 2.2508314937E-04
+ b4 = -0.00001 -0.000001 -1.4262662514E-06 2.7578037666E-07
+ b5 = -0.05 -0.005 -5.7609940901E-03 2.4712888219E-04
+ b6 = 0.001 0.0001 2.4053735503E-04 1.0449373768E-05
+ b7 = -0.000001 -0.0000001 -1.2314450199E-07 1.3027335327E-08
+
+Residual Sum of Squares: 1.5324382854E+00
+Residual Standard Deviation: 8.1803852243E-02
+Degrees of Freedom: 229
+Number of Observations: 236
+
+
+
+
+
+
+
+Data: y x
+ .591E0 24.41E0
+ 1.547E0 34.82E0
+ 2.902E0 44.09E0
+ 2.894E0 45.07E0
+ 4.703E0 54.98E0
+ 6.307E0 65.51E0
+ 7.03E0 70.53E0
+ 7.898E0 75.70E0
+ 9.470E0 89.57E0
+ 9.484E0 91.14E0
+ 10.072E0 96.40E0
+ 10.163E0 97.19E0
+ 11.615E0 114.26E0
+ 12.005E0 120.25E0
+ 12.478E0 127.08E0
+ 12.982E0 133.55E0
+ 12.970E0 133.61E0
+ 13.926E0 158.67E0
+ 14.452E0 172.74E0
+ 14.404E0 171.31E0
+ 15.190E0 202.14E0
+ 15.550E0 220.55E0
+ 15.528E0 221.05E0
+ 15.499E0 221.39E0
+ 16.131E0 250.99E0
+ 16.438E0 268.99E0
+ 16.387E0 271.80E0
+ 16.549E0 271.97E0
+ 16.872E0 321.31E0
+ 16.830E0 321.69E0
+ 16.926E0 330.14E0
+ 16.907E0 333.03E0
+ 16.966E0 333.47E0
+ 17.060E0 340.77E0
+ 17.122E0 345.65E0
+ 17.311E0 373.11E0
+ 17.355E0 373.79E0
+ 17.668E0 411.82E0
+ 17.767E0 419.51E0
+ 17.803E0 421.59E0
+ 17.765E0 422.02E0
+ 17.768E0 422.47E0
+ 17.736E0 422.61E0
+ 17.858E0 441.75E0
+ 17.877E0 447.41E0
+ 17.912E0 448.7E0
+ 18.046E0 472.89E0
+ 18.085E0 476.69E0
+ 18.291E0 522.47E0
+ 18.357E0 522.62E0
+ 18.426E0 524.43E0
+ 18.584E0 546.75E0
+ 18.610E0 549.53E0
+ 18.870E0 575.29E0
+ 18.795E0 576.00E0
+ 19.111E0 625.55E0
+ .367E0 20.15E0
+ .796E0 28.78E0
+ 0.892E0 29.57E0
+ 1.903E0 37.41E0
+ 2.150E0 39.12E0
+ 3.697E0 50.24E0
+ 5.870E0 61.38E0
+ 6.421E0 66.25E0
+ 7.422E0 73.42E0
+ 9.944E0 95.52E0
+ 11.023E0 107.32E0
+ 11.87E0 122.04E0
+ 12.786E0 134.03E0
+ 14.067E0 163.19E0
+ 13.974E0 163.48E0
+ 14.462E0 175.70E0
+ 14.464E0 179.86E0
+ 15.381E0 211.27E0
+ 15.483E0 217.78E0
+ 15.59E0 219.14E0
+ 16.075E0 262.52E0
+ 16.347E0 268.01E0
+ 16.181E0 268.62E0
+ 16.915E0 336.25E0
+ 17.003E0 337.23E0
+ 16.978E0 339.33E0
+ 17.756E0 427.38E0
+ 17.808E0 428.58E0
+ 17.868E0 432.68E0
+ 18.481E0 528.99E0
+ 18.486E0 531.08E0
+ 19.090E0 628.34E0
+ 16.062E0 253.24E0
+ 16.337E0 273.13E0
+ 16.345E0 273.66E0
+ 16.388E0 282.10E0
+ 17.159E0 346.62E0
+ 17.116E0 347.19E0
+ 17.164E0 348.78E0
+ 17.123E0 351.18E0
+ 17.979E0 450.10E0
+ 17.974E0 450.35E0
+ 18.007E0 451.92E0
+ 17.993E0 455.56E0
+ 18.523E0 552.22E0
+ 18.669E0 553.56E0
+ 18.617E0 555.74E0
+ 19.371E0 652.59E0
+ 19.330E0 656.20E0
+ 0.080E0 14.13E0
+ 0.248E0 20.41E0
+ 1.089E0 31.30E0
+ 1.418E0 33.84E0
+ 2.278E0 39.70E0
+ 3.624E0 48.83E0
+ 4.574E0 54.50E0
+ 5.556E0 60.41E0
+ 7.267E0 72.77E0
+ 7.695E0 75.25E0
+ 9.136E0 86.84E0
+ 9.959E0 94.88E0
+ 9.957E0 96.40E0
+ 11.600E0 117.37E0
+ 13.138E0 139.08E0
+ 13.564E0 147.73E0
+ 13.871E0 158.63E0
+ 13.994E0 161.84E0
+ 14.947E0 192.11E0
+ 15.473E0 206.76E0
+ 15.379E0 209.07E0
+ 15.455E0 213.32E0
+ 15.908E0 226.44E0
+ 16.114E0 237.12E0
+ 17.071E0 330.90E0
+ 17.135E0 358.72E0
+ 17.282E0 370.77E0
+ 17.368E0 372.72E0
+ 17.483E0 396.24E0
+ 17.764E0 416.59E0
+ 18.185E0 484.02E0
+ 18.271E0 495.47E0
+ 18.236E0 514.78E0
+ 18.237E0 515.65E0
+ 18.523E0 519.47E0
+ 18.627E0 544.47E0
+ 18.665E0 560.11E0
+ 19.086E0 620.77E0
+ 0.214E0 18.97E0
+ 0.943E0 28.93E0
+ 1.429E0 33.91E0
+ 2.241E0 40.03E0
+ 2.951E0 44.66E0
+ 3.782E0 49.87E0
+ 4.757E0 55.16E0
+ 5.602E0 60.90E0
+ 7.169E0 72.08E0
+ 8.920E0 85.15E0
+ 10.055E0 97.06E0
+ 12.035E0 119.63E0
+ 12.861E0 133.27E0
+ 13.436E0 143.84E0
+ 14.167E0 161.91E0
+ 14.755E0 180.67E0
+ 15.168E0 198.44E0
+ 15.651E0 226.86E0
+ 15.746E0 229.65E0
+ 16.216E0 258.27E0
+ 16.445E0 273.77E0
+ 16.965E0 339.15E0
+ 17.121E0 350.13E0
+ 17.206E0 362.75E0
+ 17.250E0 371.03E0
+ 17.339E0 393.32E0
+ 17.793E0 448.53E0
+ 18.123E0 473.78E0
+ 18.49E0 511.12E0
+ 18.566E0 524.70E0
+ 18.645E0 548.75E0
+ 18.706E0 551.64E0
+ 18.924E0 574.02E0
+ 19.1E0 623.86E0
+ 0.375E0 21.46E0
+ 0.471E0 24.33E0
+ 1.504E0 33.43E0
+ 2.204E0 39.22E0
+ 2.813E0 44.18E0
+ 4.765E0 55.02E0
+ 9.835E0 94.33E0
+ 10.040E0 96.44E0
+ 11.946E0 118.82E0
+ 12.596E0 128.48E0
+ 13.303E0 141.94E0
+ 13.922E0 156.92E0
+ 14.440E0 171.65E0
+ 14.951E0 190.00E0
+ 15.627E0 223.26E0
+ 15.639E0 223.88E0
+ 15.814E0 231.50E0
+ 16.315E0 265.05E0
+ 16.334E0 269.44E0
+ 16.430E0 271.78E0
+ 16.423E0 273.46E0
+ 17.024E0 334.61E0
+ 17.009E0 339.79E0
+ 17.165E0 349.52E0
+ 17.134E0 358.18E0
+ 17.349E0 377.98E0
+ 17.576E0 394.77E0
+ 17.848E0 429.66E0
+ 18.090E0 468.22E0
+ 18.276E0 487.27E0
+ 18.404E0 519.54E0
+ 18.519E0 523.03E0
+ 19.133E0 612.99E0
+ 19.074E0 638.59E0
+ 19.239E0 641.36E0
+ 19.280E0 622.05E0
+ 19.101E0 631.50E0
+ 19.398E0 663.97E0
+ 19.252E0 646.9E0
+ 19.89E0 748.29E0
+ 20.007E0 749.21E0
+ 19.929E0 750.14E0
+ 19.268E0 647.04E0
+ 19.324E0 646.89E0
+ 20.049E0 746.9E0
+ 20.107E0 748.43E0
+ 20.062E0 747.35E0
+ 20.065E0 749.27E0
+ 19.286E0 647.61E0
+ 19.972E0 747.78E0
+ 20.088E0 750.51E0
+ 20.743E0 851.37E0
+ 20.83E0 845.97E0
+ 20.935E0 847.54E0
+ 21.035E0 849.93E0
+ 20.93E0 851.61E0
+ 21.074E0 849.75E0
+ 21.085E0 850.98E0
+ 20.935E0 848.23E0
diff --git a/examples/NIST_STRD/Kirby2.dat b/examples/NIST_STRD/Kirby2.dat
new file mode 100644
index 0000000..75cd80f
--- /dev/null
+++ b/examples/NIST_STRD/Kirby2.dat
@@ -0,0 +1,211 @@
+NIST/ITL StRD
+Dataset Name: Kirby2 (Kirby2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 45)
+ Certified Values (lines 41 to 50)
+ Data (lines 61 to 211)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ scanning electron microscope line with standards.
+
+
+Reference: Kirby, R., NIST (197?).
+ Scanning electron microscope line width standards.
+
+
+
+
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 151 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Rational Class (quadratic/quadratic)
+ 5 Parameters (b1 to b5)
+
+ y = (b1 + b2*x + b3*x**2) /
+ (1 + b4*x + b5*x**2) + e
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 1.5 1.6745063063E+00 8.7989634338E-02
+ b2 = -0.1 -0.15 -1.3927397867E-01 4.1182041386E-03
+ b3 = 0.003 0.0025 2.5961181191E-03 4.1856520458E-05
+ b4 = -0.001 -0.0015 -1.7241811870E-03 5.8931897355E-05
+ b5 = 0.00001 0.00002 2.1664802578E-05 2.0129761919E-07
+
+Residual Sum of Squares: 3.9050739624E+00
+Residual Standard Deviation: 1.6354535131E-01
+Degrees of Freedom: 146
+Number of Observations: 151
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.0082E0 9.65E0
+ 0.0112E0 10.74E0
+ 0.0149E0 11.81E0
+ 0.0198E0 12.88E0
+ 0.0248E0 14.06E0
+ 0.0324E0 15.28E0
+ 0.0420E0 16.63E0
+ 0.0549E0 18.19E0
+ 0.0719E0 19.88E0
+ 0.0963E0 21.84E0
+ 0.1291E0 24.00E0
+ 0.1710E0 26.25E0
+ 0.2314E0 28.86E0
+ 0.3227E0 31.85E0
+ 0.4809E0 35.79E0
+ 0.7084E0 40.18E0
+ 1.0220E0 44.74E0
+ 1.4580E0 49.53E0
+ 1.9520E0 53.94E0
+ 2.5410E0 58.29E0
+ 3.2230E0 62.63E0
+ 3.9990E0 67.03E0
+ 4.8520E0 71.25E0
+ 5.7320E0 75.22E0
+ 6.7270E0 79.33E0
+ 7.8350E0 83.56E0
+ 9.0250E0 87.75E0
+ 10.2670E0 91.93E0
+ 11.5780E0 96.10E0
+ 12.9440E0 100.28E0
+ 14.3770E0 104.46E0
+ 15.8560E0 108.66E0
+ 17.3310E0 112.71E0
+ 18.8850E0 116.88E0
+ 20.5750E0 121.33E0
+ 22.3200E0 125.79E0
+ 22.3030E0 125.79E0
+ 23.4600E0 128.74E0
+ 24.0600E0 130.27E0
+ 25.2720E0 133.33E0
+ 25.8530E0 134.79E0
+ 27.1100E0 137.93E0
+ 27.6580E0 139.33E0
+ 28.9240E0 142.46E0
+ 29.5110E0 143.90E0
+ 30.7100E0 146.91E0
+ 31.3500E0 148.51E0
+ 32.5200E0 151.41E0
+ 33.2300E0 153.17E0
+ 34.3300E0 155.97E0
+ 35.0600E0 157.76E0
+ 36.1700E0 160.56E0
+ 36.8400E0 162.30E0
+ 38.0100E0 165.21E0
+ 38.6700E0 166.90E0
+ 39.8700E0 169.92E0
+ 40.0300E0 170.32E0
+ 40.5000E0 171.54E0
+ 41.3700E0 173.79E0
+ 41.6700E0 174.57E0
+ 42.3100E0 176.25E0
+ 42.7300E0 177.34E0
+ 43.4600E0 179.19E0
+ 44.1400E0 181.02E0
+ 44.5500E0 182.08E0
+ 45.2200E0 183.88E0
+ 45.9200E0 185.75E0
+ 46.3000E0 186.80E0
+ 47.0000E0 188.63E0
+ 47.6800E0 190.45E0
+ 48.0600E0 191.48E0
+ 48.7400E0 193.35E0
+ 49.4100E0 195.22E0
+ 49.7600E0 196.23E0
+ 50.4300E0 198.05E0
+ 51.1100E0 199.97E0
+ 51.5000E0 201.06E0
+ 52.1200E0 202.83E0
+ 52.7600E0 204.69E0
+ 53.1800E0 205.86E0
+ 53.7800E0 207.58E0
+ 54.4600E0 209.50E0
+ 54.8300E0 210.65E0
+ 55.4000E0 212.33E0
+ 56.4300E0 215.43E0
+ 57.0300E0 217.16E0
+ 58.0000E0 220.21E0
+ 58.6100E0 221.98E0
+ 59.5800E0 225.06E0
+ 60.1100E0 226.79E0
+ 61.1000E0 229.92E0
+ 61.6500E0 231.69E0
+ 62.5900E0 234.77E0
+ 63.1200E0 236.60E0
+ 64.0300E0 239.63E0
+ 64.6200E0 241.50E0
+ 65.4900E0 244.48E0
+ 66.0300E0 246.40E0
+ 66.8900E0 249.35E0
+ 67.4200E0 251.32E0
+ 68.2300E0 254.22E0
+ 68.7700E0 256.24E0
+ 69.5900E0 259.11E0
+ 70.1100E0 261.18E0
+ 70.8600E0 264.02E0
+ 71.4300E0 266.13E0
+ 72.1600E0 268.94E0
+ 72.7000E0 271.09E0
+ 73.4000E0 273.87E0
+ 73.9300E0 276.08E0
+ 74.6000E0 278.83E0
+ 75.1600E0 281.08E0
+ 75.8200E0 283.81E0
+ 76.3400E0 286.11E0
+ 76.9800E0 288.81E0
+ 77.4800E0 291.08E0
+ 78.0800E0 293.75E0
+ 78.6000E0 295.99E0
+ 79.1700E0 298.64E0
+ 79.6200E0 300.84E0
+ 79.8800E0 302.02E0
+ 80.1900E0 303.48E0
+ 80.6600E0 305.65E0
+ 81.2200E0 308.27E0
+ 81.6600E0 310.41E0
+ 82.1600E0 313.01E0
+ 82.5900E0 315.12E0
+ 83.1400E0 317.71E0
+ 83.5000E0 319.79E0
+ 84.0000E0 322.36E0
+ 84.4000E0 324.42E0
+ 84.8900E0 326.98E0
+ 85.2600E0 329.01E0
+ 85.7400E0 331.56E0
+ 86.0700E0 333.56E0
+ 86.5400E0 336.10E0
+ 86.8900E0 338.08E0
+ 87.3200E0 340.60E0
+ 87.6500E0 342.57E0
+ 88.1000E0 345.08E0
+ 88.4300E0 347.02E0
+ 88.8300E0 349.52E0
+ 89.1200E0 351.44E0
+ 89.5400E0 353.93E0
+ 89.8500E0 355.83E0
+ 90.2500E0 358.32E0
+ 90.5500E0 360.20E0
+ 90.9300E0 362.67E0
+ 91.2000E0 364.53E0
+ 91.5500E0 367.00E0
+ 92.2000E0 371.30E0
diff --git a/examples/NIST_STRD/Lanczos1.dat b/examples/NIST_STRD/Lanczos1.dat
new file mode 100644
index 0000000..d23d5e4
--- /dev/null
+++ b/examples/NIST_STRD/Lanczos1.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos1 (Lanczos1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 14-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 9.5100000027E-02 5.3347304234E-11
+ b2 = 0.3 0.7 1.0000000001E+00 2.7473038179E-10
+ b3 = 5.6 3.6 8.6070000013E-01 1.3576062225E-10
+ b4 = 5.5 4.2 3.0000000002E+00 3.3308253069E-10
+ b5 = 6.5 4 1.5575999998E+00 1.8815731448E-10
+ b6 = 7.6 6.3 5.0000000001E+00 1.1057500538E-10
+
+Residual Sum of Squares: 1.4307867721E-25
+Residual Standard Deviation: 8.9156129349E-14
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.513400000000E+00 0.000000000000E+00
+ 2.044333373291E+00 5.000000000000E-02
+ 1.668404436564E+00 1.000000000000E-01
+ 1.366418021208E+00 1.500000000000E-01
+ 1.123232487372E+00 2.000000000000E-01
+ 9.268897180037E-01 2.500000000000E-01
+ 7.679338563728E-01 3.000000000000E-01
+ 6.388775523106E-01 3.500000000000E-01
+ 5.337835317402E-01 4.000000000000E-01
+ 4.479363617347E-01 4.500000000000E-01
+ 3.775847884350E-01 5.000000000000E-01
+ 3.197393199326E-01 5.500000000000E-01
+ 2.720130773746E-01 6.000000000000E-01
+ 2.324965529032E-01 6.500000000000E-01
+ 1.996589546065E-01 7.000000000000E-01
+ 1.722704126914E-01 7.500000000000E-01
+ 1.493405660168E-01 8.000000000000E-01
+ 1.300700206922E-01 8.500000000000E-01
+ 1.138119324644E-01 9.000000000000E-01
+ 1.000415587559E-01 9.500000000000E-01
+ 8.833209084540E-02 1.000000000000E+00
+ 7.833544019350E-02 1.050000000000E+00
+ 6.976693743449E-02 1.100000000000E+00
+ 6.239312536719E-02 1.150000000000E+00
diff --git a/examples/NIST_STRD/Lanczos2.dat b/examples/NIST_STRD/Lanczos2.dat
new file mode 100644
index 0000000..f9f2b4b
--- /dev/null
+++ b/examples/NIST_STRD/Lanczos2.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos2 (Lanczos2.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 6-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 9.6251029939E-02 6.6770575477E-04
+ b2 = 0.3 0.7 1.0057332849E+00 3.3989646176E-03
+ b3 = 5.6 3.6 8.6424689056E-01 1.7185846685E-03
+ b4 = 5.5 4.2 3.0078283915E+00 4.1707005856E-03
+ b5 = 6.5 4 1.5529016879E+00 2.3744381417E-03
+ b6 = 7.6 6.3 5.0028798100E+00 1.3958787284E-03
+
+Residual Sum of Squares: 2.2299428125E-11
+Residual Standard Deviation: 1.1130395851E-06
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.51340E+00 0.00000E+00
+ 2.04433E+00 5.00000E-02
+ 1.66840E+00 1.00000E-01
+ 1.36642E+00 1.50000E-01
+ 1.12323E+00 2.00000E-01
+ 9.26890E-01 2.50000E-01
+ 7.67934E-01 3.00000E-01
+ 6.38878E-01 3.50000E-01
+ 5.33784E-01 4.00000E-01
+ 4.47936E-01 4.50000E-01
+ 3.77585E-01 5.00000E-01
+ 3.19739E-01 5.50000E-01
+ 2.72013E-01 6.00000E-01
+ 2.32497E-01 6.50000E-01
+ 1.99659E-01 7.00000E-01
+ 1.72270E-01 7.50000E-01
+ 1.49341E-01 8.00000E-01
+ 1.30070E-01 8.50000E-01
+ 1.13812E-01 9.00000E-01
+ 1.00042E-01 9.50000E-01
+ 8.83321E-02 1.00000E+00
+ 7.83354E-02 1.05000E+00
+ 6.97669E-02 1.10000E+00
+ 6.23931E-02 1.15000E+00
diff --git a/examples/NIST_STRD/Lanczos3.dat b/examples/NIST_STRD/Lanczos3.dat
new file mode 100644
index 0000000..67c1512
--- /dev/null
+++ b/examples/NIST_STRD/Lanczos3.dat
@@ -0,0 +1,84 @@
+NIST/ITL StRD
+Dataset Name: Lanczos3 (Lanczos3.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 46)
+ Certified Values (lines 41 to 51)
+ Data (lines 61 to 84)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are taken from an example discussed in
+ Lanczos (1956). The data were generated to 5-digits
+ of accuracy using
+ f(x) = 0.0951*exp(-x) + 0.8607*exp(-3*x)
+ + 1.5576*exp(-5*x).
+
+
+Reference: Lanczos, C. (1956).
+ Applied Analysis.
+ Englewood Cliffs, NJ: Prentice Hall, pp. 272-280.
+
+
+
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 24 Observations
+ Lower Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 6 Parameters (b1 to b6)
+
+ y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1.2 0.5 8.6816414977E-02 1.7197908859E-02
+ b2 = 0.3 0.7 9.5498101505E-01 9.7041624475E-02
+ b3 = 5.6 3.6 8.4400777463E-01 4.1488663282E-02
+ b4 = 5.5 4.2 2.9515951832E+00 1.0766312506E-01
+ b5 = 6.5 4 1.5825685901E+00 5.8371576281E-02
+ b6 = 7.6 6.3 4.9863565084E+00 3.4436403035E-02
+
+Residual Sum of Squares: 1.6117193594E-08
+Residual Standard Deviation: 2.9923229172E-05
+Degrees of Freedom: 18
+Number of Observations: 24
+
+
+
+
+
+
+
+
+Data: y x
+ 2.5134E+00 0.00000E+00
+ 2.0443E+00 5.00000E-02
+ 1.6684E+00 1.00000E-01
+ 1.3664E+00 1.50000E-01
+ 1.1232E+00 2.00000E-01
+ 0.9269E+00 2.50000E-01
+ 0.7679E+00 3.00000E-01
+ 0.6389E+00 3.50000E-01
+ 0.5338E+00 4.00000E-01
+ 0.4479E+00 4.50000E-01
+ 0.3776E+00 5.00000E-01
+ 0.3197E+00 5.50000E-01
+ 0.2720E+00 6.00000E-01
+ 0.2325E+00 6.50000E-01
+ 0.1997E+00 7.00000E-01
+ 0.1723E+00 7.50000E-01
+ 0.1493E+00 8.00000E-01
+ 0.1301E+00 8.50000E-01
+ 0.1138E+00 9.00000E-01
+ 0.1000E+00 9.50000E-01
+ 0.0883E+00 1.00000E+00
+ 0.0783E+00 1.05000E+00
+ 0.0698E+00 1.10000E+00
+ 0.0624E+00 1.15000E+00
diff --git a/examples/NIST_STRD/MGH09.dat b/examples/NIST_STRD/MGH09.dat
new file mode 100644
index 0000000..55a2d42
--- /dev/null
+++ b/examples/NIST_STRD/MGH09.dat
@@ -0,0 +1,71 @@
+NIST/ITL StRD
+Dataset Name: MGH09 (MGH09.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 71)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms. There is a local minimum at (+inf,
+ -14.07..., -inf, -inf) with final sum of squares
+ 0.00102734....
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Kowalik, J.S., and M. R. Osborne, (1978).
+ Methods for Unconstrained Optimization Problems.
+ New York, NY: Elsevier North-Holland.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 11 Observations
+ Higher Level of Difficulty
+ Generated Data
+
+Model: Rational Class (linear/quadratic)
+ 4 Parameters (b1 to b4)
+
+ y = b1*(x**2+x*b2) / (x**2+x*b3+b4) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 25 0.25 1.9280693458E-01 1.1435312227E-02
+ b2 = 39 0.39 1.9128232873E-01 1.9633220911E-01
+ b3 = 41.5 0.415 1.2305650693E-01 8.0842031232E-02
+ b4 = 39 0.39 1.3606233068E-01 9.0025542308E-02
+
+Residual Sum of Squares: 3.0750560385E-04
+Residual Standard Deviation: 6.6279236551E-03
+Degrees of Freedom: 7
+Number of Observations: 11
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 1.957000E-01 4.000000E+00
+ 1.947000E-01 2.000000E+00
+ 1.735000E-01 1.000000E+00
+ 1.600000E-01 5.000000E-01
+ 8.440000E-02 2.500000E-01
+ 6.270000E-02 1.670000E-01
+ 4.560000E-02 1.250000E-01
+ 3.420000E-02 1.000000E-01
+ 3.230000E-02 8.330000E-02
+ 2.350000E-02 7.140000E-02
+ 2.460000E-02 6.250000E-02
diff --git a/examples/NIST_STRD/MGH10.dat b/examples/NIST_STRD/MGH10.dat
new file mode 100644
index 0000000..b2ffbec
--- /dev/null
+++ b/examples/NIST_STRD/MGH10.dat
@@ -0,0 +1,76 @@
+NIST/ITL StRD
+Dataset Name: MGH10 (MGH10.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 76)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms.
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Meyer, R. R. (1970).
+ Theoretical and computational aspects of nonlinear
+ regression. In Nonlinear Programming, Rosen,
+ Mangasarian and Ritter (Eds).
+ New York, NY: Academic Press, pp. 465-486.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 16 Observations
+ Higher Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 * exp[b2/(x+b3)] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 0.02 5.6096364710E-03 1.5687892471E-04
+ b2 = 400000 4000 6.1813463463E+03 2.3309021107E+01
+ b3 = 25000 250 3.4522363462E+02 7.8486103508E-01
+
+Residual Sum of Squares: 8.7945855171E+01
+Residual Standard Deviation: 2.6009740065E+00
+Degrees of Freedom: 13
+Number of Observations: 16
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 3.478000E+04 5.000000E+01
+ 2.861000E+04 5.500000E+01
+ 2.365000E+04 6.000000E+01
+ 1.963000E+04 6.500000E+01
+ 1.637000E+04 7.000000E+01
+ 1.372000E+04 7.500000E+01
+ 1.154000E+04 8.000000E+01
+ 9.744000E+03 8.500000E+01
+ 8.261000E+03 9.000000E+01
+ 7.030000E+03 9.500000E+01
+ 6.005000E+03 1.000000E+02
+ 5.147000E+03 1.050000E+02
+ 4.427000E+03 1.100000E+02
+ 3.820000E+03 1.150000E+02
+ 3.307000E+03 1.200000E+02
+ 2.872000E+03 1.250000E+02
diff --git a/examples/NIST_STRD/MGH17.dat b/examples/NIST_STRD/MGH17.dat
new file mode 100644
index 0000000..584f73c
--- /dev/null
+++ b/examples/NIST_STRD/MGH17.dat
@@ -0,0 +1,93 @@
+NIST/ITL StRD
+Dataset Name: MGH17 (MGH17.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 45)
+ Certified Values (lines 41 to 50)
+ Data (lines 61 to 93)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This problem was found to be difficult for some very
+ good algorithms.
+
+ See More, J. J., Garbow, B. S., and Hillstrom, K. E.
+ (1981). Testing unconstrained optimization software.
+ ACM Transactions on Mathematical Software. 7(1):
+ pp. 17-41.
+
+Reference: Osborne, M. R. (1972).
+ Some aspects of nonlinear least squares
+ calculations. In Numerical Methods for Nonlinear
+ Optimization, Lootsma (Ed).
+ New York, NY: Academic Press, pp. 171-189.
+
+Data: 1 Response (y)
+ 1 Predictor (x)
+ 33 Observations
+ Average Level of Difficulty
+ Generated Data
+
+Model: Exponential Class
+ 5 Parameters (b1 to b5)
+
+ y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 50 0.5 3.7541005211E-01 2.0723153551E-03
+ b2 = 150 1.5 1.9358469127E+00 2.2031669222E-01
+ b3 = -100 -1 -1.4646871366E+00 2.2175707739E-01
+ b4 = 1 0.01 1.2867534640E-02 4.4861358114E-04
+ b5 = 2 0.02 2.2122699662E-02 8.9471996575E-04
+
+Residual Sum of Squares: 5.4648946975E-05
+Residual Standard Deviation: 1.3970497866E-03
+Degrees of Freedom: 28
+Number of Observations: 33
+
+
+
+
+
+
+
+
+
+Data: y x
+ 8.440000E-01 0.000000E+00
+ 9.080000E-01 1.000000E+01
+ 9.320000E-01 2.000000E+01
+ 9.360000E-01 3.000000E+01
+ 9.250000E-01 4.000000E+01
+ 9.080000E-01 5.000000E+01
+ 8.810000E-01 6.000000E+01
+ 8.500000E-01 7.000000E+01
+ 8.180000E-01 8.000000E+01
+ 7.840000E-01 9.000000E+01
+ 7.510000E-01 1.000000E+02
+ 7.180000E-01 1.100000E+02
+ 6.850000E-01 1.200000E+02
+ 6.580000E-01 1.300000E+02
+ 6.280000E-01 1.400000E+02
+ 6.030000E-01 1.500000E+02
+ 5.800000E-01 1.600000E+02
+ 5.580000E-01 1.700000E+02
+ 5.380000E-01 1.800000E+02
+ 5.220000E-01 1.900000E+02
+ 5.060000E-01 2.000000E+02
+ 4.900000E-01 2.100000E+02
+ 4.780000E-01 2.200000E+02
+ 4.670000E-01 2.300000E+02
+ 4.570000E-01 2.400000E+02
+ 4.480000E-01 2.500000E+02
+ 4.380000E-01 2.600000E+02
+ 4.310000E-01 2.700000E+02
+ 4.240000E-01 2.800000E+02
+ 4.200000E-01 2.900000E+02
+ 4.140000E-01 3.000000E+02
+ 4.110000E-01 3.100000E+02
+ 4.060000E-01 3.200000E+02
diff --git a/examples/NIST_STRD/Misra1a.dat b/examples/NIST_STRD/Misra1a.dat
new file mode 100644
index 0000000..24f92a8
--- /dev/null
+++ b/examples/NIST_STRD/Misra1a.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1a (Misra1a.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response Variable (y = volume)
+ 1 Predictor Variable (x = pressure)
+ 14 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*(1-exp[-b2*x]) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 250 2.3894212918E+02 2.7070075241E+00
+ b2 = 0.0001 0.0005 5.5015643181E-04 7.2668688436E-06
+
+Residual Sum of Squares: 1.2455138894E-01
+Residual Standard Deviation: 1.0187876330E-01
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/examples/NIST_STRD/Misra1b.dat b/examples/NIST_STRD/Misra1b.dat
new file mode 100644
index 0000000..a0da9d3
--- /dev/null
+++ b/examples/NIST_STRD/Misra1b.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1b (Misra1b.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Lower Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1 * (1-(1+b2*x/2)**(-2)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 300 3.3799746163E+02 3.1643950207E+00
+ b2 = 0.0001 0.0002 3.9039091287E-04 4.2547321834E-06
+
+Residual Sum of Squares: 7.5464681533E-02
+Residual Standard Deviation: 7.9301471998E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/examples/NIST_STRD/Misra1c.dat b/examples/NIST_STRD/Misra1c.dat
new file mode 100644
index 0000000..64681d3
--- /dev/null
+++ b/examples/NIST_STRD/Misra1c.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1c (Misra1c.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1 * (1-(1+2*b2*x)**(-.5)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 600 6.3642725809E+02 4.6638326572E+00
+ b2 = 0.0001 0.0002 2.0813627256E-04 1.7728423155E-06
+
+Residual Sum of Squares: 4.0966836971E-02
+Residual Standard Deviation: 5.8428615257E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/examples/NIST_STRD/Misra1d.dat b/examples/NIST_STRD/Misra1d.dat
new file mode 100644
index 0000000..fcf12d3
--- /dev/null
+++ b/examples/NIST_STRD/Misra1d.dat
@@ -0,0 +1,74 @@
+NIST/ITL StRD
+Dataset Name: Misra1d (Misra1d.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 42)
+ Certified Values (lines 41 to 47)
+ Data (lines 61 to 74)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study regarding
+ dental research in monomolecular adsorption. The
+ response variable is volume, and the predictor
+ variable is pressure.
+
+Reference: Misra, D., NIST (1978).
+ Dental Research Monomolecular Adsorption Study.
+
+
+
+
+
+
+
+Data: 1 Response (y = volume)
+ 1 Predictor (x = pressure)
+ 14 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 2 Parameters (b1 and b2)
+
+ y = b1*b2*x*((1+b2*x)**(-1)) + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 500 450 4.3736970754E+02 3.6489174345E+00
+ b2 = 0.0001 0.0003 3.0227324449E-04 2.9334354479E-06
+
+Residual Sum of Squares: 5.6419295283E-02
+Residual Standard Deviation: 6.8568272111E-02
+Degrees of Freedom: 12
+Number of Observations: 14
+
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 10.07E0 77.6E0
+ 14.73E0 114.9E0
+ 17.94E0 141.1E0
+ 23.93E0 190.8E0
+ 29.61E0 239.9E0
+ 35.18E0 289.0E0
+ 40.02E0 332.8E0
+ 44.82E0 378.4E0
+ 50.76E0 434.8E0
+ 55.05E0 477.3E0
+ 61.01E0 536.8E0
+ 66.40E0 593.1E0
+ 75.47E0 689.1E0
+ 81.78E0 760.0E0
diff --git a/examples/NIST_STRD/Models b/examples/NIST_STRD/Models
new file mode 100644
index 0000000..c4dacf4
--- /dev/null
+++ b/examples/NIST_STRD/Models
@@ -0,0 +1,215 @@
+Bennett5.dat:Model: Miscellaneous Class
+Bennett5.dat- 3 Parameters (b1 to b3)
+Bennett5.dat-
+Bennett5.dat- y = b1 * (b2+x)**(-1/b3) + e
+Bennett5.dat-
+Bennett5.dat-
+Bennett5.dat-
+--
+BoxBOD.dat:Model: Exponential Class
+BoxBOD.dat- 2 Parameters (b1 and b2)
+BoxBOD.dat-
+BoxBOD.dat- y = b1*(1-exp[-b2*x]) + e
+BoxBOD.dat-
+BoxBOD.dat-
+BoxBOD.dat-
+--
+Chwirut1.dat:Model: Exponential Class
+Chwirut1.dat- 3 Parameters (b1 to b3)
+Chwirut1.dat-
+Chwirut1.dat- y = exp[-b1*x]/(b2+b3*x) + e
+Chwirut1.dat-
+Chwirut1.dat-
+Chwirut1.dat-
+--
+Chwirut2.dat:Model: Exponential Class
+Chwirut2.dat- 3 Parameters (b1 to b3)
+Chwirut2.dat-
+Chwirut2.dat- y = exp(-b1*x)/(b2+b3*x) + e
+Chwirut2.dat-
+Chwirut2.dat-
+Chwirut2.dat-
+--
+DanWood.dat:Model: Miscellaneous Class
+DanWood.dat- 2 Parameters (b1 and b2)
+DanWood.dat-
+DanWood.dat- y = b1*x**b2 + e
+DanWood.dat-
+DanWood.dat-
+DanWood.dat-
+--
+ENSO.dat:Model: Miscellaneous Class
+ENSO.dat- 9 Parameters (b1 to b9)
+ENSO.dat-
+ENSO.dat- y = b1 + b2*cos( 2*pi*x/12 ) + b3*sin( 2*pi*x/12 )
+ENSO.dat- + b5*cos( 2*pi*x/b4 ) + b6*sin( 2*pi*x/b4 )
+ENSO.dat- + b8*cos( 2*pi*x/b7 ) + b9*sin( 2*pi*x/b7 ) + e
+ENSO.dat-
+--
+Eckerle4.dat:Model: Exponential Class
+Eckerle4.dat- 3 Parameters (b1 to b3)
+Eckerle4.dat-
+Eckerle4.dat- y = (b1/b2) * exp[-0.5*((x-b3)/b2)**2] + e
+Eckerle4.dat-
+Eckerle4.dat-
+Eckerle4.dat-
+--
+Gauss1.dat:Model: Exponential Class
+Gauss1.dat- 8 Parameters (b1 to b8)
+Gauss1.dat-
+Gauss1.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss1.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss1.dat-
+Gauss1.dat-
+--
+Gauss2.dat:Model: Exponential Class
+Gauss2.dat- 8 Parameters (b1 to b8)
+Gauss2.dat-
+Gauss2.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss2.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss2.dat-
+Gauss2.dat-
+--
+Gauss3.dat:Model: Exponential Class
+Gauss3.dat- 8 Parameters (b1 to b8)
+Gauss3.dat-
+Gauss3.dat- y = b1*exp( -b2*x ) + b3*exp( -(x-b4)**2 / b5**2 )
+Gauss3.dat- + b6*exp( -(x-b7)**2 / b8**2 ) + e
+Gauss3.dat-
+Gauss3.dat-
+--
+Hahn1.dat:Model: Rational Class (cubic/cubic)
+Hahn1.dat- 7 Parameters (b1 to b7)
+Hahn1.dat-
+Hahn1.dat- y = (b1+b2*x+b3*x**2+b4*x**3) /
+Hahn1.dat- (1+b5*x+b6*x**2+b7*x**3) + e
+Hahn1.dat-
+Hahn1.dat-
+--
+Kirby2.dat:Model: Rational Class (quadratic/quadratic)
+Kirby2.dat- 5 Parameters (b1 to b5)
+Kirby2.dat-
+Kirby2.dat- y = (b1 + b2*x + b3*x**2) /
+Kirby2.dat- (1 + b4*x + b5*x**2) + e
+Kirby2.dat-
+Kirby2.dat-
+--
+Lanczos1.dat:Model: Exponential Class
+Lanczos1.dat- 6 Parameters (b1 to b6)
+Lanczos1.dat-
+Lanczos1.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos1.dat-
+Lanczos1.dat-
+Lanczos1.dat-
+--
+Lanczos2.dat:Model: Exponential Class
+Lanczos2.dat- 6 Parameters (b1 to b6)
+Lanczos2.dat-
+Lanczos2.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos2.dat-
+Lanczos2.dat-
+Lanczos2.dat-
+--
+Lanczos3.dat:Model: Exponential Class
+Lanczos3.dat- 6 Parameters (b1 to b6)
+Lanczos3.dat-
+Lanczos3.dat- y = b1*exp(-b2*x) + b3*exp(-b4*x) + b5*exp(-b6*x) + e
+Lanczos3.dat-
+Lanczos3.dat-
+Lanczos3.dat-
+--
+MGH09.dat:Model: Rational Class (linear/quadratic)
+MGH09.dat- 4 Parameters (b1 to b4)
+MGH09.dat-
+MGH09.dat- y = b1*(x**2+x*b2) / (x**2+x*b3+b4) + e
+MGH09.dat-
+MGH09.dat-
+MGH09.dat-
+--
+MGH10.dat:Model: Exponential Class
+MGH10.dat- 3 Parameters (b1 to b3)
+MGH10.dat-
+MGH10.dat- y = b1 * exp[b2/(x+b3)] + e
+MGH10.dat-
+MGH10.dat-
+MGH10.dat-
+--
+MGH17.dat:Model: Exponential Class
+MGH17.dat- 5 Parameters (b1 to b5)
+MGH17.dat-
+MGH17.dat- y = b1 + b2*exp[-x*b4] + b3*exp[-x*b5] + e
+MGH17.dat-
+MGH17.dat-
+MGH17.dat-
+--
+Misra1a.dat:Model: Exponential Class
+Misra1a.dat- 2 Parameters (b1 and b2)
+Misra1a.dat-
+Misra1a.dat- y = b1*(1-exp[-b2*x]) + e
+Misra1a.dat-
+Misra1a.dat-
+Misra1a.dat-
+--
+Misra1b.dat:Model: Miscellaneous Class
+Misra1b.dat- 2 Parameters (b1 and b2)
+Misra1b.dat-
+Misra1b.dat- y = b1 * (1-(1+b2*x/2)**(-2)) + e
+Misra1b.dat-
+Misra1b.dat-
+Misra1b.dat-
+--
+Misra1c.dat:Model: Miscellaneous Class
+Misra1c.dat- 2 Parameters (b1 and b2)
+Misra1c.dat-
+Misra1c.dat- y = b1 * (1-(1+2*b2*x)**(-.5)) + e
+Misra1c.dat-
+Misra1c.dat-
+Misra1c.dat-
+--
+Misra1d.dat:Model: Miscellaneous Class
+Misra1d.dat- 2 Parameters (b1 and b2)
+Misra1d.dat-
+Misra1d.dat- y = b1*b2*x*((1+b2*x)**(-1)) + e
+Misra1d.dat-
+Misra1d.dat-
+Misra1d.dat-
+--
+Nelson.dat:Model: Exponential Class
+Nelson.dat- 3 Parameters (b1 to b3)
+Nelson.dat-
+Nelson.dat- log[y] = b1 - b2*x1 * exp[-b3*x2] + e
+Nelson.dat-
+Nelson.dat-
+Nelson.dat-
+--
+Rat42.dat:Model: Exponential Class
+Rat42.dat- 3 Parameters (b1 to b3)
+Rat42.dat-
+Rat42.dat- y = b1 / (1+exp[b2-b3*x]) + e
+Rat42.dat-
+Rat42.dat-
+Rat42.dat-
+--
+Rat43.dat:Model: Exponential Class
+Rat43.dat- 4 Parameters (b1 to b4)
+Rat43.dat-
+Rat43.dat- y = b1 / ((1+exp[b2-b3*x])**(1/b4)) + e
+Rat43.dat-
+Rat43.dat-
+Rat43.dat-
+--
+Roszman1.dat:Model: Miscellaneous Class
+Roszman1.dat- 4 Parameters (b1 to b4)
+Roszman1.dat-
+Roszman1.dat- pi = 3.141592653589793238462643383279E0
+Roszman1.dat- y = b1 - b2*x - arctan[b3/(x-b4)]/pi + e
+Roszman1.dat-
+Roszman1.dat-
+--
+Thurber.dat:Model: Rational Class (cubic/cubic)
+Thurber.dat- 7 Parameters (b1 to b7)
+Thurber.dat-
+Thurber.dat- y = (b1 + b2*x + b3*x**2 + b4*x**3) /
+Thurber.dat- (1 + b5*x + b6*x**2 + b7*x**3) + e
+Thurber.dat-
+Thurber.dat-
diff --git a/examples/NIST_STRD/Nelson.dat b/examples/NIST_STRD/Nelson.dat
new file mode 100644
index 0000000..a6dc9e2
--- /dev/null
+++ b/examples/NIST_STRD/Nelson.dat
@@ -0,0 +1,188 @@
+NIST/ITL StRD
+Dataset Name: Nelson (Nelson.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 188)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a study involving
+ the analysis of performance degradation data from
+ accelerated tests, published in IEEE Transactions
+ on Reliability. The response variable is dialectric
+ breakdown strength in kilo-volts, and the predictor
+ variables are time in weeks and temperature in degrees
+ Celcius.
+
+
+Reference: Nelson, W. (1981).
+ Analysis of Performance-Degradation Data.
+ IEEE Transactions on Reliability.
+ Vol. 2, R-30, No. 2, pp. 149-155.
+
+Data: 1 Response ( y = dialectric breakdown strength)
+ 2 Predictors (x1 = time; x2 = temperature)
+ 128 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ log[y] = b1 - b2*x1 * exp[-b3*x2] + e
+
+
+
+ Starting values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 2 2.5 2.5906836021E+00 1.9149996413E-02
+ b2 = 0.0001 0.000000005 5.6177717026E-09 6.1124096540E-09
+ b3 = -0.01 -0.05 -5.7701013174E-02 3.9572366543E-03
+
+Residual Sum of Squares: 3.7976833176E+00
+Residual Standard Deviation: 1.7430280130E-01
+Degrees of Freedom: 125
+Number of Observations: 128
+
+
+
+
+
+
+
+
+
+
+
+Data: y x1 x2
+ 15.00E0 1E0 180E0
+ 17.00E0 1E0 180E0
+ 15.50E0 1E0 180E0
+ 16.50E0 1E0 180E0
+ 15.50E0 1E0 225E0
+ 15.00E0 1E0 225E0
+ 16.00E0 1E0 225E0
+ 14.50E0 1E0 225E0
+ 15.00E0 1E0 250E0
+ 14.50E0 1E0 250E0
+ 12.50E0 1E0 250E0
+ 11.00E0 1E0 250E0
+ 14.00E0 1E0 275E0
+ 13.00E0 1E0 275E0
+ 14.00E0 1E0 275E0
+ 11.50E0 1E0 275E0
+ 14.00E0 2E0 180E0
+ 16.00E0 2E0 180E0
+ 13.00E0 2E0 180E0
+ 13.50E0 2E0 180E0
+ 13.00E0 2E0 225E0
+ 13.50E0 2E0 225E0
+ 12.50E0 2E0 225E0
+ 12.50E0 2E0 225E0
+ 12.50E0 2E0 250E0
+ 12.00E0 2E0 250E0
+ 11.50E0 2E0 250E0
+ 12.00E0 2E0 250E0
+ 13.00E0 2E0 275E0
+ 11.50E0 2E0 275E0
+ 13.00E0 2E0 275E0
+ 12.50E0 2E0 275E0
+ 13.50E0 4E0 180E0
+ 17.50E0 4E0 180E0
+ 17.50E0 4E0 180E0
+ 13.50E0 4E0 180E0
+ 12.50E0 4E0 225E0
+ 12.50E0 4E0 225E0
+ 15.00E0 4E0 225E0
+ 13.00E0 4E0 225E0
+ 12.00E0 4E0 250E0
+ 13.00E0 4E0 250E0
+ 12.00E0 4E0 250E0
+ 13.50E0 4E0 250E0
+ 10.00E0 4E0 275E0
+ 11.50E0 4E0 275E0
+ 11.00E0 4E0 275E0
+ 9.50E0 4E0 275E0
+ 15.00E0 8E0 180E0
+ 15.00E0 8E0 180E0
+ 15.50E0 8E0 180E0
+ 16.00E0 8E0 180E0
+ 13.00E0 8E0 225E0
+ 10.50E0 8E0 225E0
+ 13.50E0 8E0 225E0
+ 14.00E0 8E0 225E0
+ 12.50E0 8E0 250E0
+ 12.00E0 8E0 250E0
+ 11.50E0 8E0 250E0
+ 11.50E0 8E0 250E0
+ 6.50E0 8E0 275E0
+ 5.50E0 8E0 275E0
+ 6.00E0 8E0 275E0
+ 6.00E0 8E0 275E0
+ 18.50E0 16E0 180E0
+ 17.00E0 16E0 180E0
+ 15.30E0 16E0 180E0
+ 16.00E0 16E0 180E0
+ 13.00E0 16E0 225E0
+ 14.00E0 16E0 225E0
+ 12.50E0 16E0 225E0
+ 11.00E0 16E0 225E0
+ 12.00E0 16E0 250E0
+ 12.00E0 16E0 250E0
+ 11.50E0 16E0 250E0
+ 12.00E0 16E0 250E0
+ 6.00E0 16E0 275E0
+ 6.00E0 16E0 275E0
+ 5.00E0 16E0 275E0
+ 5.50E0 16E0 275E0
+ 12.50E0 32E0 180E0
+ 13.00E0 32E0 180E0
+ 16.00E0 32E0 180E0
+ 12.00E0 32E0 180E0
+ 11.00E0 32E0 225E0
+ 9.50E0 32E0 225E0
+ 11.00E0 32E0 225E0
+ 11.00E0 32E0 225E0
+ 11.00E0 32E0 250E0
+ 10.00E0 32E0 250E0
+ 10.50E0 32E0 250E0
+ 10.50E0 32E0 250E0
+ 2.70E0 32E0 275E0
+ 2.70E0 32E0 275E0
+ 2.50E0 32E0 275E0
+ 2.40E0 32E0 275E0
+ 13.00E0 48E0 180E0
+ 13.50E0 48E0 180E0
+ 16.50E0 48E0 180E0
+ 13.60E0 48E0 180E0
+ 11.50E0 48E0 225E0
+ 10.50E0 48E0 225E0
+ 13.50E0 48E0 225E0
+ 12.00E0 48E0 225E0
+ 7.00E0 48E0 250E0
+ 6.90E0 48E0 250E0
+ 8.80E0 48E0 250E0
+ 7.90E0 48E0 250E0
+ 1.20E0 48E0 275E0
+ 1.50E0 48E0 275E0
+ 1.00E0 48E0 275E0
+ 1.50E0 48E0 275E0
+ 13.00E0 64E0 180E0
+ 12.50E0 64E0 180E0
+ 16.50E0 64E0 180E0
+ 16.00E0 64E0 180E0
+ 11.00E0 64E0 225E0
+ 11.50E0 64E0 225E0
+ 10.50E0 64E0 225E0
+ 10.00E0 64E0 225E0
+ 7.27E0 64E0 250E0
+ 7.50E0 64E0 250E0
+ 6.70E0 64E0 250E0
+ 7.60E0 64E0 250E0
+ 1.50E0 64E0 275E0
+ 1.00E0 64E0 275E0
+ 1.20E0 64E0 275E0
+ 1.20E0 64E0 275E0
diff --git a/examples/NIST_STRD/Rat42.dat b/examples/NIST_STRD/Rat42.dat
new file mode 100644
index 0000000..5468df8
--- /dev/null
+++ b/examples/NIST_STRD/Rat42.dat
@@ -0,0 +1,69 @@
+NIST/ITL StRD
+Dataset Name: Rat42 (Rat42.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 43)
+ Certified Values (lines 41 to 48)
+ Data (lines 61 to 69)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This model and data are an example of fitting
+ sigmoidal growth curves taken from Ratkowsky (1983).
+ The response variable is pasture yield, and the
+ predictor variable is growing time.
+
+
+Reference: Ratkowsky, D.A. (1983).
+ Nonlinear Regression Modeling.
+ New York, NY: Marcel Dekker, pp. 61 and 88.
+
+
+
+
+
+Data: 1 Response (y = pasture yield)
+ 1 Predictor (x = growing time)
+ 9 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 3 Parameters (b1 to b3)
+
+ y = b1 / (1+exp[b2-b3*x]) + e
+
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 100 75 7.2462237576E+01 1.7340283401E+00
+ b2 = 1 2.5 2.6180768402E+00 8.8295217536E-02
+ b3 = 0.1 0.07 6.7359200066E-02 3.4465663377E-03
+
+Residual Sum of Squares: 8.0565229338E+00
+Residual Standard Deviation: 1.1587725499E+00
+Degrees of Freedom: 6
+Number of Observations: 9
+
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 8.930E0 9.000E0
+ 10.800E0 14.000E0
+ 18.590E0 21.000E0
+ 22.330E0 28.000E0
+ 39.350E0 42.000E0
+ 56.110E0 57.000E0
+ 61.730E0 63.000E0
+ 64.620E0 70.000E0
+ 67.080E0 79.000E0
diff --git a/examples/NIST_STRD/Rat43.dat b/examples/NIST_STRD/Rat43.dat
new file mode 100644
index 0000000..ca6d1dc
--- /dev/null
+++ b/examples/NIST_STRD/Rat43.dat
@@ -0,0 +1,75 @@
+NIST/ITL StRD
+Dataset Name: Rat43 (Rat43.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 75)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: This model and data are an example of fitting
+ sigmoidal growth curves taken from Ratkowsky (1983).
+ The response variable is the dry weight of onion bulbs
+ and tops, and the predictor variable is growing time.
+
+
+Reference: Ratkowsky, D.A. (1983).
+ Nonlinear Regression Modeling.
+ New York, NY: Marcel Dekker, pp. 62 and 88.
+
+
+
+
+
+Data: 1 Response (y = onion bulb dry weight)
+ 1 Predictor (x = growing time)
+ 15 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Exponential Class
+ 4 Parameters (b1 to b4)
+
+ y = b1 / ((1+exp[b2-b3*x])**(1/b4)) + e
+
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 100 700 6.9964151270E+02 1.6302297817E+01
+ b2 = 10 5 5.2771253025E+00 2.0828735829E+00
+ b3 = 1 0.75 7.5962938329E-01 1.9566123451E-01
+ b4 = 1 1.3 1.2792483859E+00 6.8761936385E-01
+
+Residual Sum of Squares: 8.7864049080E+03
+Residual Standard Deviation: 2.8262414662E+01
+Degrees of Freedom: 9
+Number of Observations: 15
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 16.08E0 1.0E0
+ 33.83E0 2.0E0
+ 65.80E0 3.0E0
+ 97.20E0 4.0E0
+ 191.55E0 5.0E0
+ 326.20E0 6.0E0
+ 386.87E0 7.0E0
+ 520.53E0 8.0E0
+ 590.03E0 9.0E0
+ 651.92E0 10.0E0
+ 724.93E0 11.0E0
+ 699.56E0 12.0E0
+ 689.96E0 13.0E0
+ 637.56E0 14.0E0
+ 717.41E0 15.0E0
diff --git a/examples/NIST_STRD/Roszman1.dat b/examples/NIST_STRD/Roszman1.dat
new file mode 100644
index 0000000..ddab210
--- /dev/null
+++ b/examples/NIST_STRD/Roszman1.dat
@@ -0,0 +1,85 @@
+NIST/ITL StRD
+Dataset Name: Roszman1 (Roszman1.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 44)
+ Certified Values (lines 41 to 49)
+ Data (lines 61 to 85)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ quantum defects in iodine atoms. The response
+ variable is the number of quantum defects, and the
+ predictor variable is the excited energy state.
+ The argument to the ARCTAN function is in radians.
+
+Reference: Roszman, L., NIST (19??).
+ Quantum Defects for Sulfur I Atom.
+
+
+
+
+
+
+Data: 1 Response (y = quantum defect)
+ 1 Predictor (x = excited state energy)
+ 25 Observations
+ Average Level of Difficulty
+ Observed Data
+
+Model: Miscellaneous Class
+ 4 Parameters (b1 to b4)
+
+ pi = 3.141592653589793238462643383279E0
+ y = b1 - b2*x - arctan[b3/(x-b4)]/pi + e
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 0.1 0.2 2.0196866396E-01 1.9172666023E-02
+ b2 = -0.00001 -0.000005 -6.1953516256E-06 3.2058931691E-06
+ b3 = 1000 1200 1.2044556708E+03 7.4050983057E+01
+ b4 = -100 -150 -1.8134269537E+02 4.9573513849E+01
+
+Residual Sum of Squares: 4.9484847331E-04
+Residual Standard Deviation: 4.8542984060E-03
+Degrees of Freedom: 21
+Number of Observations: 25
+
+
+
+
+
+
+
+
+
+
+Data: y x
+ 0.252429 -4868.68
+ 0.252141 -4868.09
+ 0.251809 -4867.41
+ 0.297989 -3375.19
+ 0.296257 -3373.14
+ 0.295319 -3372.03
+ 0.339603 -2473.74
+ 0.337731 -2472.35
+ 0.333820 -2469.45
+ 0.389510 -1894.65
+ 0.386998 -1893.40
+ 0.438864 -1497.24
+ 0.434887 -1495.85
+ 0.427893 -1493.41
+ 0.471568 -1208.68
+ 0.461699 -1206.18
+ 0.461144 -1206.04
+ 0.513532 -997.92
+ 0.506641 -996.61
+ 0.505062 -996.31
+ 0.535648 -834.94
+ 0.533726 -834.66
+ 0.568064 -710.03
+ 0.612886 -530.16
+ 0.624169 -464.17
diff --git a/examples/NIST_STRD/Thurber.dat b/examples/NIST_STRD/Thurber.dat
new file mode 100644
index 0000000..6ecdc77
--- /dev/null
+++ b/examples/NIST_STRD/Thurber.dat
@@ -0,0 +1,97 @@
+NIST/ITL StRD
+Dataset Name: Thurber (Thurber.dat)
+
+File Format: ASCII
+ Starting Values (lines 41 to 47)
+ Certified Values (lines 41 to 52)
+ Data (lines 61 to 97)
+
+Procedure: Nonlinear Least Squares Regression
+
+Description: These data are the result of a NIST study involving
+ semiconductor electron mobility. The response
+ variable is a measure of electron mobility, and the
+ predictor variable is the natural log of the density.
+
+
+Reference: Thurber, R., NIST (197?).
+ Semiconductor electron mobility modeling.
+
+
+
+
+
+
+Data: 1 Response Variable (y = electron mobility)
+ 1 Predictor Variable (x = log[density])
+ 37 Observations
+ Higher Level of Difficulty
+ Observed Data
+
+Model: Rational Class (cubic/cubic)
+ 7 Parameters (b1 to b7)
+
+ y = (b1 + b2*x + b3*x**2 + b4*x**3) /
+ (1 + b5*x + b6*x**2 + b7*x**3) + e
+
+
+ Starting Values Certified Values
+
+ Start 1 Start 2 Parameter Standard Deviation
+ b1 = 1000 1300 1.2881396800E+03 4.6647963344E+00
+ b2 = 1000 1500 1.4910792535E+03 3.9571156086E+01
+ b3 = 400 500 5.8323836877E+02 2.8698696102E+01
+ b4 = 40 75 7.5416644291E+01 5.5675370270E+00
+ b5 = 0.7 1 9.6629502864E-01 3.1333340687E-02
+ b6 = 0.3 0.4 3.9797285797E-01 1.4984928198E-02
+ b7 = 0.03 0.05 4.9727297349E-02 6.5842344623E-03
+
+Residual Sum of Squares: 5.6427082397E+03
+Residual Standard Deviation: 1.3714600784E+01
+Degrees of Freedom: 30
+Number of Observations: 37
+
+
+
+
+
+
+
+Data: y x
+ 80.574E0 -3.067E0
+ 84.248E0 -2.981E0
+ 87.264E0 -2.921E0
+ 87.195E0 -2.912E0
+ 89.076E0 -2.840E0
+ 89.608E0 -2.797E0
+ 89.868E0 -2.702E0
+ 90.101E0 -2.699E0
+ 92.405E0 -2.633E0
+ 95.854E0 -2.481E0
+ 100.696E0 -2.363E0
+ 101.060E0 -2.322E0
+ 401.672E0 -1.501E0
+ 390.724E0 -1.460E0
+ 567.534E0 -1.274E0
+ 635.316E0 -1.212E0
+ 733.054E0 -1.100E0
+ 759.087E0 -1.046E0
+ 894.206E0 -0.915E0
+ 990.785E0 -0.714E0
+ 1090.109E0 -0.566E0
+ 1080.914E0 -0.545E0
+ 1122.643E0 -0.400E0
+ 1178.351E0 -0.309E0
+ 1260.531E0 -0.109E0
+ 1273.514E0 -0.103E0
+ 1288.339E0 0.010E0
+ 1327.543E0 0.119E0
+ 1353.863E0 0.377E0
+ 1414.509E0 0.790E0
+ 1425.208E0 0.963E0
+ 1421.384E0 1.006E0
+ 1442.962E0 1.115E0
+ 1464.350E0 1.572E0
+ 1468.705E0 1.841E0
+ 1447.894E0 2.047E0
+ 1457.628E0 2.200E0
diff --git a/examples/example_anneal.py b/examples/example_anneal.py
new file mode 100644
index 0000000..e59fa09
--- /dev/null
+++ b/examples/example_anneal.py
@@ -0,0 +1,63 @@
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+
+from lmfit import Parameters, minimize
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.33)
+p_true.add('shift', value=0.123)
+p_true.add('decay', value=0.010)
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ per = pars['period'].value
+ shift = pars['shift'].value
+ decay = pars['decay'].value
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 2500
+xmin = 0.
+xmax = 250.0
+noise = random.normal(scale=0.7215, size=n)
+x = linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=13.0, min=-5, max=40)
+fit_params.add('period', value=2, min=0, max=7)
+fit_params.add('shift', value=0.0, min=-1.5, max=1.5)
+fit_params.add('decay', value=0.02, min=0, max=1.0)
+#p_true.add('amp', value=14.0)
+#p_true.add('period', value=5.33)
+#p_true.add('shift', value=0.123)
+#p_true.add('decay', value=0.010)
+
+out = minimize(residual, fit_params, method='anneal',
+ Tf= 1000,
+ args=(x,), kws={'data':data})
+
+print out.sa_out
+for key, par in fit_params.items():
+ print key, par, p_true[key].value
+
+
+if HASPYLAB:
+ pylab.plot(x, data, 'ro')
+ pylab.plot(x, fit, 'b')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/example_ci.py b/examples/example_ci.py
new file mode 100644
index 0000000..8899b8e
--- /dev/null
+++ b/examples/example_ci.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Apr 15 19:47:45 2012
+
+@author: Tillsten
+"""
+import numpy as np
+from lmfit import Parameters, minimize, conf_interval, report_fit, report_ci
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from scipy.optimize import leastsq
+
+try:
+ import matplotlib.pyplot as plt
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.33)
+p_true.add('shift', value=0.123)
+p_true.add('decay', value=0.010)
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ per = pars['period'].value
+ shift = pars['shift'].value
+ decay = pars['decay'].value
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 2500
+xmin = 0.
+xmax = 250.0
+noise = random.normal(scale=0.7215, size=n)
+x = linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=13.0)
+fit_params.add('period', value=2)
+fit_params.add('shift', value=0.0)
+fit_params.add('decay', value=0.02)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+fit = residual(fit_params, x)
+
+print( ' N fev = ', out.nfev)
+print( out.chisqr, out.redchi, out.nfree)
+
+report_fit(fit_params)
+#ci=calc_ci(out)
+ci, tr = conf_interval(out, trace=True)
+report_ci(ci)
+
+if HASPYLAB:
+ names=fit_params.keys()
+ i=0
+ gs=pylab.GridSpec(4,4)
+ sx={}
+ sy={}
+ for fixed in names:
+ j=0
+ for free in names:
+ if j in sx and i in sy:
+ ax=pylab.subplot(gs[i,j],sharex=sx[j],sharey=sy[i])
+ elif i in sy:
+ ax=pylab.subplot(gs[i,j],sharey=sy[i])
+ sx[j]=ax
+ elif j in sx:
+ ax=pylab.subplot(gs[i,j],sharex=sx[j])
+ sy[i]=ax
+ else:
+ ax=pylab.subplot(gs[i,j])
+ sy[i]=ax
+ sx[j]=ax
+ if i<3:
+ pylab.setp( ax.get_xticklabels(), visible=False)
+ else:
+ ax.set_xlabel(free)
+
+ if j>0:
+ pylab.setp( ax.get_yticklabels(), visible=False)
+ else:
+ ax.set_ylabel(fixed)
+
+ res=tr[fixed]
+ prob=res['prob']
+ f=prob<0.96
+
+ x,y=res[free], res[fixed]
+ ax.scatter(x[f],y[f],
+ c=1-prob[f],s=200*(1-prob[f]+0.5))
+ ax.autoscale(1,1)
+
+
+
+ j=j+1
+ i=i+1
+
+ pylab.show()
+
+
diff --git a/examples/example_ci2.py b/examples/example_ci2.py
new file mode 100644
index 0000000..4d2f2ab
--- /dev/null
+++ b/examples/example_ci2.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+from lmfit import Parameters, Minimizer, conf_interval, conf_interval2d, minimize
+import numpy as np
+from scipy.interpolate import interp1d
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+np.random.seed(1)
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('decay', value=0.010)
+p_true.add('amp2', value=-10.0)
+p_true.add('decay2', value=0.050)
+
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ decay = pars['decay'].value
+ amp2 = pars['amp2'].value
+ decay2 = pars['decay2'].value
+
+
+ model = amp*np.exp(-x*decay)+amp2*np.exp(-x*decay2)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 200
+xmin = 0.
+xmax = 250.0
+noise = np.random.normal(scale=0.7215, size=n)
+x = np.linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=14.0)
+fit_params.add('decay', value=0.010)
+fit_params.add('amp2', value=-10.0)
+fit_params.add('decay2', value=0.050)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+out.leastsq()
+ci, trace = conf_interval(out, trace=True)
+
+
+names=fit_params.keys()
+
+if HASPYLAB:
+ pylab.rcParams['font.size']=8
+ pylab.plot(x,data)
+ pylab.figure()
+ cm=pylab.cm.coolwarm
+ for i in range(4):
+ for j in range(4):
+ pylab.subplot(4,4,16-j*4-i)
+ if i!=j:
+ x,y,m = conf_interval2d(out,names[i],names[j],20,20)
+ pylab.contourf(x,y,m,np.linspace(0,1,10),cmap=cm)
+ pylab.xlabel(names[i])
+ pylab.ylabel(names[j])
+
+ x=trace[names[i]][names[i]]
+ y=trace[names[i]][names[j]]
+ pr=trace[names[i]]['prob']
+ s=np.argsort(x)
+ pylab.scatter(x[s],y[s],c=pr[s],s=30,lw=1, cmap=cm)
+ else:
+ x=trace[names[i]][names[i]]
+ y=trace[names[i]]['prob']
+
+ t,s=np.unique(x,True)
+ f=interp1d(t,y[s],'slinear')
+ xn=np.linspace(x.min(),x.max(),50)
+ pylab.plot(xn,f(xn),'g',lw=1)
+ pylab.xlabel(names[i])
+ pylab.ylabel('prob')
+
+ pylab.show()
+
+
+
+
+
+
diff --git a/examples/example_covar.py b/examples/example_covar.py
new file mode 100644
index 0000000..1152b05
--- /dev/null
+++ b/examples/example_covar.py
@@ -0,0 +1,93 @@
+import sys
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from scipy.optimize import leastsq
+
+from lmfit import Parameters, Minimizer, report_fit
+from lmfit.utilfuncs import gauss, loren, pvoigt
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+HASPYLAB = False
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = yg + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+
+ return (model - data)/sigma
+
+
+n = 201
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+p_true = Parameters()
+p_true.add('amp_g', value=21.0)
+p_true.add('cen_g', value=8.1)
+p_true.add('wid_g', value=1.6)
+p_true.add('line_off', value=-1.023)
+p_true.add('line_slope', value=0.62)
+
+data = (gauss(x, p_true['amp_g'].value, p_true['cen_g'].value,
+ p_true['wid_g'].value) +
+ random.normal(scale=0.23, size=n) +
+ x*p_true['line_slope'].value + p_true['line_off'].value )
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+p_fit = Parameters()
+p_fit.add('amp_g', value=10.0)
+p_fit.add('cen_g', value=9)
+p_fit.add('wid_g', value=1)
+p_fit.add('line_slope', value=0.0)
+p_fit.add('line_off', value=0.0)
+
+myfit = Minimizer(residual, p_fit,
+ fcn_args=(x,),
+ fcn_kws={'sigma':0.2, 'data':data})
+
+myfit.prepare_fit()
+#
+for scale_covar in (True, False):
+ myfit.scale_covar = scale_covar
+ print ' ==== scale_covar = ', myfit.scale_covar, ' ==='
+ for sigma in (0.1, 0.2, 0.23, 0.5):
+ myfit.userkws['sigma'] = sigma
+
+ p_fit['amp_g'].value = 10
+ p_fit['cen_g'].value = 9
+ p_fit['wid_g'].value = 1
+ p_fit['line_slope'].value =0.0
+ p_fit['line_off'].value =0.0
+
+ myfit.leastsq()
+ print ' sigma = ', sigma
+ print ' chisqr = ', myfit.chisqr
+ print ' reduced_chisqr = ', myfit.redchi
+
+ report_fit(p_fit, modelpars=p_true, show_correl=False)
+ print ' =============================='
+
+
+# if HASPYLAB:
+# fit = residual(p_fit, x)
+# pylab.plot(x, fit, 'k-')
+# pylab.show()
+#
+
+
+
+
diff --git a/examples/example_derivfunc.py b/examples/example_derivfunc.py
new file mode 100644
index 0000000..32b87e7
--- /dev/null
+++ b/examples/example_derivfunc.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+from lmfit import Parameters, Minimizer
+import numpy as np
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+def func(pars, x, data=None):
+ a = pars['a'].value
+ b = pars['b'].value
+ c = pars['c'].value
+
+ model=a * np.exp(-b * x)+c
+ if data is None:
+ return model
+ return (model - data)
+
+def dfunc(pars, x, data=None):
+ a = pars['a'].value
+ b = pars['b'].value
+ c = pars['c'].value
+ v = np.exp(-b*x)
+ return [v, -a*x*v, np.ones(len(x))]
+
+def f(var, x):
+ return var[0]* np.exp(-var[1] * x)+var[2]
+
+params1 = Parameters()
+params1.add('a', value=10)
+params1.add('b', value=10)
+params1.add('c', value=10)
+
+params2 = Parameters()
+params2.add('a', value=10)
+params2.add('b', value=10)
+params2.add('c', value=10)
+
+a, b, c = 2.5, 1.3, 0.8
+x = np.linspace(0,4,50)
+y = f([a, b, c], x)
+data = y + 0.15*np.random.normal(size=len(x))
+
+# fit without analytic derivative
+min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data':data})
+min1.leastsq()
+fit1 = func(params1, x)
+
+# fit with analytic derivative
+min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data':data})
+min2.leastsq(Dfun=dfunc, col_deriv=1)
+fit2 = func(params2, x)
+
+print '''Comparison of fit to exponential decay
+with and without analytic derivatives, to
+ model = a*exp(-b*x) + c
+for a = %.2f, b = %.2f, c = %.2f
+==============================================
+Statistic/Parameter| Without | With |
+----------------------------------------------
+N Function Calls | %3i | %3i |
+Chi-square | %.4f | %.4f |
+ a | %.4f | %.4f |
+ b | %.4f | %.4f |
+ c | %.4f | %.4f |
+----------------------------------------------
+''' % (a, b, c,
+ min1.nfev, min2.nfev,
+ min1.chisqr, min2.chisqr,
+ params1['a'].value, params2['a'].value,
+ params1['b'].value, params2['b'].value,
+ params1['c'].value, params2['c'].value )
+
+
+if HASPYLAB:
+ pylab.plot(x, data, 'ro')
+ pylab.plot(x, fit1, 'b')
+ pylab.plot(x, fit2, 'k')
+ pylab.show()
+
+
+
diff --git a/examples/example_lbfgsb.py b/examples/example_lbfgsb.py
new file mode 100644
index 0000000..772dea1
--- /dev/null
+++ b/examples/example_lbfgsb.py
@@ -0,0 +1,66 @@
+from lmfit import Parameters, minimize, report_fit
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.33)
+p_true.add('shift', value=0.123)
+p_true.add('decay', value=0.010)
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ per = pars['period'].value
+ shift = pars['shift'].value
+ decay = pars['decay'].value
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 2500
+xmin = 0.
+xmax = 250.0
+noise = random.normal(scale=0.7215, size=n)
+x = linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=11.0, min=5, max=20)
+fit_params.add('period', value=5., min=1., max=7)
+fit_params.add('shift', value=.10, min=0.0, max=0.2)
+fit_params.add('decay', value=6.e-3, min=0, max=0.1)
+
+init = residual(fit_params, x)
+
+out = minimize(residual, fit_params, method='lbfgsb', args=(x,), kws={'data':data})
+
+fit = residual(fit_params, x)
+
+for name, par in fit_params.items():
+ nout = "%s:%s" % (name, ' '*(20-len(name)))
+ print "%s: %s (%s) " % (nout, par.value, p_true[name].value)
+
+#print out.chisqr, out.redchi, out.nfree
+#
+#report_fit(fit_params)
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r--')
+ pylab.plot(x, init, 'k')
+ pylab.plot(x, fit, 'b')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/example_peakmodel.py b/examples/example_peakmodel.py
new file mode 100644
index 0000000..ce0bfea
--- /dev/null
+++ b/examples/example_peakmodel.py
@@ -0,0 +1,33 @@
+"""
+Example using the built-in Peak-like models
+"""
+import numpy as np
+from lmfit.models1d import GaussianModel, LorentzianModel, VoigtModel
+import matplotlib.pyplot as plt
+
+x = np.linspace(0, 10, 101)
+
+sca = 1./(2.0*np.sqrt(2*np.pi))
+noise = 5e-2*np.random.randn(len(x))
+dat = 2.60 -0.04*x + 7.5 * np.exp(-(x-4.0)**2 / (2*0.35)**2) + noise
+
+mod = GaussianModel(background='linear')
+# mod = VoigtModel(background='linear')
+# mod = LorentzianModel(background='linear')
+
+mod.guess_starting_values(dat, x)
+
+
+plt.plot(x, dat)
+
+# initial guess
+plt.plot(x, mod.model(x=x) + mod.calc_background(x), 'r+')
+
+mod.fit(dat, x=x)
+
+print mod.fit_report()
+
+# best fit
+plt.plot(x, mod.model(x=x) + mod.calc_background(x))
+plt.show()
+
diff --git a/examples/example_stepmodel.py b/examples/example_stepmodel.py
new file mode 100644
index 0000000..9bfaad7
--- /dev/null
+++ b/examples/example_stepmodel.py
@@ -0,0 +1,28 @@
+import numpy as np
+from lmfit.models1d import StepModel
+
+import matplotlib.pyplot as plt
+
+x = np.linspace(0, 10, 201)
+dat = np.ones_like(x)
+dat[:48] = 0.0
+dat[48:77] = np.arange(77-48)/(77.0-48)
+dat = dat + 5e-2*np.random.randn(len(x))
+dat = 110.2 * dat + 12.0
+
+mod = StepModel(background='constant', form='erf') # linear') # 'atan')
+
+mod.guess_starting_values(dat, x)
+
+init = mod.model(x=x)+mod.calc_background(x)
+mod.fit(dat, x=x)
+
+print mod.fit_report()
+
+fit = mod.model(x=x)+mod.calc_background(x)
+
+plt.plot(x, dat)
+plt.plot(x, init, 'r+')
+plt.plot(x, fit)
+plt.show()
+
diff --git a/examples/fit1.py b/examples/fit1.py
new file mode 100644
index 0000000..edf278a
--- /dev/null
+++ b/examples/fit1.py
@@ -0,0 +1,63 @@
+from lmfit import Parameters, minimize, report_fit
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from scipy.optimize import leastsq
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.33)
+p_true.add('shift', value=0.123)
+p_true.add('decay', value=0.010)
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ per = pars['period'].value
+ shift = pars['shift'].value
+ decay = pars['decay'].value
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 2500
+xmin = 0.
+xmax = 250.0
+noise = random.normal(scale=0.7215, size=n)
+x = linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=13.0)
+fit_params.add('period', value=2)
+fit_params.add('shift', value=0.0)
+fit_params.add('decay', value=0.02)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+fit = residual(fit_params, x)
+
+print ' N fev = ', out.nfev
+print out.chisqr, out.redchi, out.nfree
+
+print '### Error Report:'
+report_fit(fit_params)
+
+
+if HASPYLAB:
+ pylab.plot(x, data, 'ro')
+ pylab.plot(x, fit, 'b')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/fit_NIST.py b/examples/fit_NIST.py
new file mode 100644
index 0000000..bf1ceaf
--- /dev/null
+++ b/examples/fit_NIST.py
@@ -0,0 +1,163 @@
+from __future__ import print_function
+import sys
+import math
+
+from optparse import OptionParser
+
+
+try:
+ import matplotlib
+ matplotlib.use('WXAgg')
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+from lmfit import Parameters, minimize
+
+from NISTModels import Models, ReadNistData
+
+
+def ndig(a, b):
+ return int(0.5-math.log10(abs(abs(a)-abs(b))/abs(b)))
+
+def Compare_NIST_Results(DataSet, myfit, params, NISTdata):
+ print(' ======================================')
+ print(' %s: ' % DataSet)
+ print(' | Parameter Name | Value Found | Certified Value | # Matching Digits |')
+ print(' |----------------+----------------+------------------+-------------------|')
+
+ val_dig_min = 200
+ err_dig_min = 200
+ for i in range(NISTdata['nparams']):
+ parname = 'b%i' % (i+1)
+ par = params[parname]
+ thisval = par.value
+ certval = NISTdata['cert_values'][i]
+ vdig = ndig(thisval, certval)
+ pname = (parname + ' value ' + ' '*14)[:14]
+ print(' | %s | % -.7e | % -.7e | %2i |' % (pname, thisval, certval, vdig))
+ val_dig_min = min(val_dig_min, vdig)
+
+ thiserr = par.stderr
+ certerr = NISTdata['cert_stderr'][i]
+ if thiserr is not None:
+ edig = ndig(thiserr, certerr)
+ ename = (parname + ' stderr' + ' '*14)[:14]
+ print(' | %s | % -.7e | % -.7e | %2i |' % (ename, thiserr, certerr, edig))
+ err_dig_min = min(err_dig_min, edig)
+
+ print(' |----------------+----------------+------------------+-------------------|')
+ sumsq = NISTdata['sum_squares']
+ try:
+ chi2 = myfit.chisqr
+ print(' | Sum of Squares | %.7e | %.7e | %2i |' % (chi2, sumsq,
+ ndig(chi2, sumsq)))
+ except:
+ pass
+ print(' |----------------+----------------+------------------+-------------------|')
+ if err_dig_min < 199:
+ print(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min))
+ else:
+ print(' Worst agreement: %i digits' % (val_dig_min))
+ return val_dig_min
+
+def NIST_Test(DataSet, method='leastsq', start='start2', plot=True):
+
+ NISTdata = ReadNistData(DataSet)
+ resid, npar, dimx = Models[DataSet]
+ y = NISTdata['y']
+ x = NISTdata['x']
+
+ params = Parameters()
+ for i in range(npar):
+ pname = 'b%i' % (i+1)
+ cval = NISTdata['cert_values'][i]
+ cerr = NISTdata['cert_stderr'][i]
+ pval1 = NISTdata[start][i]
+ params.add(pname, value=pval1)
+
+
+ myfit = minimize(resid, params, method=method, args=(x,), kws={'y':y})
+
+
+ digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)
+
+ if plot and HASPYLAB:
+ fit = -resid(params, x, )
+ pylab.plot(x, y, 'ro')
+ pylab.plot(x, fit, 'k+-')
+ pylab.show()
+
+ return digs > 2
+
+
+modelnames = []
+ms = ''
+for d in sorted(Models.keys()):
+ ms = ms + ' %s ' % d
+ if len(ms) > 55:
+ modelnames.append(ms)
+ ms = ' '
+modelnames.append(ms)
+modelnames = '\n'.join(modelnames)
+
+usage = """
+ === Test Fit to NIST StRD Models ===
+
+usage:
+------
+ python fit_NIST.py [options] Model Start
+
+where Start is either 'start1' or 'start2', for different
+starting values, and Model is one of
+
+ %s
+
+if Model = 'all', all models and starting values will be run.
+
+options:
+--------
+ -m name of fitting method. One of:
+ leastsq, nelder, powell, lbfgsb, bfgs,
+                        tnc, cobyla, slsqp, cg, newton-cg
+ leastsq (Levenberg-Marquardt) is the default
+""" % modelnames
+
+############################
+parser = OptionParser(usage=usage, prog="fit-NIST.py")
+
+parser.add_option("-m", "--method", dest="method", metavar='METH',
+ default='leastsq', help="set method name, default = 'leastsq'")
+
+(opts, args) = parser.parse_args()
+dset = ''
+start = 'start1'
+if len(args) > 0:
+ dset = args[0]
+if len(args) > 1:
+ start = args[1]
+
+if dset.lower() == 'all':
+ tpass = 0
+ tfail = 0
+ failures = []
+ dsets = sorted(Models.keys())
+ for dset in dsets:
+ for start in ('start1', 'start2'):
+ if NIST_Test(dset, method=opts.method, start=start, plot=False):
+ tpass += 1
+ else:
+ tfail += 1
+ failures.append(" %s (starting at '%s')" % (dset, start))
+
+ print('--------------------------------------')
+ print(' Fit Method: %s ' % opts.method)
+ print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+ print(' Tests Failed for:\n %s' % '\n '.join(failures))
+ print('--------------------------------------')
+elif dset not in Models:
+ print(usage)
+else:
+ NIST_Test(dset, method=opts.method, start=start, plot=True)
+
diff --git a/examples/fit_NIST_leastsq.py b/examples/fit_NIST_leastsq.py
new file mode 100644
index 0000000..525f562
--- /dev/null
+++ b/examples/fit_NIST_leastsq.py
@@ -0,0 +1,148 @@
+# from __future__ import print_function
+import sys
+import math
+
+try:
+ import matplotlib
+ matplotlib.use('WXAgg')
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+from scipy.optimize import leastsq, curve_fit
+
+from NISTModels import Models, ReadNistData
+
+
+def ndig(a, b):
+ return int(0.5-math.log10(abs(abs(a)-abs(b))/abs(b)))
+
+def Compare_NIST_Results(DataSet, params, NISTdata):
+ print(' ======================================')
+ print(' %s: ' % DataSet)
+ print(' | Parameter Name | Value Found | Certified Value | # Matching Digits |')
+ print( ' |----------------+----------------+------------------+-------------------|')
+
+ val_dig_min = 1000
+ err_dig_min = 1000
+ for i in range(NISTdata['nparams']):
+ parname = 'b%i' % (i+1)
+ par = params[parname]
+ thisval = par.value
+ certval = NISTdata['cert_values'][i]
+
+ thiserr = par.stderr
+ certerr = NISTdata['cert_stderr'][i]
+ vdig = ndig(thisval, certval)
+ edig = ndig(thiserr, certerr)
+
+ pname = (parname + ' value ' + ' '*14)[:14]
+ ename = (parname + ' stderr' + ' '*14)[:14]
+ print(' | %s | % -.7e | % -.7e | %2i |' % (pname, thisval, certval, vdig))
+ print(' | %s | % -.7e | % -.7e | %2i |' % (ename, thiserr, certerr, edig))
+
+ val_dig_min = min(val_dig_min, vdig)
+ err_dig_min = min(err_dig_min, edig)
+
+ print(' |----------------+----------------+------------------+-------------------|')
+ sumsq = NISTdata['sum_squares']
+ chi2 = 'xx' # myfit.chisqr
+ print(' | Sum of Squares | %.7e | %.7e | %2i |' % (chi2, sumsq,
+ ndig(chi2, sumsq)))
+ print(' |----------------+----------------+------------------+-------------------|')
+
+ print(' Worst agreement: %i digits for value, %i digits for error ' % (val_dig_min, err_dig_min))
+
+ return val_dig_min
+
+def NIST_Test(DataSet, start='start2', plot=True):
+
+ NISTdata = ReadNistData(DataSet)
+ resid, npar, dimx = Models[DataSet]
+ y = NISTdata['y']
+ x = NISTdata['x']
+
+ params = []
+ param_names = []
+ for i in range(npar):
+ pname = 'b%i' % (i+1)
+ cval = NISTdata['cert_values'][i]
+ cerr = NISTdata['cert_stderr'][i]
+ pval1 = NISTdata[start][i]
+ params.append(pval1)
+ param_names.append(pname)
+
+
+ # myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y':y},
+ # scale_covar=True)
+ #
+ print 'lsout ', params
+ lsout = leastsq(resid, params, args=(x, y), full_output=True)
+
+ print 'lsout ', lsout
+ print params , len(x), len(y)
+
+ digs = Compare_NIST_Results(DataSet, params, NISTdata)
+
+ if plot and HASPYLAB:
+ fit = -resid(params, x, )
+ pylab.plot(x, y, 'r+-')
+ pylab.plot(x, fit, 'ko--')
+ pylab.show()
+
+ return digs > 2
+
+msg1 = """
+----- NIST StRD Models -----
+Select one of the Models listed below:
+and a starting point of 'start1' or 'start2'
+"""
+
+msg2 = """
+That is, use
+    python fit_NIST.py Bennett5 start1
+or go through all models and starting points with:
+ python fit_NIST.py all
+"""
+
+if __name__ == '__main__':
+ dset = 'Bennett5'
+ start = 'start2'
+ if len(sys.argv) < 2:
+ print(msg1)
+ out = ''
+ for d in sorted(Models.keys()):
+ out = out + ' %s ' % d
+ if len(out) > 55:
+ print( out)
+ out = ''
+ print(out)
+ print(msg2)
+
+ sys.exit()
+
+ if len(sys.argv) > 1:
+ dset = sys.argv[1]
+ if len(sys.argv) > 2:
+ start = sys.argv[2]
+ if dset.lower() == 'all':
+ tpass = 0
+ tfail = 0
+ failures = []
+ dsets = sorted(Models.keys())
+ for dset in dsets:
+ for start in ('start1', 'start2'):
+ if NIST_Test(dset, start=start, plot=False):
+ tpass += 1
+ else:
+ tfail += 1
+ failures.append(" %s (starting at '%s')" % (dset, start))
+
+ print('--------------------------------------')
+ print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+ print(' Tests Failed for:\n %s' % '\n '.join(failures))
+ print('--------------------------------------')
+ else:
+ NIST_Test(dset, start=start, plot=True)
+
diff --git a/examples/fit_NIST_scipy_lmdif.py b/examples/fit_NIST_scipy_lmdif.py
new file mode 100644
index 0000000..453694f
--- /dev/null
+++ b/examples/fit_NIST_scipy_lmdif.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+"""This runs the same fits for the NIST StRD models but using
+plain old scipy.optimize and relying on no code from lmfit.
+In fact, it goes right down to using
+ scipy.optimize._minpack._lmdif()
+
+The tests only check best-fit value, not estimated uncertainties.
+Currently, not all tests pass.
+
+
+"""
+
+from __future__ import print_function
+import sys
+import math
+import numpy as np
+
+from scipy.optimize import _minpack
+
+from NISTModels import Models, ReadNistData
+
+try:
+ import matplotlib
+ matplotlib.use('WXAgg')
+ import pylab
+ HASPYLAB = True
+except IOError:
+ HASPYLAB = False
+
+
+def ndig(a, b):
+ return int(0.5-math.log10(abs(abs(a)-abs(b))/abs(b)))
+
+def Compare_NIST_Results(DataSet, vals, NISTdata):
+ #print(' ======================================')
+ print(' %s: ' % DataSet)
+ print(' | Parameter Name | Value Found | Certified Value | # Matching Digits |')
+ print(' |----------------+----------------+------------------+-------------------|')
+
+ val_dig_min = 1000
+ err_dig_min = 1000
+ for i in range(NISTdata['nparams']):
+ parname = 'b%i' % (i+1)
+ thisval = vals[i]
+ certval = NISTdata['cert_values'][i]
+
+ vdig = ndig(thisval, certval)
+
+ pname = (parname + ' value ' + ' '*14)[:14]
+ print(' | %s | % -.7e | % -.7e | %2i |' % (pname, thisval, certval, vdig))
+ val_dig_min = min(val_dig_min, vdig)
+
+ print(' |----------------+----------------+------------------+-------------------|')
+ print(' Worst agreement: %i digits for value ' % (val_dig_min))
+ return val_dig_min
+
+def NIST_Test(DataSet, start='start2', plot=True):
+
+ NISTdata = ReadNistData(DataSet)
+ resid, npar, dimx = Models[DataSet]
+ y = NISTdata['y']
+ x = NISTdata['x']
+
+ vals = []
+ for i in range(npar):
+ pname = 'b%i' % (i+1)
+ cval = NISTdata['cert_values'][i]
+ cerr = NISTdata['cert_stderr'][i]
+ pval1 = NISTdata[start][i]
+ vals.append(pval1)
+
+ maxfev = 2500 * (npar + 1)
+ factor = 100
+ xtol = 1.e-14
+ ftol = 1.e-14
+ epsfcn = 1.e-13
+ gtol = 1.e-14
+ diag = None
+ print(" Fit with: ", factor, xtol, ftol, gtol, epsfcn, diag)
+
+ _best, out, ier = _minpack._lmdif(resid, vals, (x, y), 1,
+ ftol, xtol, gtol,
+ maxfev, epsfcn, factor, diag)
+
+ digs = Compare_NIST_Results(DataSet, _best, NISTdata)
+
+ if plot and HASPYLAB:
+ fit = -resid(_best, x, )
+ pylab.plot(x, y, 'ro')
+ pylab.plot(x, fit, 'k+-')
+ pylab.show()
+
+ return digs > 2
+
+msg1 = """
+----- NIST StRD Models -----
+Select one of the Models listed below:
+and a starting point of 'start1' or 'start2'
+"""
+
+msg2 = """
+That is, use
+ python fit_NIST.py Bennett5 start1
+or go through all models and starting points with:
+ python fit_NIST.py all
+"""
+
+if __name__ == '__main__':
+ dset = 'Bennett5'
+ start = 'start2'
+ if len(sys.argv) < 2:
+ print(msg1)
+ out = ''
+ for d in sorted(Models.keys()):
+ out = out + ' %s ' % d
+ if len(out) > 55:
+ print( out)
+ out = ''
+ print(out)
+ print(msg2)
+
+ sys.exit()
+
+ if len(sys.argv) > 1:
+ dset = sys.argv[1]
+ if len(sys.argv) > 2:
+ start = sys.argv[2]
+ if dset.lower() == 'all':
+ tpass = 0
+ tfail = 0
+ failures = []
+ dsets = sorted(Models.keys())
+ for dset in dsets:
+ for start in ('start1', 'start2'):
+ if NIST_Test(dset, start=start, plot=False):
+ tpass += 1
+ else:
+ tfail += 1
+ failures.append(" %s (starting at '%s')" % (dset, start))
+
+ print('--------------------------------------')
+ print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
+ print(' Tests Failed for:\n %s' % '\n '.join(failures))
+ print('--------------------------------------')
+ else:
+ NIST_Test(dset, start=start, plot=True)
+
diff --git a/examples/fit_multi_datasets.py b/examples/fit_multi_datasets.py
new file mode 100644
index 0000000..13b22b9
--- /dev/null
+++ b/examples/fit_multi_datasets.py
@@ -0,0 +1,69 @@
+#
+# example fitting to multiple (simulated) data sets
+#
+import numpy as np
+import matplotlib.pyplot as plt
+from lmfit import minimize, Parameters, report_fit
+
+def gauss(x, amp, cen, sigma):
+ "basic gaussian"
+ return amp*np.exp(-(x-cen)**2/(2.*sigma**2))
+
+def gauss_dataset(params, i, x):
+ """calc gaussian from params for data set i
+ using simple, hardwired naming convention"""
+ amp = params['amp_%i' % (i+1)].value
+ cen = params['cen_%i' % (i+1)].value
+ sig = params['sig_%i' % (i+1)].value
+ return gauss(x, amp, cen, sig)
+
+def objective(params, x, data):
+ """ calculate total residual for fits to several data sets held
+ in a 2-D array, and modeled by Gaussian functions"""
+ ndata, nx = data.shape
+ resid = 0.0*data[:]
+ # make residual per data set
+ for i in range(ndata):
+ resid[i, :] = data[i, :] - gauss_dataset(params, i, x)
+ # now flatten this to a 1D array, as minimize() needs
+ return resid.flatten()
+
+# create 5 datasets
+x = np.linspace( -1, 2, 151)
+data = []
+for i in np.arange(5):
+ params = Parameters()
+ amp = 0.60 + 9.50*np.random.rand()
+ cen = -0.20 + 1.20*np.random.rand()
+ sig = 0.25 + 0.03*np.random.rand()
+ dat = gauss(x, amp, cen, sig) + np.random.normal(size=len(x), scale=0.1)
+ data.append(dat)
+
+# data has shape (5, 151)
+data = np.array(data)
+assert(data.shape) == (5, 151)
+
+# create 5 sets of parameters, one per data set
+fit_params = Parameters()
+for iy, y in enumerate(data):
+ fit_params.add( 'amp_%i' % (iy+1), value=0.5, min=0.0, max=200)
+ fit_params.add( 'cen_%i' % (iy+1), value=0.4, min=-2.0, max=2.0)
+ fit_params.add( 'sig_%i' % (iy+1), value=0.3, min=0.01, max=3.0)
+
+# but now constrain all values of sigma to have the same value
+# by assigning sig_2, sig_3, .. sig_5 to be equal to sig_1
+for iy in (2, 3, 4, 5):
+ fit_params['sig_%i' % iy].expr='sig_1'
+
+# run the global fit to all the data sets
+minimize(objective, fit_params, args=(x, data))
+report_fit(fit_params)
+
+# plot the data sets and fits
+plt.figure()
+for i in range(5):
+ y_fit = gauss_dataset(fit_params, i, x)
+ plt.plot(x, data[i, :], 'o', x, y_fit, '-')
+
+plt.show()
+
diff --git a/examples/fit_pvoigt.py b/examples/fit_pvoigt.py
new file mode 100644
index 0000000..8f828f7
--- /dev/null
+++ b/examples/fit_pvoigt.py
@@ -0,0 +1,99 @@
+import sys
+
+from lmfit import Parameters, Parameter, Minimizer, report_fit
+from lmfit.utilfuncs import gauss, loren, pvoigt
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+
+try:
+ import matplotlib
+ # matplotlib.use('WXAGG')
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+def per_iteration(pars, i, resid, x, *args, **kws):
+ if i < 10 or i % 10 == 0:
+ print( '====== Iteration ', i)
+ for p in pars.values():
+ print( p.name , p.value)
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+ yl = loren(x, pars['amp_l'].value,
+ pars['cen_l'].value, pars['wid_l'].value)
+
+ frac = pars['frac'].value
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = (1-frac) * yg + frac * yl + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+ return (model - data)/sigma
+
+
+n = 601
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+p_true = Parameters()
+p_true.add('amp_g', value=21.0)
+p_true.add('cen_g', value=8.1)
+p_true.add('wid_g', value=1.6)
+p_true.add('frac', value=0.37)
+p_true.add('line_off', value=-1.023)
+p_true.add('line_slope', value=0.62)
+
+data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
+ p_true['wid_g'].value, p_true['frac'].value) +
+ random.normal(scale=0.23, size=n) +
+ x*p_true['line_slope'].value + p_true['line_off'].value )
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+pfit = [Parameter(name='amp_g', value=10),
+ Parameter(name='amp_g', value=10.0),
+ Parameter(name='cen_g', value=9),
+ Parameter(name='wid_g', value=1),
+ Parameter(name='frac', value=0.50),
+ Parameter(name='amp_l', expr='amp_g'),
+ Parameter(name='cen_l', expr='cen_g'),
+ Parameter(name='wid_l', expr='wid_g'),
+ Parameter(name='line_slope', value=0.0),
+ Parameter(name='line_off', value=0.0)]
+
+sigma = 0.021 # estimate of data error (for all data points)
+
+myfit = Minimizer(residual, pfit, iter_cb=per_iteration,
+ fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+ scale_covar=True)
+
+myfit.prepare_fit()
+init = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, init, 'b--')
+
+myfit.leastsq()
+
+print(' Nfev = ', myfit.nfev)
+print( myfit.chisqr, myfit.redchi, myfit.nfree)
+
+report_fit(myfit.params, modelpars=p_true)
+
+fit = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/fit_pvoigt2.py b/examples/fit_pvoigt2.py
new file mode 100644
index 0000000..13df297
--- /dev/null
+++ b/examples/fit_pvoigt2.py
@@ -0,0 +1,87 @@
+import sys
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+
+from lmfit import Parameters, Parameter, Minimizer
+from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.printfuncs import report_fit
+
+try:
+ import matplotlib
+ matplotlib.use('WXAGG')
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+ yl = loren(x, pars['amp_l'].value,
+ pars['cen_l'].value, pars['wid_l'].value)
+
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = yg + yl + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+ return (model - data)/sigma
+
+
+n = 601
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+data = (gauss(x, 21, 8.1, 1.2) +
+ loren(x, 10, 9.6, 2.4) +
+ random.normal(scale=0.23, size=n) +
+ x*0.5)
+
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+pfit = [Parameter(name='amp_g', value=10),
+ Parameter(name='cen_g', value=9),
+ Parameter(name='wid_g', value=1),
+
+ Parameter(name='amp_tot', value=20),
+ Parameter(name='amp_l', expr='amp_tot - amp_g'),
+ Parameter(name='cen_l', expr='1.5+cen_g'),
+ Parameter(name='wid_l', expr='2*wid_g'),
+
+ Parameter(name='line_slope', value=0.0),
+ Parameter(name='line_off', value=0.0)]
+
+sigma = 0.021 # estimate of data error (for all data points)
+
+myfit = Minimizer(residual, pfit,
+ fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+ scale_covar=True)
+
+myfit.prepare_fit()
+init = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, init, 'b--')
+
+myfit.leastsq()
+
+print(' Nfev = ', myfit.nfev)
+print( myfit.chisqr, myfit.redchi, myfit.nfree)
+
+report_fit(myfit.params)
+
+fit = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
diff --git a/examples/fit_pvoigt_NelderMead.py b/examples/fit_pvoigt_NelderMead.py
new file mode 100644
index 0000000..790ae9a
--- /dev/null
+++ b/examples/fit_pvoigt_NelderMead.py
@@ -0,0 +1,100 @@
+import sys
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from lmfit import Parameters, Parameter, Minimizer, report_fit
+from lmfit.utilfuncs import gauss, loren, pvoigt
+
+
+try:
+ import matplotlib
+ matplotlib.use('WXAGG')
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+def per_iteration(pars, i, resid, x, *args, **kws):
+ if i < 10 or i % 10 == 0:
+ print( '====== Iteration ', i)
+ for p in pars.values():
+ print( p.name , p.value)
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+ yl = loren(x, pars['amp_l'].value,
+ pars['cen_l'].value, pars['wid_l'].value)
+
+ frac = pars['frac'].value
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = (1-frac) * yg + frac * yl + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+ return (model - data)/sigma
+
+
+n = 601
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+p_true = Parameters()
+p_true.add('amp_g', value=21.0)
+p_true.add('cen_g', value=8.1)
+p_true.add('wid_g', value=1.6)
+p_true.add('frac', value=0.37)
+p_true.add('line_off', value=-1.023)
+p_true.add('line_slope', value=0.62)
+
+data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
+ p_true['wid_g'].value, p_true['frac'].value) +
+ random.normal(scale=0.23, size=n) +
+ x*p_true['line_slope'].value + p_true['line_off'].value )
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+pfit = [Parameter(name='amp_g', value=10),
+ Parameter(name='amp_g', value=10.0),
+ Parameter(name='cen_g', value=9),
+ Parameter(name='wid_g', value=1),
+ Parameter(name='frac', value=0.50),
+ Parameter(name='amp_l', expr='amp_g'),
+ Parameter(name='cen_l', expr='cen_g'),
+ Parameter(name='wid_l', expr='wid_g'),
+ Parameter(name='line_slope', value=0.0),
+ Parameter(name='line_off', value=0.0)]
+
+sigma = 0.021 # estimate of data error (for all data points)
+
+myfit = Minimizer(residual, pfit, # iter_cb=per_iteration,
+ fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+ scale_covar=True)
+
+myfit.prepare_fit()
+init = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, init, 'b--')
+
+# fit with Nelder-Mead simplex method
+supported_methods = ('BFGS', 'COBYLA', 'SLSQP', 'Powell', 'Nelder-Mead')
+myfit.scalar_minimize(method='Nelder-Mead')
+
+
+print(' Nfev = ', myfit.nfev)
+# print( myfit.chisqr, myfit.redchi, myfit.nfree)
+# report_fit(myfit.params, modelpars=p_true)
+
+fit = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/fit_pvoigt_NelderMead2.py b/examples/fit_pvoigt_NelderMead2.py
new file mode 100644
index 0000000..5bac47e
--- /dev/null
+++ b/examples/fit_pvoigt_NelderMead2.py
@@ -0,0 +1,85 @@
+import sys
+from numpy import linspace, exp, random
+
+from lmfit import Parameters, minimize
+from lmfit.utilfuncs import gauss, loren, pvoigt
+
+try:
+ import matplotlib
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+def per_iteration(pars, i, resid, x, *args, **kws):
+ if i < 10 or i % 10 == 0:
+ print( '====== Iteration ', i)
+ for p in pars.values():
+ print( p.name , p.value)
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+ yl = loren(x, pars['amp_l'].value,
+ pars['cen_l'].value, pars['wid_l'].value)
+
+ frac = pars['frac'].value
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = (1-frac) * yg + frac * yl + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+ return (model - data)/sigma
+
+
+n = 601
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+p_true = Parameters()
+p_true.add('amp_g', value=21.0)
+p_true.add('cen_g', value=8.1)
+p_true.add('wid_g', value=1.6)
+p_true.add('frac', value=0.37)
+p_true.add('line_off', value=-1.023)
+p_true.add('line_slope', value=0.62)
+
+data = (pvoigt(x, p_true['amp_g'].value, p_true['cen_g'].value,
+ p_true['wid_g'].value, p_true['frac'].value) +
+ random.normal(scale=0.23, size=n) +
+ x*p_true['line_slope'].value + p_true['line_off'].value )
+
+
+pfit = Parameters()
+pfit.add('amp_g', value=10)
+pfit.add('amp_g', value=10.0)
+pfit.add('cen_g', value=9)
+pfit.add('wid_g', value=1)
+pfit.add('frac', value=0.50)
+pfit.add('amp_l', expr='amp_g')
+pfit.add('cen_l', expr='cen_g')
+pfit.add('wid_l', expr='wid_g')
+pfit.add('line_slope', value=0.0)
+pfit.add('line_off', value=0.0)
+
+sigma = 0.021
+
+myfit = minimize(residual, pfit, method='nelder',
+ args=(x,), kws={'sigma':sigma, 'data':data})
+
+print(' Nfev = ', myfit.nfev)
+
+fit = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/fit_with_algebraic_constraint.py b/examples/fit_with_algebraic_constraint.py
new file mode 100644
index 0000000..fe634d5
--- /dev/null
+++ b/examples/fit_with_algebraic_constraint.py
@@ -0,0 +1,85 @@
+import sys
+
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+
+from lmfit import Parameters, Parameter, Minimizer
+from lmfit.utilfuncs import gauss, loren, pvoigt
+from lmfit.printfuncs import report_fit
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+
+def residual(pars, x, sigma=None, data=None):
+ yg = gauss(x, pars['amp_g'].value,
+ pars['cen_g'].value, pars['wid_g'].value)
+ yl = loren(x, pars['amp_l'].value,
+ pars['cen_l'].value, pars['wid_l'].value)
+
+ slope = pars['line_slope'].value
+ offset = pars['line_off'].value
+ model = yg + yl + offset + x * slope
+ if data is None:
+ return model
+ if sigma is None:
+ return (model - data)
+ return (model - data)/sigma
+
+
+n = 601
+xmin = 0.
+xmax = 20.0
+x = linspace(xmin, xmax, n)
+
+data = (gauss(x, 21, 8.1, 1.2) +
+ loren(x, 10, 9.6, 2.4) +
+ random.normal(scale=0.23, size=n) +
+ x*0.5)
+
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+pfit = [Parameter(name='amp_g', value=10),
+ Parameter(name='cen_g', value=9),
+ Parameter(name='wid_g', value=1),
+
+ Parameter(name='amp_tot', value=20),
+ Parameter(name='amp_l', expr='amp_tot - amp_g'),
+ Parameter(name='cen_l', expr='1.5+cen_g'),
+ Parameter(name='wid_l', expr='2*wid_g'),
+
+ Parameter(name='line_slope', value=0.0),
+ Parameter(name='line_off', value=0.0)]
+
+sigma = 0.021 # estimate of data error (for all data points)
+
+myfit = Minimizer(residual, pfit,
+ fcn_args=(x,), fcn_kws={'sigma':sigma, 'data':data},
+ scale_covar=True)
+
+myfit.prepare_fit()
+init = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, init, 'b--')
+
+myfit.leastsq()
+
+print(' Nfev = ', myfit.nfev)
+print( myfit.chisqr, myfit.redchi, myfit.nfree)
+
+report_fit(myfit.params)
+
+fit = residual(myfit.params, x)
+
+if HASPYLAB:
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
diff --git a/examples/fit_with_bounds.py b/examples/fit_with_bounds.py
new file mode 100644
index 0000000..2e46e25
--- /dev/null
+++ b/examples/fit_with_bounds.py
@@ -0,0 +1,62 @@
+from lmfit import Parameters, minimize
+from lmfit.printfuncs import report_fit
+
+from numpy import linspace, zeros, sin, exp, random, pi, sign
+
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+p_true = Parameters()
+p_true.add('amp', value=14.0)
+p_true.add('period', value=5.4321)
+p_true.add('shift', value=0.12345)
+p_true.add('decay', value=0.01000)
+
+def residual(pars, x, data=None):
+ amp = pars['amp'].value
+ per = pars['period'].value
+ shift = pars['shift'].value
+ decay = pars['decay'].value
+
+ if abs(shift) > pi/2:
+ shift = shift - sign(shift)*pi
+
+ model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
+ if data is None:
+ return model
+ return (model - data)
+
+n = 1500
+xmin = 0.
+xmax = 250.0
+random.seed(0)
+noise = random.normal(scale=2.80, size=n)
+x = linspace(xmin, xmax, n)
+data = residual(p_true, x) + noise
+
+fit_params = Parameters()
+fit_params.add('amp', value=13.0, max=20, min=0.0)
+fit_params.add('period', value=2, max=10)
+fit_params.add('shift', value=0.0, max=pi/2., min=-pi/2.)
+fit_params.add('decay', value=0.02, max=0.10, min=0.00)
+
+out = minimize(residual, fit_params, args=(x,), kws={'data':data})
+
+fit = residual(fit_params, x)
+
+print '# N_func_evals, N_free = ', out.nfev, out.nfree
+print '# chi-square, reduced chi-square = % .7g, % .7g' % (out.chisqr, out.redchi)
+
+report_fit(fit_params, show_correl=True, modelpars=p_true)
+
+print 'Raw (unordered, unscaled) Covariance Matrix:'
+print out.covar
+
+if HASPYLAB:
+ pylab.plot(x, data, 'ro')
+ pylab.plot(x, fit, 'b')
+ pylab.show()
+
diff --git a/examples/lmfit-model.ipynb b/examples/lmfit-model.ipynb
new file mode 100644
index 0000000..05f9b2b
--- /dev/null
+++ b/examples/lmfit-model.ipynb
@@ -0,0 +1,567 @@
+{
+ "metadata": {
+ "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The ``Model`` class is a flexible, concise curve fitter. I will illustrate fitting example data to an exponential decay."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "def decay(t, N, tau):\n",
+ " return N*np.exp(-t/tau)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 1
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The parameters are in no particular order. We'll need some example data. I will use ``N=7`` and ``tau=3``, and I'll add a little noise."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "t = np.linspace(0, 5, num=1000)\n",
+ "data = decay(t, 7, 3) + np.random.randn(*t.shape)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 2
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Simplest Usage"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from lmfit import Model\n",
+ "\n",
+ "model = Model(decay, independent_vars=['t'])\n",
+ "result = model.fit(data, t=t, N=10, tau=1)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 3
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The Model infers the parameter names by inspecting the arguments of the function, ``decay``. Then I passed the independent variable, ``t``, and initial guesses for each parameter. A residual function is automatically defined, and a least-squared regression is performed.\n",
+ "\n",
+ "We can immediately see the best-fit values"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result.values"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 4,
+ "text": [
+ "{'N': 6.8332246334656945, 'tau': 3.0502578166074512}"
+ ]
+ }
+ ],
+ "prompt_number": 4
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "and easily pass those to the original model function for plotting:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "plot(t, data) # data\n",
+ "plot(t, decay(t=t, **result.values)) # best-fit model"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 5,
+ "text": [
+ "[<matplotlib.lines.Line2D at 0xb9a28cc>]"
+ ]
+ },
+ {
+ "metadata": {},
+ "output_type": "display_data",
+ "png": "iVBORw0KGgoAAAANSUhEUgAAAW4AAAEACAYAAACTXJylAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXl4E+Xah++0lJZ9B1lkK6W4gICAqIAVBYGjeFyOIMqi\ngIqC6xEVOQofHgVROaKodQFxQXFDFBRcK1hBEQURZKssspd9FUqT74+3IZNkJpmZTJKmfe7rGkgm\nM++8SZNfnjzvs7g8Ho8HQRAEIWFIivcEBEEQBGuIcAuCICQYItyCIAgJhgi3IAhCgiHCLQiCkGCI\ncAuCICQYItyCIAgJhgi3IAhCglHG7IE7d+5k/vz5DBgwgBMnTvDGG29Qvnx5Tp48yYABA6I5R0EQ\nBEGDKYt7/fr1ZGdn8/DDDwMwaNAg0tPT6devH8ePH2f06NFRnaQgCILgw2U25X3Tpk106tSJdevW\nUadOHQ4cO [...]
+ "text": [
+ "<matplotlib.figure.Figure at 0xb9a28ec>"
+ ]
+ }
+ ],
+ "prompt_number": 5
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can review the best-fit Parameters in more detail."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 6,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.0502578166074512 +/- 0.0675, bounds=[-inf:inf]>), ('N', <Parameter 'N', value=6.8332246334656945 +/- 0.0869, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 6
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "More information about the fit is stored in the result,which is an [``lmfit.Mimimizer``](http://newville.github.io/lmfit-py/fitting.html#Minimizer) object."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Specifying Bounds and Holding Parameters Constant"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Above, the ``Model`` class implicitly builds ``Parameter`` objects from keyword arguments of ``fit`` that match the argments of ``decay``. You can build the ``Parameter`` objects explicity; the following is equivalent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from lmfit import Parameter\n",
+ "\n",
+ "result = model.fit(data, t=t, \n",
+ " N=Parameter(value=10), \n",
+ " tau=Parameter(value=1))\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 7,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.0502578166074512 +/- 0.0675, bounds=[-inf:inf]>), ('N', <Parameter 'N', value=6.8332246334656945 +/- 0.0869, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 7
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "By building ``Parameter`` objects explicitly, you can specify bounds (``min``, ``max``) and set parameters constant (``vary=False``)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = model.fit(data, t=t, \n",
+ " N=Parameter(value=7, vary=False), \n",
+ " tau=Parameter(value=1, min=0))\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 8,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=2.9550822200975864 +/- 0.0417, bounds=[0:inf]>), ('N', <Parameter 'N', value=7 (fixed), bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 8
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Defining Parameters in Advance"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Passing parameters to ``fit`` can become unwieldly. As an alternative, you can extract the parameters from ``model`` like so, set them individually, and pass them to ``fit``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "params = model.params()"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 9
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "params['N'].value = 10 # initial guess\n",
+ "params['tau'].value = 1\n",
+ "params['tau'].min = 0"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [],
+ "prompt_number": 10
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = model.fit(data, params, t=t)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 11,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.0502578132121547 +/- 0.0675, bounds=[0:inf]>), ('N', <Parameter 'N', value=6.8332246370863503 +/- 0.0869, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 11
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Keyword arguments override ``params``, resetting ``value`` and all other properties (``min``, ``max``, ``vary``)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = model.fit(data, params, t=t, tau=1)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 12,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.0502578166074512 +/- 0.0675, bounds=[-inf:inf]>), ('N', <Parameter 'N', value=6.8332246334656945 +/- 0.0869, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 12
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The input parameters are not modified by ``fit``. They can be reused, retaining the same initial value. If you want to use the result of one fit as the initial guess for the next, simply pass ``params=result.params``."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### A Helpful Exception"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "All this implicit magic makes it very easy for the user to neglect to set a parameter. The ``fit`` function checks for this and raises a helpful exception."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "result = model.fit(data, t=t, tau=1) # N unspecified"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValueError",
+ "evalue": "Assign each parameter an initial value by passing Parameters or keyword arguments to fit().",
+ "output_type": "pyerr",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m<ipython-input-13-6d8fedbef3f8>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mt\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mt\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtau\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0 [...]
+ "\u001b[1;32m/home/dallan/lmfit-py/lmfit/model.pyc\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, data, params, sigma, **kwargs)\u001b[0m\n\u001b[0;32m 191\u001b[0m raise ValueError(\"Assign each parameter an initial value by \" +\n\u001b[0;32m 192\u001b[0m \u001b[1;34m\"passing Parameters or keyword arguments to \"\u001b[0m \u001b[1;33m+\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 193\u001b[1;33m [...]
+ "\u001b[1;31mValueError\u001b[0m: Assign each parameter an initial value by passing Parameters or keyword arguments to fit()."
+ ]
+ }
+ ],
+ "prompt_number": 13
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "An *extra* parameter that cannot be matched to the model function will throw a ``UserWarning``, but it will not raise, leaving open the possibility of unforeseen extensions calling for some parameters."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Weighted Fits"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Use the ``sigma`` argument to perform a weighted fit. If you prefer to think of the fit in term of ``weights``, ``sigma=1/weights``."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "weights = np.arange(len(data))\n",
+ "result = model.fit(data, params, t=t, sigma=1./weights)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 14,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.096728970589659 +/- 0.113, bounds=[0:inf]>), ('N', <Parameter 'N', value=6.7514922300319453 +/- 0.256, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 14
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Handling Missing Data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "By default, attemping to fit data that includes a ``NaN``, which conventionally indicates a \"missing\" observation, raises a lengthy exception. You can choose to drop (i.e., skip over) missing values instead."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "data_with_holes = data.copy()\n",
+ "data_with_holes[[5, 500, 700]] = np.nan # Replace arbitrary values with NaN.\n",
+ "\n",
+ "model = Model(decay, independent_vars=['t'], missing='drop')\n",
+ "result = model.fit(data_with_holes, params, t=t)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 15,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.0547114484523323 +/- 0.0677, bounds=[0:inf]>), ('N', <Parameter 'N', value=6.8308291273906265 +/- 0.087, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 15
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you don't want to ignore missing values, you can set the model to raise proactively, checking for missing values before attempting the fit."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "model = Model(decay, independent_vars=['t'], missing='raise')\n",
+ "result = model.fit(data_with_holes, params, t=t)"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValueError",
+ "evalue": "Data contains a null value.",
+ "output_type": "pyerr",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m<ipython-input-16-788e0b6b627f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mModel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdecay\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mindependent_vars\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m't'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmissing\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'raise'\u [...]
+ "\u001b[1;32m/home/dallan/lmfit-py/lmfit/model.pyc\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, data, params, sigma, **kwargs)\u001b[0m\n\u001b[0;32m 196\u001b[0m \u001b[0mmask\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 197\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmissing\u001b[0m \u001b[1;33m!=\u001b[0m \u001b[1;34m'none'\u001b[0m\u001b[1;33m:\u001b[0m\u001 [...]
+ "\u001b[1;32m/home/dallan/lmfit-py/lmfit/model.pyc\u001b[0m in \u001b[0;36m_handle_missing\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmissing\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;34m'raise'\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 118\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0many\u0 [...]
+ "\u001b[1;31mValueError\u001b[0m: Data contains a null value."
+ ]
+ }
+ ],
+ "prompt_number": 16
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The default setting is ``missing='none'``, which does not check for NaNs. This interface is consistent with the ``statsmodels`` project.\n",
+ "\n",
+ "Null-chekcing relies on ``pandas.isnull`` if it is available. If pandas cannot be imported, it silently falls back on ``numpy.isnan``."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Data Alignment"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Imagine a collection of time series data with different lengths. It would be convenient to define one sufficiently long array ``t`` and use it for each time series, regardless of length. The [``pandas``](http://pandas.pydata.org/pandas-docs/stable/) provides tools for aligning indexed data. And, unlike most wrappers to ``scipy.leastsq``, ``Model`` can handle pandas objects out of the box, using its data alignment features.\n",
+ "\n",
+ "Here I take just a slice of the ``data`` and fit it to the full ``t``. It is automatically aligned to the correct section of ``t`` using Series' index."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "from pandas import Series\n",
+ "\n",
+ "model = Model(decay, independent_vars=['t'])\n",
+ "truncated_data = Series(data)[200:800] # data points 200-800\n",
+ "t = Series(t) # all 1000 points\n",
+ "result = model.fit(truncated_data, params, t=t)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 17,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.2221825353028226 +/- 0.159, bounds=[0:inf]>), ('N', <Parameter 'N', value=6.5296051307920768 +/- 0.221, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 17
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Data with missing entries and an unequal length still aligns properly."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "collapsed": false,
+ "input": [
+ "model = Model(decay, independent_vars=['t'], missing='drop')\n",
+ "truncated_data_with_holes = Series(data_with_holes)[200:800]\n",
+ "result = model.fit(truncated_data_with_holes, params, t=t)\n",
+ "result.params"
+ ],
+ "language": "python",
+ "metadata": {},
+ "outputs": [
+ {
+ "metadata": {},
+ "output_type": "pyout",
+ "prompt_number": 18,
+ "text": [
+ "Parameters([('tau', <Parameter 'tau', value=3.2397946733749583 +/- 0.16, bounds=[0:inf]>), ('N', <Parameter 'N', value=6.5107676500014584 +/- 0.219, bounds=[-inf:inf]>)])"
+ ]
+ }
+ ],
+ "prompt_number": 18
+ }
+ ],
+ "metadata": {}
+ }
+ ]
+}
\ No newline at end of file
diff --git a/examples/m1.py b/examples/m1.py
new file mode 100644
index 0000000..b42af60
--- /dev/null
+++ b/examples/m1.py
@@ -0,0 +1,26 @@
+
+import numpy as np
+from lmfit.models1d import GaussianModel
+import matplotlib.pyplot as plt
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+model = GaussianModel() # background='linear'
+
+# model.guess_starting_values(y, x, negative=False)
+# model.params['bkg_offset'].value=min(y)
+
+init_fit = model.model(x=x) + model.calc_background(x)
+model.fit(y, x=x)
+
+print model.fit_report()
+
+final_fit = model.model(x=x)
+
+plt.plot(x, y)
+plt.plot(x, init_fit)
+plt.plot(x, final_fit)
+plt.show()
+
diff --git a/examples/model1d_doc1.py b/examples/model1d_doc1.py
new file mode 100644
index 0000000..ed6b136
--- /dev/null
+++ b/examples/model1d_doc1.py
@@ -0,0 +1,23 @@
+import numpy as np
+from lmfit.models1d import GaussianModel
+import matplotlib.pyplot as plt
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+model = GaussianModel()
+model.guess_starting_values(y, x=x)
+# model.params['amplitude'].value=6.0
+
+init_fit = model.model(x=x)
+model.fit(y, x=x)
+
+print model.fit_report(min_correl=0.25)
+
+final_fit = model.model(x=x)
+
+plt.plot(x, final_fit, 'r-')
+plt.plot(x, init_fit, 'k--')
+plt.plot(x, y, 'bo')
+plt.show()
diff --git a/examples/model1d_doc2.py b/examples/model1d_doc2.py
new file mode 100644
index 0000000..a8ae418
--- /dev/null
+++ b/examples/model1d_doc2.py
@@ -0,0 +1,38 @@
+import numpy as np
+from lmfit.models1d import GaussianModel, VoigtModel
+import matplotlib.pyplot as plt
+
+data = np.loadtxt('model1d_gauss.dat')
+x = data[:, 0]
+y = data[:, 1]
+
+model = VoigtModel(background='linear')
+
+# get default starting values, but then alter them
+model.guess_starting_values(y, x=x)
+model.params['amplitude'].value = 2.0
+
+init_fit = model.model(x=x)
+
+# the actual fit
+model.fit(y, x=x)
+
+print model.fit_report(min_correl=0.25)
+
+vfit = model.model(x=x)
+
+
+mod2 = GaussianModel(background='linear')
+
+mod2.fit(y, x=x)
+gfit = mod2.model(x=x)
+
+print mod2.fit_report(min_correl=0.25)
+
+print 'Voigt Sum of Squares: ', ((vfit - y)**2).sum()
+print 'Gaussian Sum of Squares: ', ((gfit - y)**2).sum()
+
+plt.plot(x, vfit, 'r-')
+plt.plot(x, gfit, 'b-')
+plt.plot(x, y, 'bo')
+plt.show()
diff --git a/examples/model1d_gauss.dat b/examples/model1d_gauss.dat
new file mode 100644
index 0000000..c7d4bd3
--- /dev/null
+++ b/examples/model1d_gauss.dat
@@ -0,0 +1,103 @@
+#---------------------------------
+# col1 col2
+ 0.000000 -0.305196
+ 0.100000 0.004932
+ 0.200000 0.192535
+ 0.300000 0.100639
+ 0.400000 0.244992
+ 0.500000 -0.001095
+ 0.600000 -0.017190
+ 0.700000 -0.138330
+ 0.800000 -0.065546
+ 0.900000 0.150089
+ 1.000000 0.021981
+ 1.100000 0.231610
+ 1.200000 0.186122
+ 1.300000 0.224188
+ 1.400000 0.355904
+ 1.500000 -0.069747
+ 1.600000 0.062342
+ 1.700000 -0.025591
+ 1.800000 0.052080
+ 1.900000 -0.329106
+ 2.000000 -0.012132
+ 2.100000 0.205438
+ 2.200000 0.118093
+ 2.300000 0.018204
+ 2.400000 -0.113374
+ 2.500000 -0.086265
+ 2.600000 -0.074747
+ 2.700000 0.179214
+ 2.800000 0.168398
+ 2.900000 0.067954
+ 3.000000 0.076506
+ 3.100000 0.433768
+ 3.200000 0.019097
+ 3.300000 0.239973
+ 3.400000 0.006607
+ 3.500000 -0.121174
+ 3.600000 0.162577
+ 3.700000 0.042030
+ 3.800000 0.288718
+ 3.900000 0.137440
+ 4.000000 0.593153
+ 4.100000 0.480413
+ 4.200000 0.901715
+ 4.300000 0.868281
+ 4.400000 1.301646
+ 4.500000 1.093022
+ 4.600000 1.531770
+ 4.700000 1.772498
+ 4.800000 2.346719
+ 4.900000 2.716594
+ 5.000000 3.333042
+ 5.100000 3.688503
+ 5.200000 3.821775
+ 5.300000 4.583784
+ 5.400000 4.805664
+ 5.500000 5.125762
+ 5.600000 4.964982
+ 5.700000 4.988856
+ 5.800000 4.854896
+ 5.900000 4.738134
+ 6.000000 4.815129
+ 6.100000 4.070525
+ 6.200000 3.983041
+ 6.300000 3.107054
+ 6.400000 2.841105
+ 6.500000 2.610117
+ 6.600000 2.146078
+ 6.700000 1.683386
+ 6.800000 1.317547
+ 6.900000 0.789538
+ 7.000000 0.585832
+ 7.100000 0.494665
+ 7.200000 0.447038
+ 7.300000 0.441926
+ 7.400000 0.393547
+ 7.500000 -0.033900
+ 7.600000 0.042947
+ 7.700000 -0.116248
+ 7.800000 0.061516
+ 7.900000 0.183615
+ 8.000000 -0.127174
+ 8.100000 0.368512
+ 8.200000 0.194381
+ 8.300000 0.301574
+ 8.400000 0.045097
+ 8.500000 0.110543
+ 8.600000 0.263164
+ 8.700000 0.190722
+ 8.800000 0.425007
+ 8.900000 0.253164
+ 9.000000 0.201519
+ 9.100000 0.132292
+ 9.200000 0.304519
+ 9.300000 0.129096
+ 9.400000 0.269171
+ 9.500000 0.189405
+ 9.600000 0.243728
+ 9.700000 0.411963
+ 9.800000 0.080682
+ 9.900000 0.332672
+ 10.000000 -0.067100
diff --git a/examples/models.py b/examples/models.py
new file mode 100644
index 0000000..ee24ce2
--- /dev/null
+++ b/examples/models.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Apr 22 04:24:43 2012
+
+ at author: Tillsten
+"""
+
+from lmfit import Parameters, Minimizer
+import numpy as np
+import matplotlib.pyplot as plt
+
+class LmModel(object):
+ """
+ Base class for all models.
+
+ Models take x and y and return
+ """
+ def __init__(self,x,y):
+ self.x, self.y=x,y
+ self.parameters=Parameters()
+ self.min=Minimizer(self.residual, self.parameters)
+
+ def print_para(self):
+ for i in self.parameters.values:
+ print i
+
+ def func(self,paras):
+ raise NotImplementedError
+
+ def est_startvals(self):
+ raise NotImplementedError
+
+ def residual(self,paras):
+ return self.func(paras)-self.y
+
+ def fit(self):
+ self.min.leastsq()
+ self.y_model=self.func(self.parameters)
+
+
+
+class Linear(LmModel):
+ """
+ y = a*x + b
+ """
+ def __init__(self,x,y):
+ LmModel.__init__(self,x,y)
+ self.parameters.add_many(('a',0), ('b',0))
+ self.est_startvals()
+
+ def est_startvals(self):
+ a, b = np.polyfit(self.x,self.y,1)
+ self.parameters['a'].value = a
+ self.parameters['b'].value = b
+
+ def func(self,paras):
+ a=paras['a'].value
+ b=paras['b'].value
+ return a*self.x+b
+
+
+
+class ExpDecay(LmModel):
+ """
+ y = a*exp(-x / b) + c
+ """
+ def __init__(self,x,y):
+ LmModel.__init__(self,x,y)
+ self.parameters.add_many(('a',0), ('b',0),('c',0))
+ self.est_startvals()
+
+ def est_startvals(self):
+ c = np.min(self.y)
+ a, b = np.polyfit(self.x, np.log(self.y-c+0.5),1)
+ self.parameters['a'].value = np.exp(b)
+ self.parameters['b'].value = 1/b
+
+ def func(self,paras):
+ a=paras['a'].value
+ b=paras['b'].value
+ c=paras['c'].value
+ return a*np.exp(-x / b) + c
+
+
+class Gaussian(LmModel):
+ """
+ y = a*exp(-(x-xc)**2/(2*w))+c
+ """
+ def __init__(self,x,y):
+ LmModel.__init__(self,x,y)
+ self.parameters.add_many(('a',0), ('xc',0),('w',0),('c',0))
+ self.est_startvals()
+
+ def est_startvals(self):
+ c = np.min(self.y)
+ xc = x[np.argmax(abs(y))]
+ a = np.max(y)
+ w = abs(x[np.argmin(abs(a/2.-y))]-x[np.argmax(y)])*2
+ self.parameters['c'].value=c
+ self.parameters['xc'].value=xc
+ self.parameters['a'].value=a
+ self.parameters['w'].value=w
+
+ def func(self,paras):
+ c=paras['c'].value
+ xc=paras['xc'].value
+ a=paras['a'].value
+ w=paras['w'].value
+ return a*np.exp(-(self.x-xc)**2/(2*w))+c
+
+
+#x=np.linspace(-5,5,20)
+#y=3*x+1+np.random.randn(x.size)
+#lm=Linear(x,y)
+#lm.fit()
+
+x=np.linspace(-5,5,20)
+y= 5*np.exp(-x / 3.) + 3+ 4*np.random.randn(x.size)
+lm=ExpDecay(x,y)
+lm.fit()
+
+plt.plot(lm.x, lm.y)
+plt.plot(lm.x, lm.y_model)
+plt.show()
+
+
+
+
\ No newline at end of file
diff --git a/examples/peakfit_1.py b/examples/peakfit_1.py
new file mode 100644
index 0000000..5f1b734
--- /dev/null
+++ b/examples/peakfit_1.py
@@ -0,0 +1,75 @@
+from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
+from scipy.optimize import leastsq
+try:
+ import pylab
+ HASPYLAB = True
+except ImportError:
+ HASPYLAB = False
+
+
+from lmfit import Parameters, Minimizer, report_fit
+from lmfit.utilfuncs import gauss, loren
+
+def residual(pars, x, data=None):
+ g1 = gauss(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
+ g2 = gauss(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
+ model = g1 + g2
+ if data is None:
+ return model
+ return (model - data)
+
+n = 601
+xmin = 0.
+xmax = 15.0
+noise = random.normal(scale=.65, size=n)
+x = linspace(xmin, xmax, n)
+
+fit_params = Parameters()
+fit_params.add_many(('a1', 12.0, True, None, None, None),
+ ('c1', 5.3, True, None, None, None),
+ ('w1', 1.0, True, None, None, None),
+ ('a2', 9.1, True, None, None, None),
+ ('c2', 8.1, True, None, None, None),
+ ('w2', 2.5, True, None, None, None))
+
+data = residual(fit_params, x) + noise
+
+if HASPYLAB:
+ pylab.plot(x, data, 'r+')
+
+fit_params = Parameters()
+fit_params.add_many(('a1', 8.0, True, None, 14., None),
+ ('c1', 5.0, True, None, None, None),
+ ('w1', 0.7, True, None, None, None),
+ ('a2', 3.1, True, None, None, None),
+ ('c2', 8.8, True, None, None, None))
+
+fit_params.add('w2', expr='2.5*w1')
+
+myfit = Minimizer(residual, fit_params,
+ fcn_args=(x,), fcn_kws={'data':data})
+
+myfit.prepare_fit()
+
+init = residual(fit_params, x)
+
+if HASPYLAB:
+ pylab.plot(x, init, 'b--')
+
+myfit.leastsq()
+
+print ' N fev = ', myfit.nfev
+print myfit.chisqr, myfit.redchi, myfit.nfree
+
+report_fit(fit_params)
+
+fit = residual(fit_params, x)
+
+if HASPYLAB:
+ pylab.plot(x, fit, 'k-')
+ pylab.show()
+
+
+
+
+
diff --git a/examples/simple.py b/examples/simple.py
new file mode 100644
index 0000000..af0495e
--- /dev/null
+++ b/examples/simple.py
@@ -0,0 +1,46 @@
+
+from lmfit import minimize, Parameters, Parameter, report_fit
+import numpy as np
+
+# create data to be fitted
+x = np.linspace(0, 15, 301)
+data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
+ np.random.normal(size=len(x), scale=0.2) )
+
+# define objective function: returns the array to be minimized
+def fcn2min(params, x, data):
+ """ model decaying sine wave, subtract data"""
+ amp = params['amp'].value
+ shift = params['shift'].value
+ omega = params['omega'].value
+ decay = params['decay'].value
+
+ model = amp * np.sin(x * omega + shift) * np.exp(-x*x*decay)
+ return model - data
+
+# create a set of Parameters
+params = Parameters()
+params.add('amp', value= 10, min=0)
+params.add('decay', value= 0.1)
+params.add('shift', value= 0.0, min=-np.pi/2., max=np.pi/2)
+params.add('omega', value= 3.0)
+
+
+# do fit, here with leastsq model
+result = minimize(fcn2min, params, args=(x, data))
+
+# calculate final result
+final = data + result.residual
+
+# write error report
+report_fit(params)
+
+# try to plot results
+try:
+ import pylab
+ pylab.plot(x, data, 'k+')
+ pylab.plot(x, final, 'r')
+ pylab.show()
+except:
+ pass
+
diff --git a/examples/use_models1d.py b/examples/use_models1d.py
new file mode 100644
index 0000000..239f12c
--- /dev/null
+++ b/examples/use_models1d.py
@@ -0,0 +1,44 @@
+import numpy as np
+from lmfit.models1d import LinearModel, QuadraticModel, ExponentialModel
+from lmfit.models1d import LorenztianModel, GaussianModel, VoigtModel
+import matplotlib.pyplot as plt
+
+
+x = np.linspace(0, 10, 101)
+# dat = 118.0 + 10.0*np.exp(-x/7.0) + 5e-2*np.random.randn(len(x))
+# dat = 18.0 + 1.5*x + 5.6*np.random.randn(len(x))
+
+sig = 0.47
+amp = 12.00
+cen = 5.66
+eps = 0.15
+off = 9
+slo = 0.0012
+sca = 1./(2.0*np.sqrt(2*np.pi))/sig
+
+noise = eps*np.random.randn(len(x))
+
+dat = off +slo*x + amp*sca* np.exp(-(x-cen)**2 / (2*sig)**2) + noise
+
+# mod = ExponentialModel(background='linear')
+# mod = LinearModel()
+
+mod = GaussianModel(background='quad')
+mod = VoigtModel(background='quad')
+mod = LorenztianModel(background='quad')
+mod.guess_starting_values(dat, x, negative=False)
+mod.params['bkg_offset'].value=min(dat)
+
+init = mod.model(x=x)+mod.calc_background(x)
+mod.fit(dat, x=x)
+
+
+print mod.fit_report()
+
+fit = mod.model(x=x)+mod.calc_background(x)
+
+plt.plot(x, dat)
+plt.plot(x, init)
+plt.plot(x, fit)
+plt.show()
+
diff --git a/lmfit.egg-info/PKG-INFO b/lmfit.egg-info/PKG-INFO
deleted file mode 100644
index aaac6b9..0000000
--- a/lmfit.egg-info/PKG-INFO
+++ /dev/null
@@ -1,35 +0,0 @@
-Metadata-Version: 1.1
-Name: lmfit
-Version: 0.7.4
-Summary: Least-Squares Minimization with Bounds and Constraints
-Home-page: http://lmfit.github.io/lmfit-py/
-Author: LM-Fit Development Team
-Author-email: matt.newville at gmail.com
-License: BSD
-Download-URL: http://lmfit.github.io//lmfit-py/
-Description: A library for least-squares minimization and data fitting in
- Python. Built on top of scipy.optimize, lmfit provides a Parameter object
- which can be set as fixed or free, can have upper and/or lower bounds, or
- can be written in terms of algebraic constraints of other Parameters. The
- user writes a function to be minimized as a function of these Parameters,
- and the scipy.optimize methods are used to find the optimal values for the
- Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
- algorithm, and provides estimated standard errors and correlations between
- varied Parameters. Other minimization methods, including Nelder-Mead's
- downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
- others are also supported. Bounds and contraints can be placed on
- Parameters for all of these methods.
-
- In addition, methods for explicitly calculating confidence intervals are
- provided for exploring minmization problems where the approximation of
- estimating Parameter uncertainties from the covariance matrix is
- questionable.
-Platform: Windows
-Platform: Linux
-Platform: Mac OS X
-Classifier: Intended Audience :: Science/Research
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Topic :: Scientific/Engineering
-Requires: numpy
-Requires: scipy
diff --git a/lmfit.egg-info/SOURCES.txt b/lmfit.egg-info/SOURCES.txt
deleted file mode 100644
index e73feb1..0000000
--- a/lmfit.egg-info/SOURCES.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-INSTALL
-LICENSE
-MANIFEST.in
-README
-publish_docs.sh
-setup.py
-doc/Makefile
-doc/bounds.rst
-doc/conf.py
-doc/confidence.rst
-doc/constraints.rst
-doc/fitting.rst
-doc/index.rst
-doc/installation.rst
-doc/models1d.rst
-doc/models1d_doc1.png
-doc/parameters.rst
-doc/test_ci2_result.png
-doc/_static/empty
-doc/_templates/indexsidebar.html
-doc/ext/ipython_console_highlighting.py
-doc/ext/ipython_directive.py
-doc/ext/numpydoc/__init__.py
-doc/ext/numpydoc/comment_eater.py
-doc/ext/numpydoc/compiler_unparse.py
-doc/ext/numpydoc/docscrape.py
-doc/ext/numpydoc/docscrape_sphinx.py
-doc/ext/numpydoc/numpydoc.py
-doc/ext/numpydoc/phantom_import.py
-doc/ext/numpydoc/plot_directive.py
-doc/ext/numpydoc/traitsdoc.py
-doc/sphinxext/apigen.py
-doc/sphinxext/docscrape.py
-doc/sphinxext/docscrape_sphinx.py
-doc/sphinxext/github.py
-doc/sphinxext/inheritance_diagram.py
-doc/sphinxext/ipython_console_highlighting.py
-doc/sphinxext/ipython_directive.py
-doc/sphinxext/numpydoc.py
-lmfit/__init__.py
-lmfit/asteval.py
-lmfit/astutils.py
-lmfit/confidence.py
-lmfit/minimizer.py
-lmfit/model.py
-lmfit/models1d.py
-lmfit/ordereddict.py
-lmfit/parameter.py
-lmfit/printfuncs.py
-lmfit/specified_models.py
-lmfit/utilfuncs.py
-lmfit/wrap.py
-lmfit.egg-info/PKG-INFO
-lmfit.egg-info/SOURCES.txt
-lmfit.egg-info/dependency_links.txt
-lmfit.egg-info/top_level.txt
-lmfit/uncertainties/__init__.py
-lmfit/uncertainties/test_umath.py
-lmfit/uncertainties/test_uncertainties.py
-lmfit/uncertainties/umath.py
-lmfit/uncertainties/unumpy/__init__.py
-lmfit/uncertainties/unumpy/core.py
-lmfit/uncertainties/unumpy/test_ulinalg.py
-lmfit/uncertainties/unumpy/test_unumpy.py
-lmfit/uncertainties/unumpy/ulinalg.py
-tests/_test_ci.py
-tests/_test_make_paras_and_func.py
-tests/test_1variable.py
-tests/test_algebraic_constraint.py
-tests/test_algebraic_constraint2.py
-tests/test_model.py
-tests/test_nose.py
-tests/test_wrap_function.py
\ No newline at end of file
diff --git a/lmfit.egg-info/dependency_links.txt b/lmfit.egg-info/dependency_links.txt
deleted file mode 100644
index 8b13789..0000000
--- a/lmfit.egg-info/dependency_links.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/lmfit.egg-info/top_level.txt b/lmfit.egg-info/top_level.txt
deleted file mode 100644
index 536bcc8..0000000
--- a/lmfit.egg-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-lmfit
diff --git a/lmfit/uncertainties/LICENSE.txt b/lmfit/uncertainties/LICENSE.txt
new file mode 100644
index 0000000..ce63a8d
--- /dev/null
+++ b/lmfit/uncertainties/LICENSE.txt
@@ -0,0 +1,10 @@
+Copyright (c) 2010, Eric O. LEBIGOT (EOL).
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+ * The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lmfit/uncertainties/README b/lmfit/uncertainties/README
new file mode 100644
index 0000000..497740a
--- /dev/null
+++ b/lmfit/uncertainties/README
@@ -0,0 +1,14 @@
+This is a modified version of the uncertainties package by Eric O. Lebigot. See the Python Package
+Index (PyPI) and/or the github source repository (at https://github.com/lebigot/uncertainties) for
+more complete documentation on this package.
+
+The changes here were forked from version 0.9 of the uncertainties package, and add the ability
+to calculate uncertainties on wrapped python functions that include keyword arguments. This is
+important for lmfit, as it allows estimates of uncertainties in constrained parameters.
+
+The changes made here were submitted for inclusion in the uncertainties package, but not
+accepted. Since the enhancement is useful for lmfit, the forked version is included here.
+
+--Matt Newville
+
+
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 861a9f5..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
-
diff --git a/upload_wininst.bat b/upload_wininst.bat
new file mode 100644
index 0000000..ebfe392
--- /dev/null
+++ b/upload_wininst.bat
@@ -0,0 +1,18 @@
+REM
+REM %HOME%\.pypirc must be setup with PyPI info.
+REM
+SET HOME=M:\
+SET PATH=C:\Python26;%PATH%
+python setup.py install
+python setup.py bdist_wininst --target-version=2.6 upload
+
+SET PATH=C:\Python27;%PATH%
+python setup.py install
+python setup.py bdist_wininst --target-version=2.7 upload
+
+SET PATH=C:\Python32;%PATH%
+python setup.py install
+python setup.py bdist_wininst --target-version=3.2 upload
+
+
+
diff --git a/use_py26.bat b/use_py26.bat
new file mode 100644
index 0000000..c36bda6
--- /dev/null
+++ b/use_py26.bat
@@ -0,0 +1,2 @@
+@echo off
+SET PATH=C:\Python26;C:\Python26\Tools\Scripts;%PATH%
diff --git a/use_py27.bat b/use_py27.bat
new file mode 100644
index 0000000..115eb16
--- /dev/null
+++ b/use_py27.bat
@@ -0,0 +1,2 @@
+@echo off
+SET PATH=C:\Python27;C:\Python27\Tools\Scripts;%PATH%
diff --git a/use_py32.bat b/use_py32.bat
new file mode 100644
index 0000000..6672ddb
--- /dev/null
+++ b/use_py32.bat
@@ -0,0 +1,2 @@
+@echo off
+SET PATH=C:\Python32;C:\Python32\Tools\Scripts;%PATH%
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lmfit-py.git
More information about the debian-science-commits
mailing list