[Debian-astro-commits] [python-pyraf] 01/03: Imported Upstream version 2.1.11

Ole Streicher olebole at moszumanska.debian.org
Fri Sep 9 09:36:59 UTC 2016


This is an automated email from the git hooks/post-receive script.

olebole pushed a commit to branch debian
in repository python-pyraf.

commit eef79e3c3289a14dcccf485a258ce36997cfb6b4
Author: Ole Streicher <olebole at debian.org>
Date:   Tue Jul 26 11:09:35 2016 +0200

    Imported Upstream version 2.1.11
---
 LICENSE.txt                                        |   29 -
 PKG-INFO                                           |    2 +-
 data/ipythonrc-pyraf                               |   26 -
 defsetup.py                                        |  241 --
 distribute_setup.py                                |  485 ----
 docs/ecl.html                                      |  426 ----
 docs/ipython_notes.txt                             |   85 -
 lib/pyraf.egg-info/PKG-INFO                        |    2 +-
 lib/pyraf.egg-info/SOURCES.txt                     |   84 +-
 lib/pyraf/GkiMpl.py                                |    2 +-
 lib/pyraf/MplCanvasAdapter.py                      |    2 +-
 lib/pyraf/Ptkplot.py                               |    2 +-
 lib/pyraf/__init__.py                              |   12 +-
 lib/pyraf/aqutil.py                                |    2 +-
 lib/pyraf/cgeneric.py                              |    2 +-
 lib/pyraf/cl2py.py                                 |    2 +-
 lib/pyraf/clast.py                                 |    2 +-
 lib/pyraf/clcache.py                               |    2 +-
 lib/pyraf/cllinecache.py                           |    2 +-
 lib/pyraf/clparse.py                               |    2 +-
 lib/pyraf/clscan.py                                |    2 +-
 lib/pyraf/cltoken.py                               |    2 +-
 lib/pyraf/describe.py                              |    2 +-
 lib/pyraf/dirdbm.py                                |    2 +-
 lib/pyraf/dirshelve.py                             |    2 +-
 lib/pyraf/epar.py                                  |    2 +-
 lib/pyraf/filecache.py                             |    2 +-
 lib/pyraf/fontdata.py                              |    2 +-
 lib/pyraf/generic.py                               |    2 +-
 lib/pyraf/gki.py                                   |    2 +-
 lib/pyraf/gki_psikern_tests.py                     |    2 +-
 lib/pyraf/gki_sys_tests.py                         |    2 +-
 lib/pyraf/gkicmd.py                                |    2 +-
 lib/pyraf/gkigcur.py                               |    2 +-
 lib/pyraf/gkiiraf.py                               |    2 +-
 lib/pyraf/gkitkbase.py                             |    2 +-
 lib/pyraf/gkitkplot.py                             |    2 +-
 lib/pyraf/graphcap.py                              |    2 +-
 lib/pyraf/gwm.py                                   |    2 +-
 lib/pyraf/ipython_api.py                           |    2 +-
 lib/pyraf/iraf.py                                  |    2 +-
 lib/pyraf/irafcompleter.py                         |    2 +-
 lib/pyraf/irafdisplay.py                           |    2 +-
 lib/pyraf/irafecl.py                               |    2 +-
 lib/pyraf/irafexecute.py                           |    2 +-
 lib/pyraf/iraffunctions.py                         |    2 +-
 lib/pyraf/irafgwcs.py                              |    2 +-
 lib/pyraf/irafhelp.py                              |    2 +-
 lib/pyraf/irafimcur.py                             |    2 +-
 lib/pyraf/irafimport.py                            |    2 +-
 lib/pyraf/irafinst.py                              |    2 +-
 lib/pyraf/irafnames.py                             |    2 +-
 lib/pyraf/irafpar.py                               |    2 +-
 lib/pyraf/iraftask.py                              |    2 +-
 lib/pyraf/irafukey.py                              |    2 +-
 lib/pyraf/msgiobuffer.py                           |    2 +-
 lib/pyraf/msgiowidget.py                           |    2 +-
 lib/pyraf/newWindowHack.py                         |    2 +-
 lib/pyraf/pseteparoption.py                        |    2 +-
 lib/pyraf/pycmdline.py                             |    2 +-
 lib/pyraf/pyrafTk.py                               |    2 +-
 lib/pyraf/pyrafglobals.py                          |    2 +-
 lib/pyraf/splash.py                                |    2 +-
 lib/pyraf/subproc.py                               |    4 +-
 lib/pyraf/textattrib.py                            |    2 +-
 lib/pyraf/tkplottext.py                            |    2 +-
 lib/pyraf/tpar.py                                  |    2 +-
 lib/pyraf/urwfiledlg.py                            |    2 +-
 lib/pyraf/urwutil.py                               |    2 +-
 lib/pyraf/version.py                               |   12 +-
 lib/pyraf/version.py.orig1                         |   83 -
 lib/pyraf/version.py.orig2                         |   83 -
 lib/pyraf/version_vcs.py                           |    2 +
 lib/pyraf/wutil.py                                 |    7 +-
 lib/pyraf_setup.pyc                                |  Bin 3313 -> 3313 bytes
 required_pkgs/d2to1                                |    1 +
 required_pkgs/d2to1/.authors                       |    4 -
 required_pkgs/d2to1/.gitignore                     |   27 -
 required_pkgs/d2to1/.travis.yml                    |    7 -
 required_pkgs/d2to1/CHANGES.rst                    |  224 --
 required_pkgs/d2to1/CONTRIBUTORS                   |    4 -
 required_pkgs/d2to1/LICENSE                        |   29 -
 required_pkgs/d2to1/README.rst                     |  110 -
 required_pkgs/d2to1/d2to1/__init__.py              |    4 -
 required_pkgs/d2to1/d2to1/core.py                  |   83 -
 required_pkgs/d2to1/d2to1/extern/__init__.py       |    0
 required_pkgs/d2to1/d2to1/extern/six.py            |  386 ---
 required_pkgs/d2to1/d2to1/tests/__init__.py        |   90 -
 required_pkgs/d2to1/d2to1/tests/test_commands.py   |   13 -
 required_pkgs/d2to1/d2to1/tests/test_core.py       |   48 -
 required_pkgs/d2to1/d2to1/tests/test_hooks.py      |   52 -
 .../d2to1/d2to1/tests/testpackage/CHANGES.txt      |   86 -
 .../d2to1/d2to1/tests/testpackage/LICENSE.txt      |   29 -
 .../d2to1/d2to1/tests/testpackage/MANIFEST.in      |    1 -
 .../d2to1/d2to1/tests/testpackage/README.txt       |  148 --
 .../testpackage/d2to1_testpackage/__init__.py      |    0
 .../testpackage/d2to1_testpackage/_setup_hooks.py  |   25 -
 .../d2to1_testpackage/package_data/1.txt           |    0
 .../d2to1_testpackage/package_data/2.txt           |    0
 .../d2to1/d2to1/tests/testpackage/data_files/a.txt |    0
 .../d2to1/d2to1/tests/testpackage/data_files/b.txt |    0
 .../d2to1/d2to1/tests/testpackage/data_files/c.rst |    0
 .../d2to1/tests/testpackage/distribute_setup.py    |  485 ----
 .../d2to1/d2to1/tests/testpackage/extra-file.txt   |    0
 .../d2to1/d2to1/tests/testpackage/setup.cfg        |   46 -
 .../d2to1/d2to1/tests/testpackage/setup.py         |   12 -
 .../d2to1/d2to1/tests/testpackage/src/testext.c    |   28 -
 required_pkgs/d2to1/d2to1/tests/util.py            |   35 -
 required_pkgs/d2to1/d2to1/util.py                  |  580 -----
 required_pkgs/d2to1/d2to1/zestreleaser.py          |  161 --
 required_pkgs/d2to1/ez_setup.py                    |  264 ---
 required_pkgs/d2to1/setup.cfg                      |   52 -
 required_pkgs/d2to1/setup.py                       |   13 -
 required_pkgs/d2to1/tox.ini                        |   11 -
 required_pkgs/stsci.distutils                      |    1 +
 required_pkgs/stsci.distutils/CHANGES.txt          |  171 --
 required_pkgs/stsci.distutils/LICENSE.txt          |   29 -
 required_pkgs/stsci.distutils/MANIFEST.in          |    2 -
 required_pkgs/stsci.distutils/README.txt           |  148 --
 required_pkgs/stsci.distutils/distribute_setup.py  |  515 ----
 required_pkgs/stsci.distutils/docs/Makefile        |  153 --
 required_pkgs/stsci.distutils/docs/source/api.rst  |   17 -
 .../stsci.distutils/docs/source/changelog.rst      |    1 -
 required_pkgs/stsci.distutils/docs/source/conf.py  |  244 --
 .../stsci.distutils/docs/source/index.rst          |   22 -
 required_pkgs/stsci.distutils/setup.cfg            |   45 -
 required_pkgs/stsci.distutils/setup.py             |   93 -
 required_pkgs/stsci.distutils/stsci/__init__.py    |   10 -
 .../stsci.distutils/stsci/distutils/__init__.py    |   11 -
 .../stsci.distutils/stsci/distutils/astutils.py    |  106 -
 .../stsci/distutils/command/__init__.py            |    0
 .../stsci/distutils/command/build_optional_ext.py  |   86 -
 .../stsci/distutils/command/easier_install.py      |  105 -
 .../stsci.distutils/stsci/distutils/hooks.py       |  476 ----
 .../stsci.distutils/stsci/distutils/release.py     |  195 --
 .../stsci.distutils/stsci/distutils/svnutils.py    |   57 -
 .../stsci/distutils/tests/__init__.py              |   83 -
 .../stsci/distutils/tests/test_commands.py         |   71 -
 .../stsci/distutils/tests/test_hooks.py            |  256 --
 .../stsci/distutils/tests/testpackage/CHANGES.txt  |   86 -
 .../stsci/distutils/tests/testpackage/LICENSE.txt  |   29 -
 .../stsci/distutils/tests/testpackage/MANIFEST.in  |    1 -
 .../stsci/distutils/tests/testpackage/README.txt   |  148 --
 .../distutils/tests/testpackage/data_files/a.txt   |    0
 .../distutils/tests/testpackage/data_files/b.txt   |    0
 .../distutils/tests/testpackage/data_files/c.rst   |    0
 .../tests/testpackage/distribute_setup.py          |  485 ----
 .../stsci/distutils/tests/testpackage/setup.cfg    |   49 -
 .../stsci/distutils/tests/testpackage/setup.py     |   17 -
 .../distutils/tests/testpackage/src/testext.c      |   28 -
 .../distutils/tests/testpackage/stsci/__init__.py  |   10 -
 .../testpackage/stsci/testpackage/__init__.py      |    8 -
 .../stsci/testpackage/package_data/1.txt           |    0
 .../stsci/testpackage/package_data/2.txt           |    0
 .../stsci.distutils/stsci/distutils/tests/util.py  |   61 -
 .../stsci/distutils/versionutils.py                |  195 --
 required_pkgs/stsci.distutils/tox.ini              |   13 -
 required_pkgs/stsci.tools                          |    1 +
 required_pkgs/stsci.tools/LICENSE.txt              |   29 -
 required_pkgs/stsci.tools/doc/Makefile             |   89 -
 required_pkgs/stsci.tools/doc/make.bat             |  113 -
 required_pkgs/stsci.tools/doc/source/analysis.rst  |   32 -
 required_pkgs/stsci.tools/doc/source/asnutil.rst   |    9 -
 .../stsci.tools/doc/source/basicutils.rst          |   26 -
 required_pkgs/stsci.tools/doc/source/bitmask.rst   |   10 -
 required_pkgs/stsci.tools/doc/source/conf.py       |  208 --
 required_pkgs/stsci.tools/doc/source/convert.rst   |   26 -
 required_pkgs/stsci.tools/doc/source/fitsdiff.rst  |    9 -
 required_pkgs/stsci.tools/doc/source/fitsutil.rst  |   14 -
 required_pkgs/stsci.tools/doc/source/imgutils.rst  |   15 -
 required_pkgs/stsci.tools/doc/source/index.rst     |   35 -
 required_pkgs/stsci.tools/doc/source/stpyfits.rst  |   10 -
 .../stsci.tools/doc/source/teal_guide.rst          |  356 ---
 required_pkgs/stsci.tools/doc/source/wcsutil.rst   |    8 -
 required_pkgs/stsci.tools/ez_setup.py              |  332 ---
 required_pkgs/stsci.tools/lib/stsci/__init__.py    |   10 -
 .../stsci.tools/lib/stsci/tools/__init__.py        |    9 -
 required_pkgs/stsci.tools/lib/stsci/tools/alert.py |  133 --
 .../stsci.tools/lib/stsci/tools/asnutil.py         |  698 ------
 .../stsci.tools/lib/stsci/tools/basicpar.py        | 1630 -------------
 .../stsci.tools/lib/stsci/tools/bitmask.py         |  223 --
 .../stsci.tools/lib/stsci/tools/capable.py         |  167 --
 .../stsci.tools/lib/stsci/tools/cfgpars.py         | 1336 -----------
 .../stsci.tools/lib/stsci/tools/check_files.py     |  456 ----
 .../lib/stsci/tools/clipboard_helper.py            |   95 -
 .../stsci.tools/lib/stsci/tools/compmixin.py       |  224 --
 .../stsci.tools/lib/stsci/tools/configobj.py       | 2485 --------------------
 .../stsci.tools/lib/stsci/tools/convertgeis.py     |  483 ----
 .../lib/stsci/tools/convertwaiveredfits.py         |  628 -----
 .../stsci.tools/lib/stsci/tools/dialog.py          |   82 -
 .../stsci.tools/lib/stsci/tools/editpar.py         | 1769 --------------
 .../stsci.tools/lib/stsci/tools/eparoption.py      |  937 --------
 .../stsci.tools/lib/stsci/tools/filedlg.py         |  445 ----
 .../stsci.tools/lib/stsci/tools/fileutil.py        | 1518 ------------
 .../stsci.tools/lib/stsci/tools/fitsdiff.py        |   70 -
 .../stsci.tools/lib/stsci/tools/for2to3.py         |  114 -
 required_pkgs/stsci.tools/lib/stsci/tools/gfit.py  |  167 --
 .../stsci.tools/lib/stsci/tools/imageiter.py       |   48 -
 .../stsci.tools/lib/stsci/tools/irafglob.py        |   54 -
 .../stsci.tools/lib/stsci/tools/irafglobals.py     |  442 ----
 .../stsci.tools/lib/stsci/tools/irafutils.py       |  540 -----
 .../stsci.tools/lib/stsci/tools/iterfile.py        |  153 --
 .../stsci.tools/lib/stsci/tools/linefit.py         |   86 -
 .../stsci.tools/lib/stsci/tools/listdlg.py         |  123 -
 .../stsci.tools/lib/stsci/tools/logutil.py         |  679 ------
 .../stsci.tools/lib/stsci/tools/minmatch.py        |  317 ---
 .../stsci.tools/lib/stsci/tools/mputil.py          |  213 --
 .../stsci.tools/lib/stsci/tools/nimageiter.py      |  210 --
 .../stsci.tools/lib/stsci/tools/nmpfit.py          | 2274 ------------------
 .../stsci.tools/lib/stsci/tools/numerixenv.py      |   17 -
 .../stsci.tools/lib/stsci/tools/parseinput.py      |  219 --
 .../stsci.tools/lib/stsci/tools/readgeis.py        |  434 ----
 required_pkgs/stsci.tools/lib/stsci/tools/stash.py |   30 -
 .../stsci.tools/lib/stsci/tools/stpyfits.py        |  337 ---
 .../lib/stsci/tools/stsci_distutils_hack.py        |  372 ---
 .../stsci.tools/lib/stsci/tools/swapgeis.py        |  624 -----
 .../stsci.tools/lib/stsci/tools/taskpars.py        |   64 -
 required_pkgs/stsci.tools/lib/stsci/tools/teal.py  | 1161 ---------
 .../stsci.tools/lib/stsci/tools/teal_bttn.py       |   70 -
 .../stsci.tools/lib/stsci/tools/tester.py          |  136 --
 .../stsci.tools/lib/stsci/tools/tests/__init__.py  |    1 -
 .../stsci.tools/lib/stsci/tools/tests/cdva2.fits   |    1 -
 .../lib/stsci/tools/tests/o4sp040b0_raw.fits       |    1 -
 .../lib/stsci/tools/tests/testStpyfits.py          |  700 ------
 .../lib/stsci/tools/tests/test_xyinterp.py         |   70 -
 .../stsci.tools/lib/stsci/tools/testutil.py        |  237 --
 .../stsci.tools/lib/stsci/tools/textutil.py        |   72 -
 .../stsci.tools/lib/stsci/tools/tkrotext.py        |  103 -
 .../stsci.tools/lib/stsci/tools/validate.py        | 1460 ------------
 .../stsci.tools/lib/stsci/tools/versioninfo.py     |   81 -
 .../stsci.tools/lib/stsci/tools/vtor_checks.py     |  142 --
 .../stsci.tools/lib/stsci/tools/wcsutil.py         | 1099 ---------
 .../stsci.tools/lib/stsci/tools/xyinterp.py        |  102 -
 .../stsci.tools/scripts/convertwaiveredfits        |    6 -
 required_pkgs/stsci.tools/scripts/stscidocs        |   26 -
 required_pkgs/stsci.tools/setup.cfg                |   45 -
 required_pkgs/stsci.tools/setup.cfg.orig           |   46 -
 required_pkgs/stsci.tools/setup.py                 |   15 -
 scripts/pyraf                                      |    5 +-
 scripts/pyraf.bat                                  |   30 -
 setup.cfg                                          |    2 +-
 shortcut.vbs                                       |   48 -
 src/xutil.c                                        |    2 +-
 tools/cachecompare.py                              |    2 +-
 tools/cachesearch.py                               |    2 +-
 tools/checkcompileall.py                           |    2 +-
 tools/compileallcl.py                              |    2 +-
 tools/createTarBall.csh                            |  223 --
 tools/fixcache.py                                  |    2 +-
 tools/loadall.py                                   |    2 +-
 tools/plotbench.py                                 |    2 +-
 tools/test_usage.csh                               |   49 -
 tools/test_usage.save                              |   34 -
 253 files changed, 167 insertions(+), 36835 deletions(-)

diff --git a/LICENSE.txt b/LICENSE.txt
deleted file mode 100644
index c3000ce..0000000
--- a/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/PKG-INFO b/PKG-INFO
index 0659b40..ac4ce9f 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pyraf
-Version: 2.1.10
+Version: 2.1.11
 Summary: Provides a Pythonic interface to IRAF that can be used in place of the existing IRAF CL
 Home-page: http://www.stsci.edu/resources/software_hardware/pyraf
 Author: Rick White, Perry Greenfield, Chris Sontag
diff --git a/data/ipythonrc-pyraf b/data/ipythonrc-pyraf
deleted file mode 100644
index bd87c95..0000000
--- a/data/ipythonrc-pyraf
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- Mode: Shell-Script -*-  Not really, but shows comments correctly
-#***************************************************************************
-#
-# Configuration file for ipython -- ipythonrc format
-#
-# The format of this file is one of 'key value' lines.
-# Lines containing only whitespace at the beginning and then a # are ignored
-# as comments. But comments can NOT be put on lines with data.
-#***************************************************************************
-
-# This is an example of a 'profile' file which includes a base file and adds
-# some customizaton for a particular purpose.
-
-# If this file is found in the user's ~/.ipython directory as
-# ipythonrc-pyraf, it can be loaded by calling passing the '-profile
-# pyraf' (or '-p pyraf') option to IPython.
-
-# A simple alias pyraf -> 'ipython -p pyraf' makes life very convenient.
-
-# Load the user's basic configuration
-include ipythonrc
-
-# import ...
-
-import_mod pyraf.ipython_api
-import_mod iraf
diff --git a/defsetup.py b/defsetup.py
deleted file mode 100644
index df1d930..0000000
--- a/defsetup.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division # confidence high
-
-import os, os.path, shutil, sys, commands
-import distutils.core
-import distutils.sysconfig
-import string
-
-
-## conditional flags, defaults
-
-# conditional for if we are running on Windows
-ms_windows = sys.platform.startswith('win')
-
-# conditional for if we should build the C code
-build_c = not ms_windows
-# so my 2.5 fink build works:
-if sys.version_info[:2] < (2,6):
-    build_c = False
-    ms_windows = True
-
-C_EXT_MODNAME_ENDING = 'module'
-if sys.version_info[0] > 2: # actually changes in Python 3.3, but why wait
-    C_EXT_MODNAME_ENDING = '' # needs not "sscanfmodule.so", but "sscanf.so"
-
-# default to no C extensions; add to this list when necessary
-PYRAF_EXTENSIONS = []
-
-# get the python libraries for use by C extensions
-# (why? doesn't distutils already reference those?)
-add_lib_dirs = [ distutils.sysconfig.get_python_lib(plat_specific=1, standard_lib = 1) ]
-add_inc_dirs = [ distutils.sysconfig.get_python_inc(plat_specific=1) ]
-
-## x windows specific features
-
-x_libraries = 'X11'
-
-
-def find_x(xdir=""):
-    if xdir != "":
-        add_lib_dirs.append(os.path.join(xdir,'lib64'))
-        add_lib_dirs.append(os.path.join(xdir,'lib'))
-        add_inc_dirs.append(os.path.join(xdir,'include'))
-    elif sys.platform == 'darwin' or sys.platform.startswith('linux'):
-        add_lib_dirs.append('/usr/X11R6/lib64')
-        add_lib_dirs.append('/usr/X11R6/lib')
-        add_inc_dirs.append('/usr/X11R6/include')
-    elif sys.platform == 'sunos5' :
-        add_lib_dirs.append('/usr/openwin/lib')
-        add_inc_dirs.append('/usr/openwin/include')
-    else:
-        try:
-            import Tkinter
-        except:
-            raise ImportError("Tkinter is not installed")
-        tk=Tkinter.Tk()
-        tk.withdraw()
-        tcl_lib = os.path.join(str(tk.getvar('tcl_library')), '../')
-        tcl_inc = os.path.join(str(tk.getvar('tcl_library')), '../../include')
-        tk_lib = os.path.join(str(tk.getvar('tk_library')), '../')
-        tkv = str(Tkinter.TkVersion)[:3]
-        # yes, the version number of Tkinter really is a float...
-        if Tkinter.TkVersion < 8.3:
-            print "Tcl/Tk v8.3 or later required\n"
-            sys.exit(1)
-        else:
-            suffix = '.so'
-            tklib='libtk'+tkv+suffix
-            command = "ldd %s" % (os.path.join(tk_lib, tklib))
-            lib_list = string.split(commands.getoutput(command))
-            for lib in lib_list:
-                if string.find(lib, 'libX11') == 0:
-                    ind = lib_list.index(lib)
-                    add_lib_dirs.append(os.path.dirname(lib_list[ind + 2]))
-                    #break
-                    add_inc_dirs.append(os.path.join(os.path.dirname(lib_list[ind + 2]), '../include'))
-
-if not ms_windows :
-    # Should we do something about X if we're using aqua on a mac?
-    # Apparently it doesn't cause any problems.
-    find_x()
-
-#
-
-def dir_clean(list) :
-    # We have a list of directories.  Remove any that don't exist.
-    r = [ ]
-    for x in list :
-        if os.path.isdir(x) :
-            r.append(x)
-    return r
-
-add_lib_dirs = dir_clean(add_lib_dirs)
-add_inc_dirs = dir_clean(add_inc_dirs)
-
-## C extensions
-
-# by default, we don't build any C extensions on MS Windows.  The
-# user probably does not have a compiler, and these extensions just
-# aren't that important.
-
-if not ms_windows or build_c :
-    # windows users have to do without the CL sscanf() function,
-    # unless you explicitly set build_c true.
-    PYRAF_EXTENSIONS.append(
-        distutils.core.Extension(
-            'pyraf.sscanf'+C_EXT_MODNAME_ENDING,
-            ['src/sscanfmodule.c'],
-#           extra_compile_args = ['-arch','i386','-arch','x86_64'],
-#           extra_link_args =    ['-arch','i386','-arch','x86_64'],
-            include_dirs=add_inc_dirs
-        )
-    )
-
-if not ms_windows :
-    # windows users do not have X windows, so we never need the X
-    # support
-    PYRAF_EXTENSIONS.append(
-        distutils.core.Extension(
-            'pyraf.xutil'+C_EXT_MODNAME_ENDING,
-            ['src/xutil.c'],
-            include_dirs=add_inc_dirs,
-            library_dirs=add_lib_dirs,
-#           extra_compile_args = ['-arch','i386','-arch','x86_64'],
-#           extra_link_args =    ['-arch','i386','-arch','x86_64'],
-            libraries = [x_libraries]
-        )
-    )
-
-
-## what scripts do we install
-
-if ms_windows :
-    # On windows, you use "runpyraf.py" -  it can't be pyraf.py
-    # because then you can't "import pyraf" in the script.
-    # Instead, you ( double-click the icon for runpyraf.py ) or
-    # ( type "runpyraf.py" ) or ( type "pyraf" to get pyraf.bat ).
-
-    # adapt to installing in the pyraf package or installing stsci_python
-    if os.path.exists('pyraf'):
-        scriptdir = [ 'pyraf', 'scripts' ]
-    else :
-        scriptdir = [ 'scripts' ]
-
-    # copy the pyraf main program to the name we want it installed as
-    shutil.copy(
-        os.path.join( * ( scriptdir + [ 'pyraf' ] ) ) ,
-        os.path.join( * ( scriptdir + [ 'runpyraf.py' ] ) )
-        )
-
-    # list of scripts for windows
-    scriptlist = ['scripts/runpyraf.py', 'scripts/pyraf.bat']
-
-else :
-    # on linux/mac, you have just the one main program
-    scriptlist = ['scripts/pyraf' ]
-
-## icon on the desktop
-
-if ms_windows :
-    # Install optional launcher onto desktop
-    if 'USERPROFILE' in os.environ:
-       dtop = os.environ['USERPROFILE']+os.sep+'Desktop'
-       if os.path.exists(dtop):
-           shortcut = dtop+os.sep+"PyRAF.bat"
-           if os.path.exists(shortcut):
-               os.remove(shortcut)
-           target = sys.exec_prefix+os.sep+"Scripts"+os.sep+"runpyraf.py"
-           f = open(shortcut, 'w')
-           f.write('@echo off\necho.\ncd %APPDATA%\n')
-           f.write('echo Launching PyRAF ...\necho.\n')
-           f.write(target)
-           f.write('\necho.\npause\n')
-           f.close()
-           print('Installing PyRAF.bat to -> '+dtop)
-       else:
-           print('Error: User desktop not found at: '+dtop)
-    else:
-       print('Error: User desktop location unknown')
-
-    # NOTE: a much better solution would be to use something (bdist) to
-    # create installer binaries for Windows, since they are: 1) easier on
-    # the win user, and 2) can be used to create actual desktop shortcuts,
-    # not this kludgy .bat file.  If we take out the two libraries built
-    # from the bdist run (which aren't used on Win anyway) then we can
-    # automate this build from Linux (yes, for Windows), via:
-    #    python setup.py bdist_wininst --no-target-compile --plat-name=win32
-    # and
-    #    python setup.py bdist_wininst --no-target-compile --plat-name=win-amd64
-    # We would need to provide both 32- and 64-bit versions since the
-    # installer will fail gracelessly if you try to install one and the Win
-    # node only has the other (listed in its registry).  The above 64-bit bdist
-    # fails currently on thor but the 32-bit bdist works.  Need to investigate.
-
-    # Another option to create the shortcut is to bundle win32com w/ installer.
-
-## the defsetup interface is here:
-
-## pkg
-
-pkg = "pyraf"
-
-# data files
-
-DATA_FILES = [ ( pkg,
-                    ['data/blankcursor.xbm',
-                    'data/epar.optionDB',
-                    'data/pyraflogo_rgb_web.gif',
-                    'data/ipythonrc-pyraf',
-                    'LICENSE.txt',
-                    ]
-                )
-        ]
-
-if not ms_windows and sys.version_info[0] < 3:
-    # clcache is a pre-loaded set of CL files already converted to
-    # python.  There are none on Windows, so we don't need them.
-    # We also are not yet using them in PY3K.
-    # Leaving them out makes the install go a lot faster.
-    DATA_FILES += [
-                (pkg+'/clcache',  [ "data/clcache/*" ] )
-        ]
-
-
-## setupargs
-
-setupargs = {
-    'version' :			    "2.x", # see lib's __init__.py
-    'description' :		    "A Python based CL for IRAF",
-    'author' :			    "Rick White, Perry Greenfield, Chris Sontag",
-    'maintainer_email' :	"help at stsci.edu",
-    'url' :			        "http://www.stsci.edu/resources/software_hardware/pyraf",
-    'license' :			    "http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE",
-    'platforms' :			["unix"],
-    'data_files' :			DATA_FILES,
-    'scripts' :			    scriptlist,
-    'ext_modules' :			PYRAF_EXTENSIONS,
-    'package_dir' :         { 'pyraf' : 'lib/pyraf' },
-}
-
-
diff --git a/distribute_setup.py b/distribute_setup.py
deleted file mode 100644
index bbb6f3c..0000000
--- a/distribute_setup.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.19"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-    finally:
-        os.chdir(old_wd)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>="+version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unkown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Removing elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install')+1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index+1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patched done.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def main(argv, version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
-
-if __name__ == '__main__':
-    main(sys.argv[1:])
diff --git a/docs/ecl.html b/docs/ecl.html
deleted file mode 100644
index 55dd24e..0000000
--- a/docs/ecl.html
+++ /dev/null
@@ -1,426 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<HTML>
-<HEAD>
-	<META HTTP-EQUIV="CONTENT-TYPE" CONTENT="text/html; charset=utf-8">
-	<TITLE></TITLE>
-	<META NAME="GENERATOR" CONTENT="OpenOffice.org 2.0  (Linux)">
-	<META NAME="AUTHOR" CONTENT="Todd Miller">
-	<META NAME="CREATED" CONTENT="20061117;8415800">
-	<META NAME="CHANGEDBY" CONTENT="Todd Miller">
-	<META NAME="CHANGED" CONTENT="20061121;13283200">
-	<STYLE>
-	<!--
-		@page { size: 8.5in 11in }
-		TD P.western { font-family: "Arial", sans-serif }
-		H1.western { font-family: "Arial", sans-serif }
-		P.western { font-family: "Arial", sans-serif }
-		TH P.western { font-family: "Arial", sans-serif }
-	-->
-	</STYLE>
-</HEAD>
-<BODY LANG="en-US" DIR="LTR">
-<H1 CLASS="western" ALIGN=CENTER><FONT SIZE=7>PyRAF ECL Support</FONT></H1>
-<H1 CLASS="western">Overview</H1>
-<P CLASS="western">PyRAF can now support the error handling
-constructs of IRAF's ECL scripting language.  In addition to PyRAF's
-classic Python exception handling, PyRAF's  ECL support enables
-certain errors to be trapped and to cause exception handling
-statements to execute. 
-</P>
-<P CLASS="western">PyRAF's ECL captures the following errors:</P>
-<UL>
-	<LI><DD><FONT FACE="Arial, sans-serif">System exceptions (FPE,
-	segfault, etc) thrown by compiled tasks (i.e. SPP or C)</FONT></DD><LI><DD>
-	<FONT FACE="Arial, sans-serif">error() calls from compiled tasks</FONT></DD><LI><DD>
-	<FONT FACE="Arial, sans-serif">error() calls from CL scripts</FONT></DD><LI><DD STYLE="margin-bottom: 0.2in">
-	<FONT FACE="Arial, sans-serif">division by zero in CL scripts</FONT></DD></UL>
-<H1 CLASS="western">Activating ECL Support</H1>
-<P CLASS="western">During it's introduction, PyRAF's ECL support is
-optional and is only activated by one of the following means:</P>
-<TABLE WIDTH=703 BORDER=1 CELLPADDING=4 CELLSPACING=3 STYLE="page-break-inside: avoid">
-	<COL WIDTH=211>
-	<COL WIDTH=465>
-	<TR VALIGN=TOP>
-		<TD WIDTH=211>
-			<P CLASS="western" ALIGN=CENTER><B>Activation Method</B></P>
-		</TD>
-		<TD WIDTH=465>
-			<P CLASS="western" ALIGN=CENTER><B>Description</B></P>
-		</TD>
-	</TR>
-	<TR VALIGN=TOP>
-		<TD WIDTH=211>
-			<PRE>pyraf -e</PRE>
-		</TD>
-		<TD WIDTH=465>
-			<P CLASS="western">Use command line switch -e when invoking pyraf.</P>
-		</TD>
-	</TR>
-	<TR VALIGN=TOP>
-		<TD WIDTH=211>
-			<PRE>pyaf –ecl</PRE>
-		</TD>
-		<TD WIDTH=465>
-			<P CLASS="western">Use the verbose switch, --ecl, when invoking
-			pyraf</P>
-		</TD>
-	</TR>
-	<TR VALIGN=TOP>
-		<TD WIDTH=211>
-			<PRE>epyraf</PRE>
-		</TD>
-		<TD WIDTH=465>
-			<P CLASS="western">Link pyraf to epyraf and run as epyraf.</P>
-		</TD>
-	</TR>
-	<TR VALIGN=TOP>
-		<TD WIDTH=211>
-			<PRE>setenv PYRAF_USE_ECL 1</PRE>
-		</TD>
-		<TD WIDTH=465>
-			<P CLASS="western">Set the environment variable PYRAF_USE_ECL to 1</P>
-		</TD>
-	</TR>
-</TABLE>
-<P CLASS="western"><BR><BR>
-</P>
-<P CLASS="western">In the absence of the above methods,  PyRAF runs
-without ECL support.</P>
-<H1 CLASS="western">New ECL Keywords</H1>
-<P CLASS="western">PyRAF's ECL support uses the following words as
-keywords, i.e. words which are part of ECL and can no longer be used
-as program identifiers (i.e. variable, task, or procedure names):</P>
-<UL>
-	<LI><PRE>iferr</PRE>
-	<LI><PRE>ifnoerr</PRE>
-	<LI><PRE STYLE="margin-bottom: 0.2in">then</PRE>
-</UL>
-<H1 CLASS="western">ECL Grammar Extensions</H1>
-<P CLASS="western" STYLE="background: transparent">PyRAF's support
-for ECL includes two new symmetric statements, <SPAN STYLE="background: transparent"><FONT COLOR="#0000ff"><B><FONT COLOR="#000000">iferr</FONT></B>
-</FONT></SPAN>and <B><FONT COLOR="#000000">ifnoerr</FONT></B><FONT COLOR="#0000ff">.</FONT>
-iferr is used to describe what should be done when an error <B>occurs</B>
-in a group of guarded statements, and ifnoerr is used to emphasize
-what should be done when an error <B>does not occur</B> in a group of
-guarded statements. A “guarded statement” is essentially a block
-of ordinary CL statements for which errors should be trapped. An
-“except action” are the statement(s) which should be executed
-when an error occurs (iferr) or does not occur (ifnoerr). An “else
-action” are the statement(s) which should be executed when an error
-occurs. Below is the section of the PyRAF grammar which describes ECL
-iferr statements; IFERR, IFNOERR, THEN, and ELSE denote keyword
-literals:</P>
-<PRE STYLE="margin-left: 0.79in">iferr_stmt    ::= if_kind guarded_stmt except_action
-iferr_stmt    ::= if_kind guarded_stmt opt_newline THEN except_action
-iferr_stmt    ::= if_kind guarded_stmt opt_newline THEN except_action opt_newline ELSE else_action
-if_kind       ::= IFERR
-if_kind       ::= IFNOERR                
-guarded_stmt  ::=  { opt_newline statement_list }
-except_action ::= compound_stmt
-else_action   ::= compound_stmt</PRE><P CLASS="western">
-A compound statement can be a single statement or block of
-statements.</P>
-<H1 CLASS="western">ECL Syntax Examples</H1>
-<P CLASS="western">The simplest form of ECL error statement is a
-block of guarded statements followed by a single handler statement
-which should execute when one or more of the guarded statements fail.
-The <B>then</B> keyword is optional in this form. A curious property
-of ECL error handling is that <I>all guarded statements execute</I>,
-even those following the first failed statement. This contrasts
-sharply with Python's exception handling model which performs it's
-traceback immediately following the first error.</P>
-<PRE STYLE="margin-left: 2.36in">iferr {
-<guarded statements>
-} <error-handler statement>
-
-
-iferr {
-<guarded statements>
-} then
-<error-handler statement></PRE><P CLASS="western">
-When a block of error handler statements is desired, the <B>then</B>
-keyword should be used to be compatible with ECL. An optional <B>else</B>
-clause may be used to specify what to do when the guarded statements
-all succeed; either the <B>then</B> clause or the <B>else</B> clause
-is executed, but never both.</P>
-<PRE STYLE="margin-left: 2.36in">iferr {
-<guarded statements>
-} then {
-<error-handler statements>
-} else {
-<non-error-handler statements>
-}</PRE><P CLASS="western">
-There is a symmetric form of <B>iferr</B> which uses the keyword
-<B>ifnoerr</B>. It is perhaps most useful when one doesn't want to
-specify anything to handle an error, but only specify what to do when
-the guarded statements succeed.  <B>ifnoerr</B> is effectively <B>iferr
-</B>with the error-handling and success-handling statements reversed.</P>
-<PRE STYLE="margin-left: 2.36in">ifnoerr {
-<guarded statements>
-} then {
-<non-error statements>
-} 
-
-
-
-ifnoerr {
-<guarded statements>
-} then {
-<non-error statements>
-} else {
-<error handling statements>
-}</PRE><H1 CLASS="western">
-ECL Pseudo Variables</H1>
-<P CLASS="western">PyRAF in ECL mode defines the following pseudo
-variables which are associated with each <I>task</I> object:</P>
-<TABLE WIDTH=656 BORDER=1 CELLPADDING=4 CELLSPACING=3 STYLE="page-break-inside: avoid">
-	<COL WIDTH=139>
-	<COL WIDTH=490>
-	<THEAD>
-		<TR VALIGN=TOP>
-			<TD WIDTH=139>
-				<P CLASS="western" ALIGN=CENTER><B>Variable</B></P>
-			</TD>
-			<TD WIDTH=490>
-				<P CLASS="western" ALIGN=CENTER><B>Description</B></P>
-			</TD>
-		</TR>
-	</THEAD>
-	<TBODY>
-		<TR VALIGN=TOP>
-			<TD WIDTH=139>
-				<PRE>$errno</PRE>
-			</TD>
-			<TD WIDTH=490>
-				<P CLASS="western">The numerical value associated with the last
-				error.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=139>
-				<PRE>$errtask</PRE>
-			</TD>
-			<TD WIDTH=490>
-				<P CLASS="western">The task which caused the error.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=139>
-				<PRE>$errmsg</PRE>
-			</TD>
-			<TD WIDTH=490>
-				<P CLASS="western">The text message associated with the last
-				error.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=139>
-				<PRE>$err_dzvalue</PRE>
-			</TD>
-			<TD WIDTH=490>
-				<P CLASS="western">The <I>result</I> value of a division by zero.</P>
-			</TD>
-		</TR>
-	</TBODY>
-</TABLE>
-<P CLASS="western"><BR><BR>
-</P>
-<P CLASS="western">Since pseudo variables are associated with a task
-object, they have several properties:</P>
-<OL>
-	<LI><P CLASS="western">They are persistent, i.e. not cleared until a
-	task is re-run, and only then based on erract's <B>clear</B> field.
-	They are however overwritten with each new error.</P>
-	<LI><P CLASS="western">They can be accessed in CL code as written in
-	the table above.</P>
-	<LI><P CLASS="western">They can be accessed from the command line
-	using DOLLAR notation after a traceback has occurred:</P>
-	<P CLASS="western">--> iraf.failed_task.DOLLARerrno</P>
-	<P CLASS="western">57</P>
-	<P CLASS="western">--> iraf.failed_task.DOLLARerrmsg</P>
-	<P CLASS="western">'becuz something went wrong...'</P>
-	<P CLASS="western">--> iraf.failed_task_caller.DOLLARerrtask</P>
-	<P CLASS="western">failed_task</P>
-	<LI><P CLASS="western">They are not re-entrant – i.e., recursive
-	procedures using them are only referring to a single storage
-	location and will interfere with one another, only recording the
-	last error.</P>
-</OL>
-<H1 CLASS="western">ECL Functions</H1>
-<P CLASS="western">PyRAF in ECL mode defines the following error
-handling functions which are analogous to the pseudo variables and
-easier to use.</P>
-<TABLE WIDTH=703 BORDER=1 CELLPADDING=4 CELLSPACING=3 STYLE="page-break-inside: avoid">
-	<COL WIDTH=102>
-	<COL WIDTH=574>
-	<THEAD>
-		<TR VALIGN=TOP>
-			<TD WIDTH=102>
-				<P CLASS="western" ALIGN=CENTER><B>Function</B></P>
-			</TD>
-			<TD WIDTH=574>
-				<P CLASS="western" ALIGN=CENTER><B>Description</B></P>
-			</TD>
-		</TR>
-	</THEAD>
-	<TBODY>
-		<TR VALIGN=TOP>
-			<TD WIDTH=102>
-				<PRE>error()</PRE>
-			</TD>
-			<TD WIDTH=574>
-				<P CLASS="western">Forces a CL error state, generally raising a
-				traceback.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=102>
-				<PRE>errno()</PRE>
-			</TD>
-			<TD WIDTH=574>
-				<P CLASS="western">Returns the numerical value associated with
-				the last error.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=102>
-				<PRE>errmsg()</PRE>
-			</TD>
-			<TD WIDTH=574>
-				<P CLASS="western">Returns the message associated with the last
-				error.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=102>
-				<PRE>errtask()</PRE>
-			</TD>
-			<TD WIDTH=574>
-				<P CLASS="western">Returns the task associated with the last
-				error.</P>
-			</TD>
-		</TR>
-	</TBODY>
-</TABLE>
-<H1 CLASS="western">Division By Zero</H1>
-<P CLASS="western">PyRAF's ECL mode now traps division by zero and
-either raises and ECL exception or returns the default result value
-contained in the variable <B>$err_dzvalue</B>.</P>
-<P CLASS="western">So in ECL, the following guarded code:</P>
-<PRE STYLE="margin-left: 0.79in">iferr {
-$err_dzvalue = 33
-print 1/0
-}
-</PRE><P CLASS="western">
-Outputs:</P>
-<PRE STYLE="margin-left: 0.79in">Warning on line 6 of 'nested5':  divide by zero - using $err_dzvalue = 33
-33</PRE><P CLASS="western">
-While un-guarded code such as:</P>
-<PRE STYLE="margin-left: 0.79in">$err_dzvalue = 33
-print 1/0</PRE><P CLASS="western">
-Generates a traceback and outputs:</P>
-<PRE STYLE="margin-left: 0.79in">ERROR (1): divide by zero
-   'print 1/0'
-      line 4: /home/jmiller/nested5.cl
-Traceback (innermost last):
-  File "<CL script CL1>", line 8, in <module>
-IrafError: ERROR: divide by zero
-                                                                                                                                                                                </PRE><H1 CLASS="western">
-Controlling ECL Behavior using erract</H1>
-<P CLASS="western">PyRAF's ECL mode behavior is controlled by a
-multi-field environment variable named <B>erract</B>. erract is set
-in PyRAF as a string containing one or more field modifiers. 
-</P>
-<PRE STYLE="margin-left: 1.58in; text-align: left">--> show erract
-abort trace flpr clear full ecl</PRE><P CLASS="western" ALIGN=LEFT>
-Multiple fields may be changed with a single “set” command, and
-not all fields need be specified. Fields not mentioned in a set
-statement retain their old values.</P>
-<PRE STYLE="margin-left: 1.58in; text-align: left">--> set erract="noflpr noclear"
-
---> show erract
-abort trace noflpr noclear full ecl
-</PRE>
-<TABLE WIDTH=703 BORDER=1 CELLPADDING=4 CELLSPACING=3>
-	<COL WIDTH=147>
-	<COL WIDTH=529>
-	<THEAD>
-		<TR VALIGN=TOP>
-			<TH WIDTH=147>
-				<P CLASS="western">Erract Field</P>
-			</TH>
-			<TH WIDTH=529>
-				<P CLASS="western">Description</P>
-			</TH>
-		</TR>
-	</THEAD>
-	<TBODY>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>abort / noabort</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Outside an iferr block, a failed task results
-				in an immediate error. Inside an iferr block, a failed task
-				causes an error as soon as the iferr guarded block is exited. Set
-				to noabort and errors won't stop execution regardless of iferr
-				usage.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>trace / notrace</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Print traceback information to stderr, or
-				don't print.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>flpr / noflpr</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Flush the process cache on error, or don't
-				flush.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>clear / noclear</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Clear the error pseudo variables for a task
-				before running it, or retain the old error values which may or
-				may not be overwritten.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>full / nofull</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Print traceback information on the entire
-				procedure call chain, or only on the innermost CL procedure.</P>
-			</TD>
-		</TR>
-		<TR VALIGN=TOP>
-			<TD WIDTH=147>
-				<PRE>ecl  / noecl</PRE>
-			</TD>
-			<TD WIDTH=529>
-				<P CLASS="western">Use the new ECL error handling, or use classic
-				PyRAF/Python exception handling inside iferr and ifnoerr blocks.
-				Setting noecl causes an error to raise an immediate exception and
-				give a Python traceback.</P>
-				<P CLASS="western">This is a runtime control.   It does not
-				affect ECL compilation which can only be activated at system
-				startup.</P>
-			</TD>
-		</TR>
-	</TBODY>
-</TABLE>
-<P CLASS="western"><BR><BR>
-</P>
-</BODY>
-</HTML>
\ No newline at end of file
diff --git a/docs/ipython_notes.txt b/docs/ipython_notes.txt
deleted file mode 100644
index e62c491..0000000
--- a/docs/ipython_notes.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-IPython support for PyRAF works by replacing the PyRAF command line
-interpreter with the IPython shell.  This is accomplished using
-IPython's profile mechanism and input prefilter hook.  Initiating a
-PyRAF session using IPython can be accomplished two ways:
-
-1. Run PyRAF with the --ipython switch
-
-% pyraf --ipython
-
-This has the advantage that it installs the ipythonrc-pyraf profile in
-your $HOME/.ipython if needed before running ipython.  IPython needs
-to be installed first or it will not install IPython's own rc files in
-~/.ipython.
-
-2. Running IPython using the pyraf profile:
-
-After IPython, PyRAF, and $HOME/.ipython/ipythonrc-pyraf are
-installed, you can just run ipython using the pyraf profile.
-
-% ipython -p pyraf
-PyRAF 1.3dev (2006Mar24) Copyright (c) 2002 AURA
-Python 2.4.2 (#2, Oct 19 2005, 17:07:05)
-Type "copyright", "credits" or "license" for more information.
-
-IPython 0.7.2.svn -- An enhanced Interactive Python.
-?       -> Introduction to IPython's features.
-%magic  -> Information about IPython's 'magic' % functions.
-help    -> Python's own help system.
-object? -> Details about 'object'. ?object also works, ?? prints more.
-
-IPython profile: pyraf
-
-In [1]:
-
-Compared to the standard PyRAF command line shell, running PyRAF under
-IPython has these differences:
-
-a. PyRAF's "." directives are rendered obsolete and are not supported.
-There are IPython equivalents.
-
-b. As a result of all the "magic" in IPython and PyRAF, there is the
-potential for overlap and conflict between the two.  By default,
-IPython's magic commands are executed ahead of PyRAF commands, so if
-you type "history" you get the IPython command logging system, not the
-PyRAF system.  This is a little counter-intuitive since PyRAF support
-is added as an input pre-filter and you might expect it to get
-precedence by default.  IPython magic gets precedence but this
-behavior can be fine tuned using two IPython "magic" commands:
-
-In[2]: set_pyraf_magic <magic_function> on
-
-In[3]: set_pyraf_magic <magic_function> off
-
-This can be further disambiguated by saying "%<command>" which means
-use the IPython version of <command>.  There is currently no
-equivalent prefix for declaring "use PyRAF".
-
-c. There is an option to choose between default IPython exception
-handling and PyRAF exception handling.  PyRAF exception handling has
-the benefit that it hides the traceback from PyRAF itself; this keeps
-a user within the realm of their own application rather than debugging
-IPython or PyRAF.
-
-In[4]: use_pyraf_traceback on
-
-If you prefer the ipython exception handling system,  you can that instead
-with the following command:
-
-In[5]: use_pyraf_traceback off
-
-
-d. PyRAF's support for IPython includes CL emulation which is on by default.
-This can be turned on/off with the following commands:
-
-In[6]: use_pyraf_cl_emulation on
-
-In[7]: use_pyraf_cl_emulation off
-
-e. PyRAF's support for IPython includes PyRAF-enhanced readline completion
-which filters input before IPython.  It can be turned on and off using:
-
-In[8]: use_pyraf_readline_completer on
-
-In[9]: use_pyraf_readline_completer off
-
diff --git a/lib/pyraf.egg-info/PKG-INFO b/lib/pyraf.egg-info/PKG-INFO
index 0659b40..ac4ce9f 100644
--- a/lib/pyraf.egg-info/PKG-INFO
+++ b/lib/pyraf.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pyraf
-Version: 2.1.10
+Version: 2.1.11
 Summary: Provides a Pythonic interface to IRAF that can be used in place of the existing IRAF CL
 Home-page: http://www.stsci.edu/resources/software_hardware/pyraf
 Author: Rick White, Perry Greenfield, Chris Sontag
diff --git a/lib/pyraf.egg-info/SOURCES.txt b/lib/pyraf.egg-info/SOURCES.txt
index 6e2cacf..a42ec73 100644
--- a/lib/pyraf.egg-info/SOURCES.txt
+++ b/lib/pyraf.egg-info/SOURCES.txt
@@ -1,14 +1,9 @@
-LICENSE.txt
 MANIFEST.in
 README
-defsetup.py
-distribute_setup.py
 setup.cfg
 setup.py
-shortcut.vbs
 data/blankcursor.xbm
 data/epar.optionDB
-data/ipythonrc-pyraf
 data/pyraflogo_rgb_web.gif
 data/clcache/+0ji52t3OceHbUM8JZCVzA==
 data/clcache/+4CBmmvoaDLSEeeQiK0DWA==
@@ -662,8 +657,6 @@ data/clcache/zapkya3TENsDYonGfybG+A==
 data/clcache/zhg9FR4mVNPjxHZMyGYYeg==
 data/clcache/zmUPlt0Gho6Y1wUC9aPTDw==
 data/clcache/zx2V7u++rn8rTqDpZW4rRg==
-docs/ecl.html
-docs/ipython_notes.txt
 docs/pyraf_tutorial.pdf
 docs/pyraf_tutorial.ps.gz
 lib/pyraf_setup.py
@@ -729,8 +722,7 @@ lib/pyraf/tpar.py
 lib/pyraf/urwfiledlg.py
 lib/pyraf/urwutil.py
 lib/pyraf/version.py
-lib/pyraf/version.py.orig1
-lib/pyraf/version.py.orig2
+lib/pyraf/version_vcs.py
 lib/pyraf/wutil.py
 lib/pyraf.egg-info/PKG-INFO
 lib/pyraf.egg-info/SOURCES.txt
@@ -749,6 +741,28 @@ required_pkgs/d2to1/ez_setup.py
 required_pkgs/d2to1/setup.cfg
 required_pkgs/d2to1/setup.py
 required_pkgs/d2to1/tox.ini
+required_pkgs/d2to1/.git/HEAD
+required_pkgs/d2to1/.git/config
+required_pkgs/d2to1/.git/description
+required_pkgs/d2to1/.git/index
+required_pkgs/d2to1/.git/packed-refs
+required_pkgs/d2to1/.git/hooks/applypatch-msg.sample
+required_pkgs/d2to1/.git/hooks/commit-msg.sample
+required_pkgs/d2to1/.git/hooks/post-update.sample
+required_pkgs/d2to1/.git/hooks/pre-applypatch.sample
+required_pkgs/d2to1/.git/hooks/pre-commit.sample
+required_pkgs/d2to1/.git/hooks/pre-push.sample
+required_pkgs/d2to1/.git/hooks/pre-rebase.sample
+required_pkgs/d2to1/.git/hooks/prepare-commit-msg.sample
+required_pkgs/d2to1/.git/hooks/update.sample
+required_pkgs/d2to1/.git/info/exclude
+required_pkgs/d2to1/.git/logs/HEAD
+required_pkgs/d2to1/.git/logs/refs/heads/master
+required_pkgs/d2to1/.git/logs/refs/remotes/origin/HEAD
+required_pkgs/d2to1/.git/objects/pack/pack-159bd5c22ad588492f346ac688ae5f4da0dc61f3.idx
+required_pkgs/d2to1/.git/objects/pack/pack-159bd5c22ad588492f346ac688ae5f4da0dc61f3.pack
+required_pkgs/d2to1/.git/refs/heads/master
+required_pkgs/d2to1/.git/refs/remotes/origin/HEAD
 required_pkgs/d2to1/d2to1/__init__.py
 required_pkgs/d2to1/d2to1/core.py
 required_pkgs/d2to1/d2to1/util.py
@@ -784,6 +798,28 @@ required_pkgs/stsci.distutils/distribute_setup.py
 required_pkgs/stsci.distutils/setup.cfg
 required_pkgs/stsci.distutils/setup.py
 required_pkgs/stsci.distutils/tox.ini
+required_pkgs/stsci.distutils/.git/HEAD
+required_pkgs/stsci.distutils/.git/config
+required_pkgs/stsci.distutils/.git/description
+required_pkgs/stsci.distutils/.git/index
+required_pkgs/stsci.distutils/.git/packed-refs
+required_pkgs/stsci.distutils/.git/hooks/applypatch-msg.sample
+required_pkgs/stsci.distutils/.git/hooks/commit-msg.sample
+required_pkgs/stsci.distutils/.git/hooks/post-update.sample
+required_pkgs/stsci.distutils/.git/hooks/pre-applypatch.sample
+required_pkgs/stsci.distutils/.git/hooks/pre-commit.sample
+required_pkgs/stsci.distutils/.git/hooks/pre-push.sample
+required_pkgs/stsci.distutils/.git/hooks/pre-rebase.sample
+required_pkgs/stsci.distutils/.git/hooks/prepare-commit-msg.sample
+required_pkgs/stsci.distutils/.git/hooks/update.sample
+required_pkgs/stsci.distutils/.git/info/exclude
+required_pkgs/stsci.distutils/.git/logs/HEAD
+required_pkgs/stsci.distutils/.git/logs/refs/heads/master
+required_pkgs/stsci.distutils/.git/logs/refs/remotes/origin/HEAD
+required_pkgs/stsci.distutils/.git/objects/pack/pack-ce874445d9fd1543519cf1b2af85ef8be1056f35.idx
+required_pkgs/stsci.distutils/.git/objects/pack/pack-ce874445d9fd1543519cf1b2af85ef8be1056f35.pack
+required_pkgs/stsci.distutils/.git/refs/heads/master
+required_pkgs/stsci.distutils/.git/refs/remotes/origin/HEAD
 required_pkgs/stsci.distutils/docs/Makefile
 required_pkgs/stsci.distutils/docs/source/api.rst
 required_pkgs/stsci.distutils/docs/source/changelog.rst
@@ -823,6 +859,28 @@ required_pkgs/stsci.tools/ez_setup.py
 required_pkgs/stsci.tools/setup.cfg
 required_pkgs/stsci.tools/setup.cfg.orig
 required_pkgs/stsci.tools/setup.py
+required_pkgs/stsci.tools/.git/HEAD
+required_pkgs/stsci.tools/.git/config
+required_pkgs/stsci.tools/.git/description
+required_pkgs/stsci.tools/.git/index
+required_pkgs/stsci.tools/.git/packed-refs
+required_pkgs/stsci.tools/.git/hooks/applypatch-msg.sample
+required_pkgs/stsci.tools/.git/hooks/commit-msg.sample
+required_pkgs/stsci.tools/.git/hooks/post-update.sample
+required_pkgs/stsci.tools/.git/hooks/pre-applypatch.sample
+required_pkgs/stsci.tools/.git/hooks/pre-commit.sample
+required_pkgs/stsci.tools/.git/hooks/pre-push.sample
+required_pkgs/stsci.tools/.git/hooks/pre-rebase.sample
+required_pkgs/stsci.tools/.git/hooks/prepare-commit-msg.sample
+required_pkgs/stsci.tools/.git/hooks/update.sample
+required_pkgs/stsci.tools/.git/info/exclude
+required_pkgs/stsci.tools/.git/logs/HEAD
+required_pkgs/stsci.tools/.git/logs/refs/heads/master
+required_pkgs/stsci.tools/.git/logs/refs/remotes/origin/HEAD
+required_pkgs/stsci.tools/.git/objects/pack/pack-f16c85e988faf4e46fed9152d17fa03613bd4092.idx
+required_pkgs/stsci.tools/.git/objects/pack/pack-f16c85e988faf4e46fed9152d17fa03613bd4092.pack
+required_pkgs/stsci.tools/.git/refs/heads/master
+required_pkgs/stsci.tools/.git/refs/remotes/origin/HEAD
 required_pkgs/stsci.tools/doc/Makefile
 required_pkgs/stsci.tools/doc/make.bat
 required_pkgs/stsci.tools/doc/source/analysis.rst
@@ -851,6 +909,7 @@ required_pkgs/stsci.tools/lib/stsci/tools/clipboard_helper.py
 required_pkgs/stsci.tools/lib/stsci/tools/compmixin.py
 required_pkgs/stsci.tools/lib/stsci/tools/configobj.py
 required_pkgs/stsci.tools/lib/stsci/tools/convertgeis.py
+required_pkgs/stsci.tools/lib/stsci/tools/convertlog.py
 required_pkgs/stsci.tools/lib/stsci/tools/convertwaiveredfits.py
 required_pkgs/stsci.tools/lib/stsci/tools/dialog.py
 required_pkgs/stsci.tools/lib/stsci/tools/editpar.py
@@ -896,21 +955,18 @@ required_pkgs/stsci.tools/lib/stsci/tools/tests/cdva2.fits
 required_pkgs/stsci.tools/lib/stsci/tools/tests/o4sp040b0_raw.fits
 required_pkgs/stsci.tools/lib/stsci/tools/tests/testStpyfits.py
 required_pkgs/stsci.tools/lib/stsci/tools/tests/test_xyinterp.py
+required_pkgs/stsci.tools/scripts/convertlog
 required_pkgs/stsci.tools/scripts/convertwaiveredfits
 required_pkgs/stsci.tools/scripts/stscidocs
 scripts/pyraf
-scripts/pyraf.bat
 src/sscanfmodule.c
 src/xutil.c
 tools/cachecompare.py
 tools/cachesearch.py
 tools/checkcompileall.py
 tools/compileallcl.py
-tools/createTarBall.csh
 tools/fixcache.py
 tools/loadall.py
 tools/plotbench.py
 tools/simple_api_calls.py
-tools/subproc_Ph.py
-tools/test_usage.csh
-tools/test_usage.save
\ No newline at end of file
+tools/subproc_Ph.py
\ No newline at end of file
diff --git a/lib/pyraf/GkiMpl.py b/lib/pyraf/GkiMpl.py
index e8e4ec7..39a1a3e 100644
--- a/lib/pyraf/GkiMpl.py
+++ b/lib/pyraf/GkiMpl.py
@@ -1,7 +1,7 @@
 """
 matplotlib implementation of the gki kernel class
 
-$Id: GkiMpl.py 1742 2012-05-23 13:45:59Z sontag $
+$Id$
 """
 
 from __future__ import division # confidence high
diff --git a/lib/pyraf/MplCanvasAdapter.py b/lib/pyraf/MplCanvasAdapter.py
index 645b0cf..551ca88 100644
--- a/lib/pyraf/MplCanvasAdapter.py
+++ b/lib/pyraf/MplCanvasAdapter.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """
-$Id: MplCanvasAdapter.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 """
 
 from __future__ import division # confidence high
diff --git a/lib/pyraf/Ptkplot.py b/lib/pyraf/Ptkplot.py
index b7c7042..7802ed4 100644
--- a/lib/pyraf/Ptkplot.py
+++ b/lib/pyraf/Ptkplot.py
@@ -2,7 +2,7 @@
 
 
 """
-$Id: Ptkplot.py 1708 2012-04-30 17:00:37Z sontag $
+$Id$
 """
 
 from __future__ import division # confidence high
diff --git a/lib/pyraf/__init__.py b/lib/pyraf/__init__.py
index 3e93718..ce78f82 100644
--- a/lib/pyraf/__init__.py
+++ b/lib/pyraf/__init__.py
@@ -4,7 +4,7 @@
 Checks sys.argv[0] == 'pyraf' to determine whether IRAF initialization
 is done verbosely or quietly.
 
-$Id: __init__.py 2200 2014-06-04 17:40:38Z sontag $
+$Id$
 
 R. White, 2000 February 18
 """
@@ -15,9 +15,13 @@ from .version import *
 import os, sys, __main__
 
 # dev only: add revision number if possible (if not done yet)
-if __version__.endswith('dev') and len(__svn_revision__) > 0 and \
-   __svn_revision__[0].isdigit():
-    __version__ = __version__+__svn_revision__
+if __version__.endswith('dev'):
+    try: # did we set this via git?
+        from .version_vcs import __vcs_revision__
+        __version__ = __version__+'-'+__vcs_revision__
+    except: # must be using svn still
+        if len(__svn_revision__) > 0 and __svn_revision__[0].isdigit():
+            __version__ = __version__+__svn_revision__
 
 # Dump version and exit here, if requested
 if '-V' in sys.argv or '--version' in sys.argv:
diff --git a/lib/pyraf/aqutil.py b/lib/pyraf/aqutil.py
index e01dfac..b157756 100644
--- a/lib/pyraf/aqutil.py
+++ b/lib/pyraf/aqutil.py
@@ -2,7 +2,7 @@
 not possible in Tkinter.  In general, an attempt is made to use the Pyobjc
 bridging package so that compiling another C extension is not needed.
 
-$Id: aqutil.py 2399 2015-07-17 19:47:56Z bsimon $
+$Id$
 """
 
 from __future__ import division # confidence high
diff --git a/lib/pyraf/cgeneric.py b/lib/pyraf/cgeneric.py
index 8795cdb..d548f3b 100644
--- a/lib/pyraf/cgeneric.py
+++ b/lib/pyraf/cgeneric.py
@@ -15,7 +15,7 @@ objects.
 
 I also added the re match object as an argument to the action function.
 
-$Id: cgeneric.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 Created 1999 September 10 by R. White
 """
diff --git a/lib/pyraf/cl2py.py b/lib/pyraf/cl2py.py
index b0834f1..2064959 100644
--- a/lib/pyraf/cl2py.py
+++ b/lib/pyraf/cl2py.py
@@ -1,6 +1,6 @@
 """cl2py.py: Translate IRAF CL program to Python
 
-$Id: cl2py.py 2154 2014-03-11 15:30:31Z sontag $
+$Id$
 
 R. White, 1999 December 20
 """
diff --git a/lib/pyraf/clast.py b/lib/pyraf/clast.py
index b2cd12b..e9e8e26 100644
--- a/lib/pyraf/clast.py
+++ b/lib/pyraf/clast.py
@@ -1,6 +1,6 @@
 """clast.py: abstract syntax tree node type for CL parsing
 
-$Id: clast.py 1708 2012-04-30 17:00:37Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/clcache.py b/lib/pyraf/clcache.py
index 24735ae..f47a01e 100644
--- a/lib/pyraf/clcache.py
+++ b/lib/pyraf/clcache.py
@@ -1,6 +1,6 @@
 """clcache.py: Implement cache for Python translations of CL tasks
 
-$Id: clcache.py 2206 2014-06-09 20:03:35Z sontag $
+$Id$
 
 R. White, 2000 January 19
 """
diff --git a/lib/pyraf/cllinecache.py b/lib/pyraf/cllinecache.py
index 544f246..c7ae36c 100644
--- a/lib/pyraf/cllinecache.py
+++ b/lib/pyraf/cllinecache.py
@@ -2,7 +2,7 @@
 
 CL scripts have special filename "<CL script taskname>"
 
-$Id: cllinecache.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/clparse.py b/lib/pyraf/clparse.py
index 139660a..e1c4c0e 100644
--- a/lib/pyraf/clparse.py
+++ b/lib/pyraf/clparse.py
@@ -1,6 +1,6 @@
 """clparse.py: Parse IRAF CL
 
-$Id: clparse.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 
 R. White, 1999 August 24
 """
diff --git a/lib/pyraf/clscan.py b/lib/pyraf/clscan.py
index 5c35895..a2d008a 100644
--- a/lib/pyraf/clscan.py
+++ b/lib/pyraf/clscan.py
@@ -2,7 +2,7 @@
 
 This version uses a context-sensitive pattern stack
 
-$Id: clscan.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 
 R. White, 1999 September 10
 """
diff --git a/lib/pyraf/cltoken.py b/lib/pyraf/cltoken.py
index eb421ca..1d8014f 100644
--- a/lib/pyraf/cltoken.py
+++ b/lib/pyraf/cltoken.py
@@ -1,6 +1,6 @@
 """cltoken.py: Token definition for CL parser
 
-$Id: cltoken.py 1546 2011-10-06 17:42:55Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/describe.py b/lib/pyraf/describe.py
index 05c5c4d..f9a57f3 100644
--- a/lib/pyraf/describe.py
+++ b/lib/pyraf/describe.py
@@ -1,7 +1,7 @@
 # http://www.dejanews.com/getdoc.xp?AN=382948703
 #
 # Instant Python
-# $Id: describe.py 1771 2012-06-08 22:57:48Z sontag $
+# $Id$
 #
 # utilities to describe functions, methods, and classes
 #
diff --git a/lib/pyraf/dirdbm.py b/lib/pyraf/dirdbm.py
index f857717..abfc2ba 100644
--- a/lib/pyraf/dirdbm.py
+++ b/lib/pyraf/dirdbm.py
@@ -4,7 +4,7 @@ Allows simultaneous read-write access to the data since
 the OS allows multiple processes to have access to the
 file system.
 
-$Id: dirdbm.py 1772 2012-06-12 03:29:07Z sontag $
+$Id$
 
 XXX need to implement 'n' open flag (force new database creation)
 XXX maybe allow for known key with None as value in dict?
diff --git a/lib/pyraf/dirshelve.py b/lib/pyraf/dirshelve.py
index 7018e60..3967aa5 100644
--- a/lib/pyraf/dirshelve.py
+++ b/lib/pyraf/dirshelve.py
@@ -4,7 +4,7 @@ Allows simultaneous read-write access to the data since
 the OS allows multiple processes to have access to the
 file system.
 
-$Id: dirshelve.py 1761 2012-05-31 20:32:00Z sontag $
+$Id$
 
 XXX keys, len may be incorrect if directory database is modified
 XXX by another process after open
diff --git a/lib/pyraf/epar.py b/lib/pyraf/epar.py
index 6d4de29..b1edaa7 100644
--- a/lib/pyraf/epar.py
+++ b/lib/pyraf/epar.py
@@ -1,6 +1,6 @@
 """ Main module for the PyRAF-version of the Epar parameter editor
 
-$Id: epar.py 1666 2012-03-30 03:50:14Z sontag $
+$Id$
 
 M.D. De La Pena, 2000 February 04
 """
diff --git a/lib/pyraf/filecache.py b/lib/pyraf/filecache.py
index 2c4fe1e..d3be491 100644
--- a/lib/pyraf/filecache.py
+++ b/lib/pyraf/filecache.py
@@ -24,7 +24,7 @@ instance) of the objects to be created for each entry.  New files
 are added with the add() method, and values are retrieved by
 index (cachedict[filename]) or using the .get() method.
 
-$Id: filecache.py 1772 2012-06-12 03:29:07Z sontag $
+$Id$
 
 R. White, 2000 October 1
 """
diff --git a/lib/pyraf/fontdata.py b/lib/pyraf/fontdata.py
index a0479e5..e8a8511 100644
--- a/lib/pyraf/fontdata.py
+++ b/lib/pyraf/fontdata.py
@@ -1,6 +1,6 @@
 """fontdata.py
 
-$Id: fontdata.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/generic.py b/lib/pyraf/generic.py
index 890d9b2..46c6c1e 100644
--- a/lib/pyraf/generic.py
+++ b/lib/pyraf/generic.py
@@ -1,6 +1,6 @@
 """generic.py: John Aycock's little languages (SPARK) framework
 
-$Id: generic.py 1775 2012-06-18 15:09:34Z sontag $
+$Id$
 """
 
 #  Copyright (c) 1998-2000 John Aycock
diff --git a/lib/pyraf/gki.py b/lib/pyraf/gki.py
index 912e2bd..c1a0b39 100644
--- a/lib/pyraf/gki.py
+++ b/lib/pyraf/gki.py
@@ -36,7 +36,7 @@ allowing the kernel type to change.)
 GkiController is a GkiProxy that allows switching between different
 graphics kernels as directed by commands embedded in the metacode stream.
 
-$Id: gki.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division
 
diff --git a/lib/pyraf/gki_psikern_tests.py b/lib/pyraf/gki_psikern_tests.py
index f008ab9..a602fb1 100644
--- a/lib/pyraf/gki_psikern_tests.py
+++ b/lib/pyraf/gki_psikern_tests.py
@@ -5,7 +5,7 @@ The first version of this will be under-representative of the total
 functionality, but tests will be added over time, as code issues are
 researched and addressed.
 
-$Id: gki_psikern_tests.py 2254 2014-10-13 15:48:53Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gki_sys_tests.py b/lib/pyraf/gki_sys_tests.py
index 79569c2..47e0ad8 100644
--- a/lib/pyraf/gki_sys_tests.py
+++ b/lib/pyraf/gki_sys_tests.py
@@ -5,7 +5,7 @@ The first version of this will be under-representative of the GKI
 functionality, but tests will be added over time, as code issues are
 researched and addressed.
 
-$Id: gki_sys_tests.py 1570 2011-11-15 22:08:46Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gkicmd.py b/lib/pyraf/gkicmd.py
index 746d19f..acd5f8e 100644
--- a/lib/pyraf/gkicmd.py
+++ b/lib/pyraf/gkicmd.py
@@ -1,6 +1,6 @@
 """gki metacode generating functions for use by Pyraf in generating
 iraf gki metacode (primarily for interactive graphics)"""
-# $Id: gkicmd.py 1708 2012-04-30 17:00:37Z sontag $
+# $Id$
 
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gkigcur.py b/lib/pyraf/gkigcur.py
index a0ed371..612bef7 100644
--- a/lib/pyraf/gkigcur.py
+++ b/lib/pyraf/gkigcur.py
@@ -1,7 +1,7 @@
 """
 implement IRAF gcur functionality
 
-$Id: gkigcur.py 1696 2012-04-17 20:16:57Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gkiiraf.py b/lib/pyraf/gkiiraf.py
index b2dd3cc..0b5f6d0 100644
--- a/lib/pyraf/gkiiraf.py
+++ b/lib/pyraf/gkiiraf.py
@@ -1,7 +1,7 @@
 """
 OpenGL implementation of the gki kernel class
 
-$Id: gkiiraf.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gkitkbase.py b/lib/pyraf/gkitkbase.py
index 3bacf3d..a219032 100644
--- a/lib/pyraf/gkitkbase.py
+++ b/lib/pyraf/gkitkbase.py
@@ -1,7 +1,7 @@
 """
 Tk gui implementation for the gki plot widget
 
-$Id: gkitkbase.py 2461 2015-10-16 15:17:19Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gkitkplot.py b/lib/pyraf/gkitkplot.py
index a6fcbdc..5e0e4b4 100644
--- a/lib/pyraf/gkitkplot.py
+++ b/lib/pyraf/gkitkplot.py
@@ -1,7 +1,7 @@
 """
 Tkplot implementation of the gki kernel class
 
-$Id: gkitkplot.py 1742 2012-05-23 13:45:59Z sontag $
+$Id$
 """
 
 from __future__ import division # confidence high
diff --git a/lib/pyraf/graphcap.py b/lib/pyraf/graphcap.py
index 72217bc..13ef698 100644
--- a/lib/pyraf/graphcap.py
+++ b/lib/pyraf/graphcap.py
@@ -1,6 +1,6 @@
 """Finds device attributes from the graphcap
 
-$Id: graphcap.py 1545 2011-10-05 19:24:51Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/gwm.py b/lib/pyraf/gwm.py
index 53ebd05..5e30f34 100644
--- a/lib/pyraf/gwm.py
+++ b/lib/pyraf/gwm.py
@@ -2,7 +2,7 @@
 Graphics window manager, creates multiple toplevel togl widgets for
 use by python plotting
 
-$Id: gwm.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/ipython_api.py b/lib/pyraf/ipython_api.py
index 48405b6..1b7b91b 100644
--- a/lib/pyraf/ipython_api.py
+++ b/lib/pyraf/ipython_api.py
@@ -6,7 +6,7 @@ attempting a more conventional IPython interpretation of a command.
 
 Code derived from pyraf.pycmdline.py
 
-$Id: ipython_api.py 2120 2014-01-01 16:41:51Z sontag $
+$Id$
 """
 #*****************************************************************************
 #       Copyright (C) 2001-2004 Fernando Perez <fperez at colorado.edu>
diff --git a/lib/pyraf/iraf.py b/lib/pyraf/iraf.py
index ac0f458..6dd9a5b 100644
--- a/lib/pyraf/iraf.py
+++ b/lib/pyraf/iraf.py
@@ -1,6 +1,6 @@
 """module iraf.py -- home for all the IRAF tasks and basic access functions
 
-$Id: iraf.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 R. White, 1999 Jan 25
 """
diff --git a/lib/pyraf/irafcompleter.py b/lib/pyraf/irafcompleter.py
index 5f0f3bb..6e0c093 100644
--- a/lib/pyraf/irafcompleter.py
+++ b/lib/pyraf/irafcompleter.py
@@ -10,7 +10,7 @@ the native file system.)
 See the notes in the (standard Python) module rlcompleter.py for more
 information.
 
-$Id: irafcompleter.py 2175 2014-04-30 13:58:28Z sontag $
+$Id$
 
 RLW, 2000 February 13
 """
diff --git a/lib/pyraf/irafdisplay.py b/lib/pyraf/irafdisplay.py
index 55d3a81..3abc233 100644
--- a/lib/pyraf/irafdisplay.py
+++ b/lib/pyraf/irafdisplay.py
@@ -26,7 +26,7 @@ This could be used to maintain references to multiple display servers.
 Ultimately more functionality may be added to make this a complete
 replacement for CDL.
 
-$Id: irafdisplay.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/irafecl.py b/lib/pyraf/irafecl.py
index 8df9d41..369fff7 100644
--- a/lib/pyraf/irafecl.py
+++ b/lib/pyraf/irafecl.py
@@ -1,5 +1,5 @@
 """This module adds IRAF ECL style error handling to PyRAF."""
-# $Id: irafecl.py 1771 2012-06-08 22:57:48Z sontag $
+# $Id$
 
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/irafexecute.py b/lib/pyraf/irafexecute.py
index 02b1070..9a724c0 100644
--- a/lib/pyraf/irafexecute.py
+++ b/lib/pyraf/irafexecute.py
@@ -1,6 +1,6 @@
 """irafexecute.py: Functions to execute IRAF connected subprocesses
 
-$Id: irafexecute.py 1976 2013-05-13 17:27:36Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/iraffunctions.py b/lib/pyraf/iraffunctions.py
index 31eeb6a..63ba9ec 100644
--- a/lib/pyraf/iraffunctions.py
+++ b/lib/pyraf/iraffunctions.py
@@ -10,7 +10,7 @@ The exception is that iraffunctions can be used directly for modules
 that must be compiled and executed early, before the pyraf module
 initialization is complete.
 
-$Id: iraffunctions.py 2202 2014-06-04 20:59:25Z sontag $
+$Id$
 
 R. White, 2000 January 20
 """
diff --git a/lib/pyraf/irafgwcs.py b/lib/pyraf/irafgwcs.py
index 47a57d9..2e315c2 100644
--- a/lib/pyraf/irafgwcs.py
+++ b/lib/pyraf/irafgwcs.py
@@ -5,7 +5,7 @@ possibly other tasks) where the WCS for an existing plot gets changed
 before the plot is cleared.  I save the changed wcs in self.pending and
 only commit the change when it appears to really be applicable.
 
-$Id: irafgwcs.py 1708 2012-04-30 17:00:37Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/irafhelp.py b/lib/pyraf/irafhelp.py
index 816bd56..f58b950 100644
--- a/lib/pyraf/irafhelp.py
+++ b/lib/pyraf/irafhelp.py
@@ -32,7 +32,7 @@ The padchars keyword determines some details of the format of the output.
 The **kw argument allows minimum matching for the keyword arguments
 (so help(func=1) will work).
 
-$Id: irafhelp.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 
 R. White, 1999 September 23
 """
diff --git a/lib/pyraf/irafimcur.py b/lib/pyraf/irafimcur.py
index 17df761..711ee05 100644
--- a/lib/pyraf/irafimcur.py
+++ b/lib/pyraf/irafimcur.py
@@ -4,7 +4,7 @@ Read the cursor position from stdimage image display device (DS9,
 SAOIMAGE or XIMTOOL) and return a string compatible with IRAF's
 imcur parameter.
 
-$Id: irafimcur.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/irafimport.py b/lib/pyraf/irafimport.py
index 6445f35..857a1d8 100644
--- a/lib/pyraf/irafimport.py
+++ b/lib/pyraf/irafimport.py
@@ -8,7 +8,7 @@ Modify module import mechanism so that
 Assumes that all IRAF tasks and packages are accessible as iraf
 module attributes.  Only affects imports of iraf module.
 
-$Id: irafimport.py 2408 2015-07-31 14:06:11Z bsimon $
+$Id$
 
 R. White, 1999 August 17
 """
diff --git a/lib/pyraf/irafinst.py b/lib/pyraf/irafinst.py
index 22148fe..9254c65 100644
--- a/lib/pyraf/irafinst.py
+++ b/lib/pyraf/irafinst.py
@@ -6,7 +6,7 @@ Obviously, this module should refrain as much as possible from importing any
 IRAF related code (at least globally), since this is heavily relied upon in
 non-IRAF situations.
 
-$Id: irafinst.py 1575 2011-11-23 14:55:12Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/irafnames.py b/lib/pyraf/irafnames.py
index 9032bc3..0e3b3d6 100644
--- a/lib/pyraf/irafnames.py
+++ b/lib/pyraf/irafnames.py
@@ -2,7 +2,7 @@
 included in the user's namespace.  Uses a plug-in strategy so behavior can
 be changed.
 
-$Id: irafnames.py 1568 2011-11-15 04:46:10Z sontag $
+$Id$
 
 R. White, 1999 March 26
 """
diff --git a/lib/pyraf/irafpar.py b/lib/pyraf/irafpar.py
index 9f595a1..20d7bff 100644
--- a/lib/pyraf/irafpar.py
+++ b/lib/pyraf/irafpar.py
@@ -1,6 +1,6 @@
 """irafpar.py -- parse IRAF .par files and create lists of IrafPar objects
 
-$Id: irafpar.py 2205 2014-06-09 17:44:43Z sontag $
+$Id$
 
 R. White, 2000 January 7
 """
diff --git a/lib/pyraf/iraftask.py b/lib/pyraf/iraftask.py
index 9fcb534..5744d24 100644
--- a/lib/pyraf/iraftask.py
+++ b/lib/pyraf/iraftask.py
@@ -1,6 +1,6 @@
 """module iraftask.py -- defines IrafTask and IrafPkg classes
 
-$Id: iraftask.py 2247 2014-09-25 19:45:34Z sontag $
+$Id$
 
 R. White, 2000 June 26
 
diff --git a/lib/pyraf/irafukey.py b/lib/pyraf/irafukey.py
index 70e6fb8..69eb390 100644
--- a/lib/pyraf/irafukey.py
+++ b/lib/pyraf/irafukey.py
@@ -1,7 +1,7 @@
 """
 implement IRAF ukey functionality
 
-$Id: irafukey.py 1740 2012-05-22 20:46:48Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/msgiobuffer.py b/lib/pyraf/msgiobuffer.py
index 90cb634..0d5750a 100644
--- a/lib/pyraf/msgiobuffer.py
+++ b/lib/pyraf/msgiobuffer.py
@@ -4,7 +4,7 @@
    frame contains the latest I/O from the interactive program.
 
 M.D. De La Pena, 2000 June 28
-$Id: msgiobuffer.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/msgiowidget.py b/lib/pyraf/msgiowidget.py
index 6a84642..04b3b06 100644
--- a/lib/pyraf/msgiowidget.py
+++ b/lib/pyraf/msgiowidget.py
@@ -2,7 +2,7 @@
    This contains the MsgIOWidget class, which is an optionally hidden
    scrolling canvas composed of a text widget and frame.  When "hidden",
    it turns into a single-line text widget.
-   $Id: msgiowidget.py 2460 2015-10-16 15:08:13Z sontag $
+   $Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/newWindowHack.py b/lib/pyraf/newWindowHack.py
index c95b113..3a2e8e1 100644
--- a/lib/pyraf/newWindowHack.py
+++ b/lib/pyraf/newWindowHack.py
@@ -15,7 +15,7 @@ tk.withdraw()
 import tkSimpleDialog
 tkSimpleDialog.askstring("window title", "question?")
 
-$Id: newWindowHack.py 1699 2012-04-19 17:45:35Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/pseteparoption.py b/lib/pyraf/pseteparoption.py
index 5c9f1d5..81d4f57 100644
--- a/lib/pyraf/pseteparoption.py
+++ b/lib/pyraf/pseteparoption.py
@@ -2,7 +2,7 @@
    options to be used for PSETs in the parameter editor task.  Code was
    broken out from eparoption.py.
 
-$Id: pseteparoption.py 1523 2011-08-30 03:19:02Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/pycmdline.py b/lib/pyraf/pycmdline.py
index eee2481..5c830a3 100644
--- a/lib/pyraf/pycmdline.py
+++ b/lib/pyraf/pycmdline.py
@@ -20,7 +20,7 @@ Provides this functionality:
 Uses standard code module plus some ideas from cmd.py module
 (and of course Perry's Monty design.)
 
-$Id: pycmdline.py 2176 2014-04-30 14:00:08Z sontag $
+$Id$
 
 R. White, 2000 February 20
 """
diff --git a/lib/pyraf/pyrafTk.py b/lib/pyraf/pyrafTk.py
index b938123..49a4bc5 100644
--- a/lib/pyraf/pyrafTk.py
+++ b/lib/pyraf/pyrafTk.py
@@ -1,6 +1,6 @@
 """pyrafTk.py: modify Tkinter root to print short PyRAF tracebacks
 
-$Id: pyrafTk.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 R. L. White, 2000 November 17
 """
diff --git a/lib/pyraf/pyrafglobals.py b/lib/pyraf/pyrafglobals.py
index 636d336..7100e8b 100644
--- a/lib/pyraf/pyrafglobals.py
+++ b/lib/pyraf/pyrafglobals.py
@@ -5,7 +5,7 @@ _use_ecl        Flag to turn on ECL mode in PyRAF
 
 This is defined so it is safe to say 'from pyrafglobals import *'
 
-$Id: pyrafglobals.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 Broken out from irafglobals.py which was signed "R. White, 2000 January 5"
 """
diff --git a/lib/pyraf/splash.py b/lib/pyraf/splash.py
index 9e63114..2a12316 100644
--- a/lib/pyraf/splash.py
+++ b/lib/pyraf/splash.py
@@ -1,6 +1,6 @@
 """splash.py: Display PyRAF splash screen
 
-$Id: splash.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 R. White, 2001 Dec 15
 """
diff --git a/lib/pyraf/subproc.py b/lib/pyraf/subproc.py
index e81b2d6..92a27f4 100644
--- a/lib/pyraf/subproc.py
+++ b/lib/pyraf/subproc.py
@@ -15,7 +15,7 @@ Subprocess class features:
 
  - RecordFile class provides record-oriented IO for file-like stream objects.
 
-$Id: subproc.py 1753 2012-05-29 19:12:14Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
@@ -863,7 +863,7 @@ class RedirProcess(Subprocess):
 def test(fout = sys.stdout):
     fout.write("Starting test ...\n")
     assert hasattr(fout, 'write'), "Input not a file object: "+str(fout)
-    print "\tOpening subprocess:"
+    print "\tOpening subprocess (git 22 jul):"
     p = Subprocess('cat', expire_noisily=1)        # set to expire noisily...
     print p
     print "\tOpening bogus subprocess, should fail:"
diff --git a/lib/pyraf/textattrib.py b/lib/pyraf/textattrib.py
index d220fcb..efc98a2 100644
--- a/lib/pyraf/textattrib.py
+++ b/lib/pyraf/textattrib.py
@@ -1,6 +1,6 @@
 """Implements text rendering using stroked font and OpenGL
 
-$Id: textattrib.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 
 General description and discussion about the assumptions of how
 text is to be handled. This will be a phased implementation and
diff --git a/lib/pyraf/tkplottext.py b/lib/pyraf/tkplottext.py
index 1d9537b..d9ef2a5 100644
--- a/lib/pyraf/tkplottext.py
+++ b/lib/pyraf/tkplottext.py
@@ -1,6 +1,6 @@
 """Implements text rendering using stroked font and Tkplot/X
 
-$Id: tkplottext.py 1567 2011-11-07 17:39:43Z sontag $
+$Id$
 
 General description and discussion about the assumptions of how
 text is to be handled. This will be a phased implementation and
diff --git a/lib/pyraf/tpar.py b/lib/pyraf/tpar.py
index d7cc407..b3c1a4f 100644
--- a/lib/pyraf/tpar.py
+++ b/lib/pyraf/tpar.py
@@ -7,7 +7,7 @@ that it works in a simple terminal window (rather than requiring full
 X-11 and Tk); this is an improvement for low bandwidth network
 contexts or for people who prefer text interfaces to GUIs.
 
-$Id: tpar.py 1771 2012-06-08 22:57:48Z sontag $
+$Id$
 
 Todd Miller, 2006 May 30  derived from epar.py and IRAF CL epar.
 """
diff --git a/lib/pyraf/urwfiledlg.py b/lib/pyraf/urwfiledlg.py
index b3ed379..9fa1355 100644
--- a/lib/pyraf/urwfiledlg.py
+++ b/lib/pyraf/urwfiledlg.py
@@ -10,7 +10,7 @@ copy is based on r49 of urwid/contrib/trunk/rbreu_filechooser.py, updated
 2006.10.17.  Only minor changes were made - mostly to handle use with differing
 versions of urwid.  Many thanks to author Rebecca Breu.
 
-$Id: urwfiledlg.py 1699 2012-04-19 17:45:35Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/urwutil.py b/lib/pyraf/urwutil.py
index aaf55df..e6c344b 100755
--- a/lib/pyraf/urwutil.py
+++ b/lib/pyraf/urwutil.py
@@ -24,7 +24,7 @@ the DialogDisplay and the InputDialogDisplay classes.  Unsure why this
 functionality is not delivered with a standard urwid installation, so we will
 include this file until it comes with urwid.  This is slightly modified.
 
-$Id: urwutil.py 1463 2011-06-24 22:58:30Z stsci_embray $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/lib/pyraf/version.py b/lib/pyraf/version.py
index 6dda7c4..5984b74 100644
--- a/lib/pyraf/version.py
+++ b/lib/pyraf/version.py
@@ -7,14 +7,14 @@ __all__ = ['__version__', '__vdate__', '__svn_revision__', '__svn_full_info__',
 
 import datetime
 
-__version__ = '2.1.10'
+__version__ = '2.1.11'
 __vdate__ = 'unspecified'
-__svn_revision__ = "2486"
+__svn_revision__ = 'exported'
 __svn_full_info__ = 'unknown'
-__setup_datetime__ = datetime.datetime(2015, 11, 25, 13, 40, 1, 566121)
+__setup_datetime__ = datetime.datetime(2016, 7, 22, 15, 19, 17, 846349)
 
 # what version of stsci.distutils created this version.py
-stsci_distutils_version = '0.3.7'
+stsci_distutils_version = '0.3.8.dev'
 
 if '.dev' in __version__:
     def update_svn_info():
@@ -70,7 +70,7 @@ if '.dev' in __version__:
                 if pipe.returncode == 0:
                     stdout = stdout.decode('latin1').strip()
                     if stdout and stdout[0] in string.digits:
-                        __svn_revision__ = "2486"
+                        __svn_revision__ = stdout
             except OSError:
                 pass
 
@@ -79,5 +79,5 @@ if '.dev' in __version__:
             __svn_full_info__ = '\n'.join(__svn_full_info__)
 
 
-#update_svn_info()
+    update_svn_info()
     del update_svn_info
diff --git a/lib/pyraf/version.py.orig1 b/lib/pyraf/version.py.orig1
deleted file mode 100644
index e4ea356..0000000
--- a/lib/pyraf/version.py.orig1
+++ /dev/null
@@ -1,83 +0,0 @@
-"""This is an automatically generated file created by stsci.distutils.hooks.version_setup_hook.
-Do not modify this file by hand.
-"""
-
-__all__ = ['__version__', '__vdate__', '__svn_revision__', '__svn_full_info__',
-           '__setup_datetime__']
-
-import datetime
-
-__version__ = '2.1.10'
-__vdate__ = 'unspecified'
-__svn_revision__ = 'Unable to determine SVN revision'
-__svn_full_info__ = 'unknown'
-__setup_datetime__ = datetime.datetime(2015, 11, 25, 13, 39, 51, 97188)
-
-# what version of stsci.distutils created this version.py
-stsci_distutils_version = '0.3.7'
-
-if '.dev' in __version__:
-    def update_svn_info():
-        """Update the SVN info if running out of an SVN working copy."""
-
-        import os
-        import string
-        import subprocess
-
-        global __svn_revision__
-        global __svn_full_info__
-
-        path = os.path.abspath(os.path.dirname(__file__))
-
-        run_svnversion = True
-
-        try:
-            pipe = subprocess.Popen(['svn', 'info', path],
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            stdout, _ = pipe.communicate()
-            if pipe.returncode == 0:
-                lines = []
-                for line in stdout.splitlines():
-                    line = line.decode('latin1').strip()
-                    if not line:
-                        continue
-                    lines.append(line)
-
-                if not lines:
-                    __svn_full_info__ = ['unknown']
-                else:
-                    __svn_full_info__ = lines
-            else:
-                run_svnversion = False
-        except OSError:
-            run_svnversion = False
-
-        if run_svnversion:
-            # If updating the __svn_full_info__ succeeded then use its output
-            # to find the base of the working copy and use svnversion to get
-            # the svn revision.
-            for line in __svn_full_info__:
-                if line.startswith('Working Copy Root Path'):
-                    path = line.split(':', 1)[1].strip()
-                    break
-
-            try:
-                pipe = subprocess.Popen(['svnversion', path],
-                                        stdout=subprocess.PIPE,
-                                        stderr=subprocess.PIPE)
-                stdout, _ = pipe.communicate()
-                if pipe.returncode == 0:
-                    stdout = stdout.decode('latin1').strip()
-                    if stdout and stdout[0] in string.digits:
-                        __svn_revision__ = stdout
-            except OSError:
-                pass
-
-        # Convert __svn_full_info__ back to a string
-        if isinstance(__svn_full_info__, list):
-            __svn_full_info__ = '\n'.join(__svn_full_info__)
-
-
-    update_svn_info()
-    del update_svn_info
diff --git a/lib/pyraf/version.py.orig2 b/lib/pyraf/version.py.orig2
deleted file mode 100644
index 2ec7b7a..0000000
--- a/lib/pyraf/version.py.orig2
+++ /dev/null
@@ -1,83 +0,0 @@
-"""This is an automatically generated file created by stsci.distutils.hooks.version_setup_hook.
-Do not modify this file by hand.
-"""
-
-__all__ = ['__version__', '__vdate__', '__svn_revision__', '__svn_full_info__',
-           '__setup_datetime__']
-
-import datetime
-
-__version__ = '2.1.10'
-__vdate__ = 'unspecified'
-__svn_revision__ = 'Unable to determine SVN revision'
-__svn_full_info__ = 'unknown'
-__setup_datetime__ = datetime.datetime(2015, 11, 25, 13, 39, 51, 97188)
-
-# what version of stsci.distutils created this version.py
-stsci_distutils_version = '0.3.7'
-
-if '.dev' in __version__:
-    def update_svn_info():
-        """Update the SVN info if running out of an SVN working copy."""
-
-        import os
-        import string
-        import subprocess
-
-        global __svn_revision__
-        global __svn_full_info__
-
-        path = os.path.abspath(os.path.dirname(__file__))
-
-        run_svnversion = True
-
-        try:
-            pipe = subprocess.Popen(['svn', 'info', path],
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            stdout, _ = pipe.communicate()
-            if pipe.returncode == 0:
-                lines = []
-                for line in stdout.splitlines():
-                    line = line.decode('latin1').strip()
-                    if not line:
-                        continue
-                    lines.append(line)
-
-                if not lines:
-                    __svn_full_info__ = ['unknown']
-                else:
-                    __svn_full_info__ = lines
-            else:
-                run_svnversion = False
-        except OSError:
-            run_svnversion = False
-
-        if run_svnversion:
-            # If updating the __svn_full_info__ succeeded then use its output
-            # to find the base of the working copy and use svnversion to get
-            # the svn revision.
-            for line in __svn_full_info__:
-                if line.startswith('Working Copy Root Path'):
-                    path = line.split(':', 1)[1].strip()
-                    break
-
-            try:
-                pipe = subprocess.Popen(['svnversion', path],
-                                        stdout=subprocess.PIPE,
-                                        stderr=subprocess.PIPE)
-                stdout, _ = pipe.communicate()
-                if pipe.returncode == 0:
-                    stdout = stdout.decode('latin1').strip()
-                    if stdout and stdout[0] in string.digits:
-                        __svn_revision__ = stdout
-            except OSError:
-                pass
-
-        # Convert __svn_full_info__ back to a string
-        if isinstance(__svn_full_info__, list):
-            __svn_full_info__ = '\n'.join(__svn_full_info__)
-
-
-#update_svn_info()
-    del update_svn_info
diff --git a/lib/pyraf/version_vcs.py b/lib/pyraf/version_vcs.py
new file mode 100644
index 0000000..5d2bfb2
--- /dev/null
+++ b/lib/pyraf/version_vcs.py
@@ -0,0 +1,2 @@
+"This is automatically generated at package time.  Do not edit"
+__vcs_revision__ = '837a8509'
diff --git a/lib/pyraf/wutil.py b/lib/pyraf/wutil.py
index a401431..b929b44 100644
--- a/lib/pyraf/wutil.py
+++ b/lib/pyraf/wutil.py
@@ -4,7 +4,7 @@ These are python stubs that are overloaded by a c version implementations.
 If the c versions do not exist, then these routines will do nothing
 
 
-$Id: wutil.py 2484 2015-11-24 20:40:57Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
@@ -506,7 +506,10 @@ def dumpspecs(outstream = None, skip_volatiles = False):
        pass
 
     out = "python exec = "+str(sys.executable)
-    out += "\npython ver = "+sys.version.split()[0]
+    if skip_volatiles:
+        out += "\npython ver = "+'.'.join([str(v) for v in sys.version_info[0:2]])
+    else:
+        out += "\npython ver = "+'.'.join([str(v) for v in sys.version_info[0:3]])
     out += "\nplatform = "+str(sys.platform)
     if not skip_volatiles:
         out += "\nPyRAF ver = "+pyrver
diff --git a/lib/pyraf_setup.pyc b/lib/pyraf_setup.pyc
index 269038b..b4f9b65 100644
Binary files a/lib/pyraf_setup.pyc and b/lib/pyraf_setup.pyc differ
diff --git a/required_pkgs/d2to1 b/required_pkgs/d2to1
new file mode 160000
index 0000000..be01316
--- /dev/null
+++ b/required_pkgs/d2to1
@@ -0,0 +1 @@
+Subproject commit be0131609709ca43821649daa2e95d54f059039e
diff --git a/required_pkgs/d2to1/.authors b/required_pkgs/d2to1/.authors
deleted file mode 100644
index 697b7ae..0000000
--- a/required_pkgs/d2to1/.authors
+++ /dev/null
@@ -1,4 +0,0 @@
-embray = Erik M. Bray <embray at stsci.edu>
-sienkiew = Mark Sienkiewicz <sienkiew at stsci.edu>
-sontag = Chris Sontag <sontag at stsci.edu>
-iraf = iraf <iraf at stsci.edu>
diff --git a/required_pkgs/d2to1/.gitignore b/required_pkgs/d2to1/.gitignore
deleted file mode 100644
index 24f0fdd..0000000
--- a/required_pkgs/d2to1/.gitignore
+++ /dev/null
@@ -1,27 +0,0 @@
-# Compiled files
-*.py[co]
-*.a
-*.o
-*.so
-
-# Sphinx
-_build
-
-# Packages/installer info
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-
-# Other
-.tox
-.*.swp
-.coverage
-cover
diff --git a/required_pkgs/d2to1/.travis.yml b/required_pkgs/d2to1/.travis.yml
deleted file mode 100644
index 5585765..0000000
--- a/required_pkgs/d2to1/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: python
-python:
-  - "3.3"
-  - "3.2"
-  - "2.7"
-  - "2.6"
-script: python setup.py test
diff --git a/required_pkgs/d2to1/CHANGES.rst b/required_pkgs/d2to1/CHANGES.rst
deleted file mode 100644
index 977387d..0000000
--- a/required_pkgs/d2to1/CHANGES.rst
+++ /dev/null
@@ -1,224 +0,0 @@
-Changes
-=========
-
-
-0.2.12 (unreleased)
--------------------
-
-- Nothing changed yet.
-
-
-0.2.11 (2013-08-29)
--------------------
-
-- Replaced ``distribute_setup.py`` with ``ez_setup.py`` in order to bootstrap
-  with modern setuptools when necessary.
-
-- Fixed a couple minor Python 3-specific issues. In particular the
-  ``data_files`` option could be passed to ``setup()`` as a ``dict_items``
-  object instead of a ``list`` which is what would normally be expected.
-
-- Added a tox.ini (frankly I thought there already was one).
-
-
-0.2.10 (2013-04-10)
--------------------
-
-- Added support for the ``setup-requires-dist`` option in the ``[metadata]``
-  section of setup.cfg.  This is analogous to the Setup-Requires-Dist metadata
-  field supported by PEP-426 and uses the ``setup_requires`` argument to
-  setuptools' ``setup()`` to implement it.
-
-- Added support for the ``dependency_links`` and ``include_package_data``
-  arguments to setuptools' ``setup()`` in the ``[backwards_compat]`` section of
-  setup.cfg.
-
-- When a setup_hook calls sys.exit() don't show a traceback for the
-  SystemExit exception.
-
-- Fixed a bug in the exception formatting when exceptions occur in setup.cfg
-  handling.
-
-
-0.2.9 (2013-03-05)
-------------------
-
-- Fixed a bug in the extra-files supported added in 0.2.8.  Makes sure that
-  monkey-patches can't be installed more than once and that the log
-  reference is properly encapsulated.
-
-
-0.2.8 (2013-03-05)
-------------------
-
-- Improved handling of packages where the packages_root option is set. That is,
-  the Python package is not directly in the root of the source tree, but is in
-  some sub-directory.  Now the packages_root directory is prepended to
-  sys.path while processing the setup.cfg and running setup hooks.
-
-- Added support for the Keywords metadata field via the keywords option in the
-  ``[metadata]`` section of setup.cfg.
-
-- Fixed a missing import that caused a misleading exception when setup.cfg is
-  missing.
-
-- Upgraded the shipped distribute_setup.py to the latest for distribute 0.6.28
-
-- Added a few basic functional tests, along with an infrastructure to add more
-  as needed.  They can be run with nose and possibly with py.test though the
-  latter hasn't been tested.
-
-- Improved hook imports to work better with namespace packages.
-
-- Added support for the extra_files option of the ``[files]`` section in
-  setup.cfg.  This was a feature from distutils2 that provided an alternative
-  to MANIFEST.in for including additional files in source distributions (it
-  does not yet support wildcard patterns but maybe it should?)
-
-- Added support for the tests_require setup argument from setuptools via
-  the [backwards_compat] section in setup.cfg.
-
-- Supports Python 3 natively without 2to3.  This makes Python 3 testing of
-  d2to1 easier to support.
-
-
-0.2.7 (2012-02-20)
-------------------
-
-- If no extension modules or entry points are defined in the setup.cfg, don't
-  clobber any extension modules/entry points that may be defined in setup.py.
-
-
-0.2.6 (2012-02-17)
-------------------
-
-- Added support for setuptools entry points in an ``[entry_points]`` section of
-  setup.cfg--this is just for backwards-compatibility purposes, as
-  packaging/distutils2 does not yet have a standard replacement for the entry
-  points system.
-
-- Added a [backwards_compat] section for setup.cfg for other options that are
-  supported by setuptools/distribute, but that aren't part of the distutils2
-  standard.  So far the only options supported here are zip_safe and use_2to3.
-  (Note: packaging does support a use-2to3 option to the build command, but if
-  we tried to use that, distutils would not recognize it as a valid build
-  option.)
-
-- Added support for the new (and presumably final) extension section format
-  used by packaging.  In this format, extensions should be specified in config
-  sections of the format ``[extension: ext_mod_name]``, where any whitespace is
-  optional.  The old format used an ``=`` instead of ``:`` and is still
-  supported, but should be considered deprecated.
-
-- Added support for the new syntax used by packaging for the package_data
-  option (which is deprecated in packaging in favor of the resources system,
-  but still supported).  The new syntax is like::
-
-      package_data =
-          packagename = pattern1 pattern2 pattern3
-          packagename.subpack = 
-              pattern1
-              pattern2
-              pattern3
-
-  That is, after ``package_data =``, give the name of a package, followed by
-  an ``=``, followed by any number of whitespace separated wildcard patterns (or
-  actual filenames relative to the package).  Under this scheme, whitespace is
-  not allowed in the patterns themselves.
-
-
-0.2.5 (2011-07-21)
-------------------
-
-- Made the call to pkg_resources.get_distribution() to set __version__ more
-  robust, so that it doesn't fail on, for example, VersionConflict errors
-
-
-0.2.4 (2011-07-05)
-------------------
-
-- Fixed some bugs with installation on Python 3
-
-
-0.2.3 (2011-06-23)
-------------------
-
-- Renamed 'setup_hook' to 'setup_hooks' as is now the case in the packaging
-  module.  Added support for multiple setup_hooks
-
-
-0.2.2 (2011-06-15)
-------------------
-
-- Fixed a bug in DefaultGetDict where it didn't actually save the returned
-  default in the dictionary--so any command options would get lost
-- Fixed a KeyError when the distribution does not have any custom commands
-  specified
-
-
-0.2.1 (2011-06-15)
-------------------
-
-- Reimplemented command hooks without monkey-patching and more reliably in
-  general (though it's still a flaming hack).  The previous monkey patch-based
-  solution would break if d2to1 were entered multiple times, which could happen
-  in some scenarios
-
-
-0.2.0 (2011-06-14)
-------------------
-
-- Version bump to start using micro-version numbers for bug fixes only, now
-  that the my primary feature goals are complete
-
-
-0.1.5 (2011-06-02)
-------------------
-
-- Adds support for the data_files option under [files].  Though this is
-  considered deprecated and may go away at some point, it can be useful in the
-  absence of resources support
-- Adds support for command pre/post-hooks.  Warning: this monkey-patches
-  distutils.dist.Distribution a little bit... :(
-- Adds (slightly naive) support for PEP 345-style version specifiers in
-  requires-dist (environment markers not supported yet)
-- Fixed a bug where not enough newlines were inserted between description files
-
-
-0.1.4 (2011-05-24)
-------------------
-
-- Adds support for custom command classes specified in the 'commands' option
-  under the [global] section in setup.cfg
-- Adds preliminary support for custom compilers specified in the 'compilers'
-  option under the [global] section in setup.cfg.  This functionality doesn't
-  exist in distutils/setuptools/distribute, so adding support for it is a
-  flaming hack.  It hasn't really been tested beyond seeing that the custom
-  compilers come up in `setup.py build_ext --help-compiler`, so any real-world
-  testing of this feature would be appreciated
-
-
-0.1.3 (2011-04-20)
-------------------
-
-- Adds zest.releaser entry points for updating the version number in a
-  setup.cfg file; only useful if you use zest.releaser--otherwise harmless
-  (might eventually move this functionality out into a separate product)
-- Though version 0.1.2 worked in Python3, use_2to3 wasn't added to the setup.py
-  so 2to3 had to be run manually
-- Fixed a crash on projects that don't have a description-file option
-
-0.1.2 (2011-04-13)
-------------------
-
-- Fixed the self-installation--it did not work if a d2to1 version was not
-  already installed, due to the use of `pkg_resources.require()`
-- Adds nominal Python3 support
-- Fixes the 'classifier' option in setup.cfg
-
-0.1.1 (2011-04-12)
-------------------
-
-- Fixed an unhelpful error message when a setup_hook fails to import
-- Made d2to1 able to use its own machinery to install itself
-
diff --git a/required_pkgs/d2to1/CONTRIBUTORS b/required_pkgs/d2to1/CONTRIBUTORS
deleted file mode 100644
index 8a49a06..0000000
--- a/required_pkgs/d2to1/CONTRIBUTORS
+++ /dev/null
@@ -1,4 +0,0 @@
-Erik M. Bray
-Mark Sienkiewicz
-Monty Taylor
-Thomas Grainger
diff --git a/required_pkgs/d2to1/LICENSE b/required_pkgs/d2to1/LICENSE
deleted file mode 100644
index 16abf40..0000000
--- a/required_pkgs/d2to1/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2013 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-       notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-       copyright notice, this list of conditions and the following
-       disclaimer in the documentation and/or other materials provided
-       with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-       endorse or promote products derived from this software without
-       specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/required_pkgs/d2to1/README.rst b/required_pkgs/d2to1/README.rst
deleted file mode 100644
index 30e9170..0000000
--- a/required_pkgs/d2to1/README.rst
+++ /dev/null
@@ -1,110 +0,0 @@
-Introduction
-==============
-.. image:: https://travis-ci.org/embray/d2to1.png?branch=master
-   :alt: travis build status
-   :target: https://travis-ci.org/embray/d2to1
-
-d2to1 (the 'd' is for 'distutils') allows using distutils2-like setup.cfg files
-for a package's metadata with a distribute/setuptools setup.py script.  It
-works by providing a distutils2-formatted setup.cfg file containing all of a
-package's metadata, and a very minimal setup.py which will slurp its arguments
-from the setup.cfg.
-
-Note: distutils2 has been merged into the CPython standard library, where it is
-now known as 'packaging'.  This project was started before that change was
-finalized.  So all references to distutils2 should also be assumed to refer to
-packaging.
-
-Rationale
-===========
-I'm currently in the progress of redoing the packaging of a sizeable number of
-projects.  I wanted to use distutils2-like setup.cfg files for all these
-projects, as they will hopefully be the future, and I much prefer them overall
-to using an executable setup.py.  So forward-support for distutils2 is
-appealing both as future-proofing, and simply the aesthetics of using a flat text file to describe a project's metadata.
-
-However, I did not want any of these projects to require distutils2 for
-installation yet--it is too unstable, and not widely installed.  So projects
-should still be installable using the familiar `./setup.py install`, for
-example.  Furthermore, not all use cases required by some of the packages I
-support are fully supported by distutils2 yet.  Hopefully they will be
-eventually, either through the distutils2 core or through extensions.  But in
-the meantime d2to1 will try to keep up with the state of the art and "best
-practices" for distutils2 distributions, while adding support in areas that
-it's lacking.
-
-Usage
-=======
-d2to1 requires a distribution to use distribute or setuptools.  Your
-distribution must include a distutils2-like setup.cfg file, and a minimal
-setup.py script.  For details on writing the setup.cfg, see the `distutils2
-documentation`_.  A simple sample can be found in d2to1's own setup.cfg (it
-uses its own machinery to install itself)::
-
- [metadata]
- name = d2to1
- version = 0.1.1
- author = Erik M. Bray
- author-email = embray at stsci.edu
- summary = Allows using distutils2-like setup.cfg files for a package's metadata
-  with a distribute/setuptools setup.py
- description-file = README
- license = BSD
- requires-dist = setuptools
- classifier =
-     Development Status :: 4 - Beta
-     Environment :: Plugins
-     Framework :: Setuptools Plugin
-     Intended Audience :: Developers
-     License :: OSI Approved :: BSD License
-     Operating System :: OS Independent
-     Programming Language :: Python
-     Topic :: Software Development :: Build Tools
-     Topic :: Software Development :: Libraries :: Python Modules
-     Topic :: System :: Archiving :: Packaging
- keywords =
-     setup
-     distutils
- [files]
- packages = d2to1
-
-The minimal setup.py should look something like this::
-
- #!/usr/bin/env python
-
- try:
-     from setuptools import setup
- except ImportError:
-     from distribute_setup import use_setuptools
-     use_setuptools()
-     from setuptools import setup
-
- setup(
-     setup_requires=['d2to1'],
-     d2to1=True
- )
-
-Note that it's important to specify d2to1=True or else the d2to1 functionality
-will not be enabled.  It is also possible to set d2to1='some_file.cfg' to
-specify the (relative) path of the setup.cfg file to use.  But in general this
-functionality should not be necessary.
-
-It should also work fine if additional arguments are passed to `setup()`,
-but it should be noted that they will be clobbered by any options in the
-setup.cfg file.
-
-Caveats
-=========
-- The requires-dist option in setup.cfg is implemented through the
-  distribute/setuptools install_requires option, rather than the broken
-  "requires" keyword in normal distutils.
-- Not all features of distutils2 are supported yet.  If something doesn't seem
-  to be working, it's probably not implemented yet.
-- Does not support distutils2 resources, and probably won't since it relies
-  heavily on the sysconfig module only available in Python 3.2 and up.  This is
-  one area in which d2to1 should really be seen as a transitional tool.  I
-  don't really want to include a backport like distutils2 does.  In the
-  meantime, package_data and data_files may still be used under the [files]
-  section of setup.cfg.
-
-.. _distutils2 documentation: http://distutils2.notmyidea.org/setupcfg.html
diff --git a/required_pkgs/d2to1/d2to1/__init__.py b/required_pkgs/d2to1/d2to1/__init__.py
deleted file mode 100644
index 4089e8f..0000000
--- a/required_pkgs/d2to1/d2to1/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-try:
-    __version__ = __import__('pkg_resources').get_distribution('d2to1').version
-except:
-    __version__ = ''
diff --git a/required_pkgs/d2to1/d2to1/core.py b/required_pkgs/d2to1/d2to1/core.py
deleted file mode 100644
index 929f575..0000000
--- a/required_pkgs/d2to1/d2to1/core.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-import sys
-import warnings
-
-from distutils.core import Distribution as _Distribution
-from distutils.errors import DistutilsFileError, DistutilsSetupError
-from setuptools.dist import _get_unpatched
-
-from .extern import six
-from .util import DefaultGetDict, IgnoreDict, cfg_to_args
-
-
-_Distribution = _get_unpatched(_Distribution)
-
-
-def d2to1(dist, attr, value):
-    """Implements the actual d2to1 setup() keyword.  When used, this should be
-    the only keyword in your setup() aside from `setup_requires`.
-
-    If given as a string, the value of d2to1 is assumed to be the relative path
-    to the setup.cfg file to use.  Otherwise, if it evaluates to true, it
-    simply assumes that d2to1 should be used, and the default 'setup.cfg' is
-    used.
-
-    This works by reading the setup.cfg file, parsing out the supported
-    metadata and command options, and using them to rebuild the
-    `DistributionMetadata` object and set the newly added command options.
-
-    The reason for doing things this way is that a custom `Distribution` class
-    will not play nicely with setup_requires; however, this implementation may
-    not work well with distributions that do use a `Distribution` subclass.
-    """
-
-    if not value:
-        return
-    if isinstance(value, six.string_types):
-        path = os.path.abspath(value)
-    else:
-        path = os.path.abspath('setup.cfg')
-    if not os.path.exists(path):
-        raise DistutilsFileError(
-            'The setup.cfg file %s does not exist.' % path)
-
-    # Converts the setup.cfg file to setup() arguments
-    try:
-        attrs = cfg_to_args(path)
-    except:
-        e = sys.exc_info()[1]
-        raise DistutilsSetupError(
-            'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
-                                          e.args[0]))
-
-    # Repeat some of the Distribution initialization code with the newly
-    # provided attrs
-    if attrs:
-        # Skips 'options' and 'licence' support which are rarely used; may add
-        # back in later if demanded
-        for key, val in six.iteritems(attrs):
-            if hasattr(dist.metadata, 'set_' + key):
-                getattr(dist.metadata, 'set_' + key)(val)
-            elif hasattr(dist.metadata, key):
-                setattr(dist.metadata, key, val)
-            elif hasattr(dist, key):
-                setattr(dist, key, val)
-            else:
-                msg = 'Unknown distribution option: %s' % repr(key)
-                warnings.warn(msg)
-
-    # Re-finalize the underlying Distribution
-    _Distribution.finalize_options(dist)
-
-    # This bit comes out of distribute/setuptools
-    if isinstance(dist.metadata.version, six.integer_types + (float,)):
-        # Some people apparently take "version number" too literally :)
-        dist.metadata.version = str(dist.metadata.version)
-
-    # This bit of hackery is necessary so that the Distribution will ignore
-    # normally unsupport command options (namely pre-hooks and post-hooks).
-    # dist.command_options is normally a dict mapping command names to dicts of
-    # their options.  Now it will be a defaultdict that returns IgnoreDicts for
-    # the each command's options so we can pass through the unsupported options
-    ignore = ['pre_hook.*', 'post_hook.*']
-    dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore))
diff --git a/required_pkgs/d2to1/d2to1/extern/__init__.py b/required_pkgs/d2to1/d2to1/extern/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/extern/six.py b/required_pkgs/d2to1/d2to1/extern/six.py
deleted file mode 100644
index 0cdd1c7..0000000
--- a/required_pkgs/d2to1/d2to1/extern/six.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# Copyright (c) 2010-2011 Benjamin Peterson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin at python.org>"
-__version__ = "1.2.0"
-
-
-# True if we are running on Python 3.
-PY3 = sys.version_info[0] == 3
-
-if PY3:
-    string_types = str,
-    integer_types = int,
-    class_types = type,
-    text_type = str
-    binary_type = bytes
-
-    MAXSIZE = sys.maxsize
-else:
-    string_types = basestring,
-    integer_types = (int, long)
-    class_types = (type, types.ClassType)
-    text_type = unicode
-    binary_type = str
-
-    if sys.platform == "java":
-        # Jython always uses 32 bits.
-        MAXSIZE = int((1 << 31) - 1)
-    else:
-        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
-        class X(object):
-            def __len__(self):
-                return 1 << 31
-        try:
-            len(X())
-        except OverflowError:
-            # 32-bit
-            MAXSIZE = int((1 << 31) - 1)
-        else:
-            # 64-bit
-            MAXSIZE = int((1 << 63) - 1)
-            del X
-
-
-def _add_doc(func, doc):
-    """Add documentation to a function."""
-    func.__doc__ = doc
-
-
-def _import_module(name):
-    """Import module, returning the module after the last dot."""
-    __import__(name)
-    return sys.modules[name]
-
-
-class _LazyDescr(object):
-
-    def __init__(self, name):
-        self.name = name
-
-    def __get__(self, obj, tp):
-        result = self._resolve()
-        setattr(obj, self.name, result)
-        # This is a bit ugly, but it avoids running this again.
-        delattr(tp, self.name)
-        return result
-
-
-class MovedModule(_LazyDescr):
-
-    def __init__(self, name, old, new=None):
-        super(MovedModule, self).__init__(name)
-        if PY3:
-            if new is None:
-                new = name
-            self.mod = new
-        else:
-            self.mod = old
-
-    def _resolve(self):
-        return _import_module(self.mod)
-
-
-class MovedAttribute(_LazyDescr):
-
-    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
-        super(MovedAttribute, self).__init__(name)
-        if PY3:
-            if new_mod is None:
-                new_mod = name
-            self.mod = new_mod
-            if new_attr is None:
-                if old_attr is None:
-                    new_attr = name
-                else:
-                    new_attr = old_attr
-            self.attr = new_attr
-        else:
-            self.mod = old_mod
-            if old_attr is None:
-                old_attr = name
-            self.attr = old_attr
-
-    def _resolve(self):
-        module = _import_module(self.mod)
-        return getattr(module, self.attr)
-
-
-
-class _MovedItems(types.ModuleType):
-    """Lazy loading of moved objects"""
-
-
-_moved_attributes = [
-    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
-    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
-    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
-    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
-    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
-    MovedAttribute("reduce", "__builtin__", "functools"),
-    MovedAttribute("StringIO", "StringIO", "io"),
-    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
-    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
-
-    MovedModule("builtins", "__builtin__"),
-    MovedModule("configparser", "ConfigParser"),
-    MovedModule("copyreg", "copy_reg"),
-    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
-    MovedModule("http_cookies", "Cookie", "http.cookies"),
-    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
-    MovedModule("html_parser", "HTMLParser", "html.parser"),
-    MovedModule("http_client", "httplib", "http.client"),
-    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
-    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
-    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
-    MovedModule("cPickle", "cPickle", "pickle"),
-    MovedModule("queue", "Queue"),
-    MovedModule("reprlib", "repr"),
-    MovedModule("socketserver", "SocketServer"),
-    MovedModule("tkinter", "Tkinter"),
-    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
-    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
-    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
-    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
-    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
-    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
-    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
-    MovedModule("tkinter_colorchooser", "tkColorChooser",
-                "tkinter.colorchooser"),
-    MovedModule("tkinter_commondialog", "tkCommonDialog",
-                "tkinter.commondialog"),
-    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
-    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
-    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
-    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
-                "tkinter.simpledialog"),
-    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
-    MovedModule("winreg", "_winreg"),
-]
-for attr in _moved_attributes:
-    setattr(_MovedItems, attr.name, attr)
-del attr
-
-moves = sys.modules["six.moves"] = _MovedItems("moves")
-
-
-def add_move(move):
-    """Add an item to six.moves."""
-    setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
-    """Remove item from six.moves."""
-    try:
-        delattr(_MovedItems, name)
-    except AttributeError:
-        try:
-            del moves.__dict__[name]
-        except KeyError:
-            raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
-    _meth_func = "__func__"
-    _meth_self = "__self__"
-
-    _func_code = "__code__"
-    _func_defaults = "__defaults__"
-
-    _iterkeys = "keys"
-    _itervalues = "values"
-    _iteritems = "items"
-else:
-    _meth_func = "im_func"
-    _meth_self = "im_self"
-
-    _func_code = "func_code"
-    _func_defaults = "func_defaults"
-
-    _iterkeys = "iterkeys"
-    _itervalues = "itervalues"
-    _iteritems = "iteritems"
-
-
-try:
-    advance_iterator = next
-except NameError:
-    def advance_iterator(it):
-        return it.next()
-next = advance_iterator
-
-
-if PY3:
-    def get_unbound_function(unbound):
-        return unbound
-
-    Iterator = object
-
-    def callable(obj):
-        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-else:
-    def get_unbound_function(unbound):
-        return unbound.im_func
-
-    class Iterator(object):
-
-        def next(self):
-            return type(self).__next__(self)
-
-    callable = callable
-_add_doc(get_unbound_function,
-         """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-
-
-def iterkeys(d):
-    """Return an iterator over the keys of a dictionary."""
-    return iter(getattr(d, _iterkeys)())
-
-def itervalues(d):
-    """Return an iterator over the values of a dictionary."""
-    return iter(getattr(d, _itervalues)())
-
-def iteritems(d):
-    """Return an iterator over the (key, value) pairs of a dictionary."""
-    return iter(getattr(d, _iteritems)())
-
-
-if PY3:
-    def b(s):
-        return s.encode("latin-1")
-    def u(s):
-        return s
-    if sys.version_info[1] <= 1:
-        def int2byte(i):
-            return bytes((i,))
-    else:
-        # This is about 2x faster than the implementation above on 3.2+
-        int2byte = operator.methodcaller("to_bytes", 1, "big")
-    import io
-    StringIO = io.StringIO
-    BytesIO = io.BytesIO
-else:
-    def b(s):
-        return s
-    def u(s):
-        return unicode(s, "unicode_escape")
-    int2byte = chr
-    import StringIO
-    StringIO = BytesIO = StringIO.StringIO
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-if PY3:
-    import builtins
-    exec_ = getattr(builtins, "exec")
-
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-
-    print_ = getattr(builtins, "print")
-    del builtins
-
-else:
-    def exec_(code, globs=None, locs=None):
-        """Execute code in a namespace."""
-        if globs is None:
-            frame = sys._getframe(1)
-            globs = frame.f_globals
-            if locs is None:
-                locs = frame.f_locals
-            del frame
-        elif locs is None:
-            locs = globs
-        exec("""exec code in globs, locs""")
-
-
-    exec_("""def reraise(tp, value, tb=None):
-    raise tp, value, tb
-""")
-
-
-    def print_(*args, **kwargs):
-        """The new-style print function."""
-        fp = kwargs.pop("file", sys.stdout)
-        if fp is None:
-            return
-        def write(data):
-            if not isinstance(data, basestring):
-                data = str(data)
-            fp.write(data)
-        want_unicode = False
-        sep = kwargs.pop("sep", None)
-        if sep is not None:
-            if isinstance(sep, unicode):
-                want_unicode = True
-            elif not isinstance(sep, str):
-                raise TypeError("sep must be None or a string")
-        end = kwargs.pop("end", None)
-        if end is not None:
-            if isinstance(end, unicode):
-                want_unicode = True
-            elif not isinstance(end, str):
-                raise TypeError("end must be None or a string")
-        if kwargs:
-            raise TypeError("invalid keyword arguments to print()")
-        if not want_unicode:
-            for arg in args:
-                if isinstance(arg, unicode):
-                    want_unicode = True
-                    break
-        if want_unicode:
-            newline = unicode("\n")
-            space = unicode(" ")
-        else:
-            newline = "\n"
-            space = " "
-        if sep is None:
-            sep = space
-        if end is None:
-            end = newline
-        for i, arg in enumerate(args):
-            if i:
-                write(sep)
-            write(arg)
-        write(end)
-
-_add_doc(reraise, """Reraise an exception.""")
-
-
-def with_metaclass(meta, base=object):
-    """Create a base class with a metaclass."""
-    return meta("NewBase", (base,), {})
diff --git a/required_pkgs/d2to1/d2to1/tests/__init__.py b/required_pkgs/d2to1/d2to1/tests/__init__.py
deleted file mode 100644
index 6146af2..0000000
--- a/required_pkgs/d2to1/d2to1/tests/__init__.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from __future__ import with_statement
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-
-import pkg_resources
-
-from .util import rmtree, open_config
-
-
-D2TO1_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                         os.pardir, os.pardir))
-
-
-def fake_d2to1_dist():
-    # Fake a d2to1 distribution from the d2to1 package that these tests reside
-    # in and make sure it's active on the path with the appropriate entry
-    # points installed
-
-    class _FakeProvider(pkg_resources.EmptyProvider):
-        """A fake metadata provider that does almost nothing except to return
-        entry point metadata.
-        """
-
-        def has_metadata(self, name):
-            return name == 'entry_points.txt'
-
-        def get_metadata(self, name):
-            if name == 'entry_points.txt':
-                return '[distutils.setup_keywords]\nd2to1 = d2to1.core:d2to1\n'
-            else:
-                return ''
-
-
-    sys.path.insert(0, D2TO1_DIR)
-    if 'd2to1' in sys.modules:
-        del sys.modules['d2to1']
-    if 'd2to1' in pkg_resources.working_set.by_key:
-        del pkg_resources.working_set.by_key['d2to1']
-    dist = pkg_resources.Distribution(location=D2TO1_DIR, project_name='d2to1',
-                                      metadata=_FakeProvider())
-    pkg_resources.working_set.add(dist)
-
-
-class D2to1TestCase(object):
-    def setup(self):
-        self.temp_dir = tempfile.mkdtemp(prefix='d2to1-test-')
-        self.package_dir = os.path.join(self.temp_dir, 'testpackage')
-        shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'),
-                        self.package_dir)
-        self.oldcwd = os.getcwd()
-        os.chdir(self.package_dir)
-
-    def teardown(self):
-        os.chdir(self.oldcwd)
-        # Remove d2to1.testpackage from sys.modules so that it can be freshly
-        # re-imported by the next test
-        for k in list(sys.modules):
-            if (k == 'd2to1_testpackage' or
-                k.startswith('d2to1_testpackage.')):
-                del sys.modules[k]
-        rmtree(self.temp_dir)
-
-    def run_setup(self, *args):
-        cmd = ('-c',
-               'import sys;sys.path.insert(0, %r);'
-               'from d2to1.tests import fake_d2to1_dist;'
-               'from d2to1.extern.six import exec_;'
-               'fake_d2to1_dist();exec_(open("setup.py").read())' % D2TO1_DIR)
-        return self._run_cmd(sys.executable, cmd + args)
-
-    def run_svn(self, *args):
-        return self._run_cmd('svn', args)
-
-    def _run_cmd(self, cmd, args):
-        """
-        Runs a command, with the given argument list, in the root of the test
-        working copy--returns the stdout and stderr streams and the exit code
-        from the subprocess.
-        """
-
-        os.chdir(self.package_dir)
-        p = subprocess.Popen([cmd] + list(args), stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-
-        streams = tuple(s.decode('latin1').strip() for s in p.communicate())
-        print(streams)
-        return (streams) + (p.returncode,)
diff --git a/required_pkgs/d2to1/d2to1/tests/test_commands.py b/required_pkgs/d2to1/d2to1/tests/test_commands.py
deleted file mode 100644
index 29342da..0000000
--- a/required_pkgs/d2to1/d2to1/tests/test_commands.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from . import D2to1TestCase
-
-
-class TestCommands(D2to1TestCase):
-    def test_custom_build_py_command(self):
-        """
-        Test that a custom subclass of the build_py command runs when listed in
-        the commands [global] option, rather than the normal build command.
-        """
-
-        stdout, _, return_code = self.run_setup('build_py')
-        assert 'Running custom build_py command.' in stdout
-        assert return_code == 0
diff --git a/required_pkgs/d2to1/d2to1/tests/test_core.py b/required_pkgs/d2to1/d2to1/tests/test_core.py
deleted file mode 100644
index d7962f4..0000000
--- a/required_pkgs/d2to1/d2to1/tests/test_core.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import glob
-import os
-import tarfile
-
-from . import D2to1TestCase
-
-
-VERSION = '0.1.dev'
-
-
-class TestCore(D2to1TestCase):
-    def test_setup_py_version(self):
-        """
-        Test that the `./setup.py --version` command returns the correct
-        value without balking.
-        """
-
-        self.run_setup('egg_info')
-        stdout, _, _ = self.run_setup('--version')
-        assert stdout == VERSION
-
-    def test_setup_py_keywords(self):
-        """
-        Test that the `./setup.py --keywords` command returns the correct
-        value without balking.
-        """
-
-        self.run_setup('egg_info')
-        stdout, _, _ = self.run_setup('--keywords')
-        assert stdout == 'packaging,distutils,setuptools'
-
-    def test_sdist_extra_files(self):
-        """
-        Test that the extra files are correctly added.
-        """
-
-        stdout, _, return_code = self.run_setup('sdist', '--formats=gztar')
-
-        # There can be only one
-        try:
-            tf_path = glob.glob(os.path.join('dist', '*.tar.gz'))[0]
-        except IndexError:
-            assert False, 'source dist not found'
-
-        tf = tarfile.open(tf_path)
-        names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()]
-
-        assert 'extra-file.txt' in names
diff --git a/required_pkgs/d2to1/d2to1/tests/test_hooks.py b/required_pkgs/d2to1/d2to1/tests/test_hooks.py
deleted file mode 100644
index 047e0b9..0000000
--- a/required_pkgs/d2to1/d2to1/tests/test_hooks.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import with_statement
-
-import os
-import textwrap
-
-from . import D2to1TestCase
-from .util import open_config
-
-
-class TestHooks(D2to1TestCase):
-    def setup(self):
-        super(TestHooks, self).setup()
-        with open_config(os.path.join(self.package_dir, 'setup.cfg')) as cfg:
-            cfg.set('global', 'setup-hooks',
-                    'd2to1_testpackage._setup_hooks.test_hook_1\n'
-                    'd2to1_testpackage._setup_hooks.test_hook_2')
-            cfg.set('build_ext', 'pre-hook.test_pre_hook',
-                    'd2to1_testpackage._setup_hooks.test_pre_hook')
-            cfg.set('build_ext', 'post-hook.test_post_hook',
-                    'd2to1_testpackage._setup_hooks.test_post_hook')
-
-    def test_global_setup_hooks(self):
-        """
-        Test that setup_hooks listed in the [global] section of setup.cfg are
-        executed in order.
-        """
-
-        stdout, _, return_code = self.run_setup('egg_info')
-        assert 'test_hook_1\ntest_hook_2' in stdout
-        assert return_code == 0
-
-    def test_command_hooks(self):
-        """
-        Simple test that the appropriate command hooks run at the
-        beginning/end of the appropriate command.
-        """
-
-        stdout, _, return_code = self.run_setup('egg_info')
-        assert 'build_ext pre-hook' not in stdout
-        assert 'build_ext post-hook' not in stdout
-        assert return_code == 0
-
-        stdout, _, return_code = self.run_setup('build_ext')
-        assert textwrap.dedent("""
-            running build_ext
-            running pre_hook d2to1_testpackage._setup_hooks.test_pre_hook for command build_ext
-            build_ext pre-hook
-        """) in stdout
-        assert stdout.endswith('build_ext post-hook')
-        assert return_code == 0
-
-
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/CHANGES.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/CHANGES.txt
deleted file mode 100644
index 709b9d4..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/CHANGES.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-Changelog
-===========
-
-0.3 (unreleased)
-------------------
-
-- The ``glob_data_files`` hook became a pre-command hook for the install_data
-  command instead of being a setup-hook.  This is to support the additional
-  functionality of requiring data_files with relative destination paths to be
-  install relative to the package's install path (i.e. site-packages).
-
-- Dropped support for and deprecated the easier_install custom command.
-  Although it should still work, it probably won't be used anymore for
-  stsci_python packages.
-
-- Added support for the ``build_optional_ext`` command, which replaces/extends
-  the default ``build_ext`` command.  See the README for more details.
-
-- Added the ``tag_svn_revision`` setup_hook as a replacement for the
-  setuptools-specific tag_svn_revision option to the egg_info command.  This
-  new hook is easier to use than the old tag_svn_revision option: It's
-  automatically enabled by the presence of ``.dev`` in the version string, and
-  disabled otherwise.
-
-- The ``svn_info_pre_hook`` and ``svn_info_post_hook`` have been replaced with
-  ``version_pre_command_hook`` and ``version_post_command_hook`` respectively.
-  However, a new ``version_setup_hook``, which has the same purpose, has been
-  added.  It is generally easier to use and will give more consistent results
-  in that it will run every time setup.py is run, regardless of which command
-  is used.  ``stsci.distutils`` itself uses this hook--see the `setup.cfg` file
-  and `stsci/distutils/__init__.py` for example usage.
-
-- Instead of creating an `svninfo.py` module, the new ``version_`` hooks create
-  a file called `version.py`.  In addition to the SVN info that was included
-  in `svninfo.py`, it includes a ``__version__`` variable to be used by the
-  package's `__init__.py`.  This allows there to be a hard-coded
-  ``__version__`` variable included in the source code, rather than using
-  pkg_resources to get the version.
-
-- In `version.py`, the variables previously named ``__svn_version__`` and
-  ``__full_svn_info__`` are now named ``__svn_revision__`` and
-  ``__svn_full_info__``.
-
-- Fixed a bug when using stsci.distutils in the installation of other packages
-  in the ``stsci.*`` namespace package.  If stsci.distutils was not already
-  installed, and was downloaded automatically by distribute through the
-  setup_requires option, then ``stsci.distutils`` would fail to import.  This
-  is because the way the namespace package (nspkg) mechanism currently works,
-  all packages belonging to the nspkg *must* be on the import path at initial
-  import time.
-
-  So when installing stsci.tools, for example, if ``stsci.tools`` is imported
-  from within the source code at install time, but before ``stsci.distutils``
-  is downloaded and added to the path, the ``stsci`` package is already
-  imported and can't be extended to include the path of ``stsci.distutils``
-  after the fact.  The easiest way of dealing with this, it seems, is to
-  delete ``stsci`` from ``sys.modules``, which forces it to be reimported, now
-  the its ``__path__`` extended to include ``stsci.distutil``'s path.
-
-
-0.2.2 (2011-11-09)
-------------------
-
-- Fixed check for the issue205 bug on actual setuptools installs; before it
-  only worked on distribute.  setuptools has the issue205 bug prior to version
-  0.6c10.
-
-- Improved the fix for the issue205 bug, especially on setuptools.
-  setuptools, prior to 0.6c10, did not back of sys.modules either before
-  sandboxing, which causes serious problems.  In fact, it's so bad that it's
-  not enough to add a sys.modules backup to the current sandbox: It's in fact
-  necessary to monkeypatch setuptools.sandbox.run_setup so that any subsequent
-  calls to it also back up sys.modules.
-
-
-0.2.1 (2011-09-02)
-------------------
-
-- Fixed the dependencies so that setuptools is requirement but 'distribute'
-  specifically.  Previously installation could fail if users had plain
-  setuptools installed and not distribute
-
-0.2 (2011-08-23)
-------------------
-
-- Initial public release
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/LICENSE.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/LICENSE.txt
deleted file mode 100644
index 7e8019a..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/MANIFEST.in b/required_pkgs/d2to1/d2to1/tests/testpackage/MANIFEST.in
deleted file mode 100644
index cdc95ea..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-include data_files/*
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/README.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/README.txt
deleted file mode 100644
index 4f00d32..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/README.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-Introduction
-============
-This package contains utilities used to package some of STScI's Python
-projects; specifically those projects that comprise stsci_python_ and
-Astrolib_.
-
-It currently consists mostly of some setup_hook scripts meant for use with
-`distutils2/packaging`_ and/or d2to1_, and a customized easy_install command
-meant for use with distribute_.
-
-This package is not meant for general consumption, though it might be worth
-looking at for examples of how to do certain things with your own packages, but
-YMMV.
-
-Features
-========
-
-Hook Scripts
-------------
-Currently the main features of this package are a couple of setup_hook scripts.
-In distutils2, a setup_hook is a script that runs at the beginning of any
-pysetup command, and can modify the package configuration read from setup.cfg.
-There are also pre- and post-command hooks that only run before/after a
-specific setup command (eg. build_ext, install) is run.
-
-stsci.distutils.hooks.use_packages_root
-'''''''''''''''''''''''''''''''''''''''
-If using the ``packages_root`` option under the ``[files]`` section of
-setup.cfg, this hook will add that path to ``sys.path`` so that modules in your
-package can be imported and used in setup.  This can be used even if
-``packages_root`` is not specified--in this case it adds ``''`` to
-``sys.path``.
-
-stsci.distutils.hooks.version_setup_hook
-''''''''''''''''''''''''''''''''''''''''
-Creates a Python module called version.py which currently contains four
-variables:
-
-* ``__version__`` (the release version)
-* ``__svn_revision__`` (the SVN revision info as returned by the ``svnversion``
-  command)
-* ``__svn_full_info__`` (as returned by the ``svn info`` command)
-* ``__setup_datetime__`` (the date and time that setup.py was last run).
-
-These variables can be imported in the package's `__init__.py` for degugging
-purposes.  The version.py module will *only* be created in a package that
-imports from the version module in its `__init__.py`.  It should be noted that
-this is generally preferable to writing these variables directly into
-`__init__.py`, since this provides more control and is less likely to
-unexpectedly break things in `__init__.py`.
-
-stsci.distutils.hooks.version_pre_command_hook
-''''''''''''''''''''''''''''''''''''''''''''''
-Identical to version_setup_hook, but designed to be used as a pre-command
-hook.
-
-stsci.distutils.hooks.version_post_command_hook
-'''''''''''''''''''''''''''''''''''''''''''''''
-The complement to version_pre_command_hook.  This will delete any version.py
-files created during a build in order to prevent them from cluttering an SVN
-working copy (note, however, that version.py is *not* deleted from the build/
-directory, so a copy of it is still preserved).  It will also not be deleted
-if the current directory is not an SVN working copy.  For example, if source
-code extracted from a source tarball it will be preserved.
-
-stsci.distutils.hooks.tag_svn_revision
-''''''''''''''''''''''''''''''''''''''
-A setup_hook to add the SVN revision of the current working copy path to the
-package version string, but only if the version ends in .dev.
-
-For example, ``mypackage-1.0.dev`` becomes ``mypackage-1.0.dev1234``.  This is
-in accordance with the version string format standardized by PEP 386.
-
-This should be used as a replacement for the ``tag_svn_revision`` option to
-the egg_info command.  This hook is more compatible with packaging/distutils2,
-which does not include any VCS support.  This hook is also more flexible in
-that it turns the revision number on/off depending on the presence of ``.dev``
-in the version string, so that it's not automatically added to the version in
-final releases.
-
-This hook does require the ``svnversion`` command to be available in order to
-work.  It does not examine the working copy metadata directly.
-
-stsci.distutils.hooks.numpy_extension_hook
-''''''''''''''''''''''''''''''''''''''''''
-This is a pre-command hook for the build_ext command.  To use it, add a
-``[build_ext]`` section to your setup.cfg, and add to it::
-
-    pre-hook.numpy-extension-hook = stsci.distutils.hooks.numpy_extension_hook
-
-This hook must be used to build extension modules that use Numpy.   The primary
-side-effect of this hook is to add the correct numpy include directories to
-`include_dirs`.  To use it, add 'numpy' to the 'include-dirs' option of each
-extension module that requires numpy to build.  The value 'numpy' will be
-replaced with the actual path to the numpy includes.
-
-stsci.distutils.hooks.is_display_option
-'''''''''''''''''''''''''''''''''''''''
-This is not actually a hook, but is a useful utility function that can be used
-in writing other hooks.  Basically, it returns ``True`` if setup.py was run
-with a "display option" such as --version or --help.  This can be used to
-prevent your hook from running in such cases.
-
-stsci.distutils.hooks.glob_data_files
-'''''''''''''''''''''''''''''''''''''
-A pre-command hook for the install_data command.  Allows filename wildcards as
-understood by ``glob.glob()`` to be used in the data_files option.  This hook
-must be used in order to have this functionality since it does not normally
-exist in distutils.
-
-This hook also ensures that data files are installed relative to the package
-path.  data_files shouldn't normally be installed this way, but the
-functionality is required for a few special cases.
-
-
-Commands
---------
-build_optional_ext
-''''''''''''''''''
-This serves as an optional replacement for the default built_ext command,
-which compiles C extension modules.  Its purpose is to allow extension modules
-to be *optional*, so that if their build fails the rest of the package is
-still allowed to be built and installed.  This can be used when an extension
-module is not definitely required to use the package.
-
-To use this custom command, add::
-
-    commands = stsci.distutils.command.build_optional_ext.build_optional_ext
-
-under the ``[global]`` section of your package's setup.cfg.  Then, to mark
-an individual extension module as optional, under the setup.cfg section for
-that extension add::
-
-    optional = True
-
-Optionally, you may also add a custom failure message by adding::
-
-    fail_message = The foobar extension module failed to compile.
-                   This could be because you lack such and such headers.
-                   This package will still work, but such and such features
-                   will be disabled.
-
-
-.. _stsci_python: http://www.stsci.edu/resources/software_hardware/pyraf/stsci_python
-.. _Astrolib: http://www.scipy.org/AstroLib/
-.. _distutils2/packaging: http://distutils2.notmyidea.org/
-.. _d2to1: http://pypi.python.org/pypi/d2to1
-.. _distribute: http://pypi.python.org/pypi/distribute
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/__init__.py b/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/_setup_hooks.py b/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/_setup_hooks.py
deleted file mode 100644
index 77005b2..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/_setup_hooks.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from distutils.command.build_py import build_py
-
-
-def test_hook_1(config):
-    print('test_hook_1')
-
-
-def test_hook_2(config):
-    print('test_hook_2')
-
-
-class test_command(build_py):
-    command_name = 'build_py'
-
-    def run(self):
-        print('Running custom build_py command.')
-        return build_py.run(self)
-
-
-def test_pre_hook(cmdobj):
-    print('build_ext pre-hook')
-
-
-def test_post_hook(cmdobj):
-    print('build_ext post-hook')
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/package_data/1.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/package_data/1.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/package_data/2.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/d2to1_testpackage/package_data/2.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/a.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/a.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/b.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/b.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/c.rst b/required_pkgs/d2to1/d2to1/tests/testpackage/data_files/c.rst
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/distribute_setup.py b/required_pkgs/d2to1/d2to1/tests/testpackage/distribute_setup.py
deleted file mode 100644
index bbb6f3c..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/distribute_setup.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.19"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-    finally:
-        os.chdir(old_wd)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>="+version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unkown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Removing elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install')+1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index+1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patched done.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def main(argv, version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
-
-if __name__ == '__main__':
-    main(sys.argv[1:])
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/extra-file.txt b/required_pkgs/d2to1/d2to1/tests/testpackage/extra-file.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/setup.cfg b/required_pkgs/d2to1/d2to1/tests/testpackage/setup.cfg
deleted file mode 100644
index a200616..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/setup.cfg
+++ /dev/null
@@ -1,46 +0,0 @@
-[metadata]
-name = d2to1_testpackage
-version = 0.1.dev
-author = Erik M. Bray
-author-email = embray at stsci.edu
-home-page = http://www.stsci.edu/resources/software_hardware/stsci_python
-summary = Test package for testing d2to1
-description-file =
-    README.txt
-    CHANGES.txt
-requires-python = >=2.5
-
-requires-dist =
-    setuptools
-
-classifier =
-    Development Status :: 3 - Alpha
-    Intended Audience :: Developers
-    License :: OSI Approved :: BSD License
-    Programming Language :: Python
-    Topic :: Scientific/Engineering
-    Topic :: Software Development :: Build Tools
-    Topic :: Software Development :: Libraries :: Python Modules
-    Topic :: System :: Archiving :: Packaging
-
-keywords = packaging, distutils, setuptools
-
-[files]
-packages = d2to1_testpackage
-package-data = testpackage = package_data/*.txt
-data-files = testpackage/data_files = data_files/*.txt
-extra-files = extra-file.txt
-
-[extension=d2to1_testpackage.testext]
-sources = src/testext.c
-optional = True
-
-[global]
-#setup-hooks =
-#    d2to1_testpackage._setup_hooks.test_hook_1
-#    d2to1_testpackage._setup_hooks.test_hook_2
-commands = d2to1_testpackage._setup_hooks.test_command
-
-[build_ext]
-#pre-hook.test_pre_hook = d2to1_testpackage._setup_hooks.test_pre_hook
-#post-hook.test_post_hook = d2to1_testpackage._setup_hooks.test_post_hook
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/setup.py b/required_pkgs/d2to1/d2to1/tests/testpackage/setup.py
deleted file mode 100755
index dbaba47..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/setup.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python
-try:
-    from setuptools import setup
-except ImportError:
-    from distribute_setup import use_setuptools
-    use_setuptools()
-    from setuptools import setup
-
-setup(
-    setup_requires=['d2to1'],
-    d2to1=True,
-)
diff --git a/required_pkgs/d2to1/d2to1/tests/testpackage/src/testext.c b/required_pkgs/d2to1/d2to1/tests/testpackage/src/testext.c
deleted file mode 100644
index 872d43c..0000000
--- a/required_pkgs/d2to1/d2to1/tests/testpackage/src/testext.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <Python.h>
-
-
-static PyMethodDef TestextMethods[] = {
-    {NULL, NULL, 0, NULL}
-};
-
-
-#if PY_MAJOR_VERSION >=3
-static struct PyModuleDef testextmodule = {
-    PyModuleDef_HEAD_INIT,
-    "testext",
-    -1,
-    TestextMethods
-};
-
-PyObject*
-PyInit_testext(void)
-{
-    return PyModule_Create(&testextmodule);
-}
-#else
-PyMODINIT_FUNC
-inittestext(void)
-{
-    Py_InitModule("testext", TestextMethods);
-}
-#endif
diff --git a/required_pkgs/d2to1/d2to1/tests/util.py b/required_pkgs/d2to1/d2to1/tests/util.py
deleted file mode 100644
index fa55587..0000000
--- a/required_pkgs/d2to1/d2to1/tests/util.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from __future__ import with_statement
-
-import contextlib
-import os
-import shutil
-import stat
-
-
-from ..extern.six import moves as m
-ConfigParser = m.configparser.ConfigParser
-
-
- at contextlib.contextmanager
-def open_config(filename):
-    cfg = ConfigParser()
-    cfg.read(filename)
-    yield cfg
-    with open(filename, 'w') as fp:
-        cfg.write(fp)
-
-
-def rmtree(path):
-    """
-    shutil.rmtree() with error handler for 'access denied' from trying to
-    delete read-only files.
-    """
-
-    def onerror(func, path, exc_info):
-        if not os.access(path, os.W_OK):
-            os.chmod(path, stat.S_IWUSR)
-            func(path)
-        else:
-            raise
-
-    return shutil.rmtree(path, onerror=onerror)
diff --git a/required_pkgs/d2to1/d2to1/util.py b/required_pkgs/d2to1/d2to1/util.py
deleted file mode 100644
index 88c2623..0000000
--- a/required_pkgs/d2to1/d2to1/util.py
+++ /dev/null
@@ -1,580 +0,0 @@
-"""The code in this module is mostly copy/pasted out of the distutils2 source
-code, as recommended by Tarek Ziade.  As such, it may be subject to some change
-as distutils2 development continues, and will have to be kept up to date.
-
-I didn't want to use it directly from distutils2 itself, since I do not want it
-to be an installation dependency for our packages yet--it is still too unstable
-(the latest version on PyPI doesn't even install).
-"""
-
-# These first two imports are not used, but are needed to get around an
-# irritating Python bug that can crop up when using ./setup.py test.
-# See: http://www.eby-sarna.com/pipermail/peak/2010-May/003355.html
-try:
-    import multiprocessing
-except ImportError:
-    pass
-import logging
-
-import os
-import re
-import sys
-import traceback
-
-from collections import defaultdict
-
-import distutils.ccompiler
-
-from distutils import log
-from distutils.errors import (DistutilsOptionError, DistutilsModuleError,
-                              DistutilsFileError)
-from setuptools.command.egg_info import manifest_maker
-from setuptools.dist import Distribution
-from setuptools.extension import Extension
-
-from .extern.six import moves as m
-RawConfigParser = m.configparser.RawConfigParser
-
-
-# A simplified RE for this; just checks that the line ends with version
-# predicates in ()
-_VERSION_SPEC_RE = re.compile(r'\s*(.*?)\s*\((.*)\)\s*$')
-
-
-# Mappings from setup() keyword arguments to setup.cfg options;
-# The values are (section, option) tuples, or simply (section,) tuples if
-# the option has the same name as the setup() argument
-D1_D2_SETUP_ARGS = {
-    "name": ("metadata",),
-    "version": ("metadata",),
-    "author": ("metadata",),
-    "author_email": ("metadata",),
-    "maintainer": ("metadata",),
-    "maintainer_email": ("metadata",),
-    "url": ("metadata", "home_page"),
-    "description": ("metadata", "summary"),
-    "keywords": ("metadata",),
-    "long_description": ("metadata", "description"),
-    "download-url": ("metadata",),
-    "classifiers": ("metadata", "classifier"),
-    "platforms": ("metadata", "platform"),  # **
-    "license": ("metadata",),
-    # Use setuptools install_requires, not
-    # broken distutils requires
-    "install_requires": ("metadata", "requires_dist"),
-    "setup_requires": ("metadata", "setup_requires_dist"),
-    "provides": ("metadata", "provides_dist"),  # **
-    "obsoletes": ("metadata", "obsoletes_dist"),  # **
-    "package_dir": ("files", 'packages_root'),
-    "packages": ("files",),
-    "package_data": ("files",),
-    "data_files": ("files",),
-    "scripts": ("files",),
-    "py_modules": ("files", "modules"),   # **
-    "cmdclass": ("global", "commands"),
-    # Not supported in distutils2, but provided for
-    # backwards compatibility with setuptools
-    "use_2to3": ("backwards_compat", "use_2to3"),
-    "zip_safe": ("backwards_compat", "zip_safe"),
-    "tests_require": ("backwards_compat", "tests_require"),
-    "dependency_links": ("backwards_compat",),
-    "include_package_data": ("backwards_compat",),
-}
-
-# setup() arguments that can have multiple values in setup.cfg
-MULTI_FIELDS = ("classifiers",
-                "platforms",
-                "install_requires",
-                "provides",
-                "obsoletes",
-                "packages",
-                "package_data",
-                "data_files",
-                "scripts",
-                "py_modules",
-                "dependency_links",
-                "setup_requires",
-                "tests_require",
-                "cmdclass")
-
-# setup() arguments that contain boolean values
-BOOL_FIELDS = ("use_2to3", "zip_safe", "include_package_data")
-
-
-CSV_FIELDS = ("keywords",)
-
-
-log.set_verbosity(log.INFO)
-
-
-def resolve_name(name):
-    """Resolve a name like ``module.object`` to an object and return it.
-
-    Raise ImportError if the module or name is not found.
-    """
-
-    parts = name.split('.')
-    cursor = len(parts) - 1
-    module_name = parts[:cursor]
-    attr_name = parts[-1]
-
-    while cursor > 0:
-        try:
-            ret = __import__('.'.join(module_name), fromlist=[attr_name])
-            break
-        except ImportError:
-            if cursor == 0:
-                raise
-            cursor -= 1
-            module_name = parts[:cursor]
-            attr_name = parts[cursor]
-            ret = ''
-
-    for part in parts[cursor:]:
-        try:
-            ret = getattr(ret, part)
-        except AttributeError:
-            raise ImportError(name)
-
-    return ret
-
-
-def cfg_to_args(path='setup.cfg'):
-    """ Distutils2 to distutils1 compatibility util.
-
-        This method uses an existing setup.cfg to generate a dictionary of
-        keywords that can be used by distutils.core.setup(kwargs**).
-
-        :param file:
-            The setup.cfg path.
-        :raises DistutilsFileError:
-            When the setup.cfg file is not found.
-
-    """
-
-    # The method source code really starts here.
-    parser = RawConfigParser()
-    if not os.path.exists(path):
-        raise DistutilsFileError("file '%s' does not exist" %
-                                 os.path.abspath(path))
-    parser.read(path)
-    config = {}
-    for section in parser.sections():
-        config[section] = dict(parser.items(section))
-
-    # Run setup_hooks, if configured
-    setup_hooks = has_get_option(config, 'global', 'setup_hooks')
-    package_dir = has_get_option(config, 'files', 'packages_root')
-
-    # Add the source package directory to sys.path in case it contains
-    # additional hooks, and to make sure it's on the path before any existing
-    # installations of the package
-    if package_dir:
-        package_dir = os.path.abspath(package_dir)
-        sys.path.insert(0, package_dir)
-
-    try:
-        if setup_hooks:
-            setup_hooks = split_multiline(setup_hooks)
-            for hook in setup_hooks:
-                hook_fn = resolve_name(hook)
-                try :
-                    hook_fn(config)
-                except SystemExit:
-                    log.error('setup hook %s terminated the installation')
-                except:
-                    e = sys.exc_info()[1]
-                    log.error('setup hook %s raised exception: %s\n' %
-                              (hook, e))
-                    log.error(traceback.format_exc())
-                    sys.exit(1)
-
-        kwargs = setup_cfg_to_setup_kwargs(config)
-
-        register_custom_compilers(config)
-
-        ext_modules = get_extension_modules(config)
-        if ext_modules:
-            kwargs['ext_modules'] = ext_modules
-
-        entry_points = get_entry_points(config)
-        if entry_points:
-            kwargs['entry_points'] = entry_points
-
-        wrap_commands(kwargs)
-
-        # Handle the [files]/extra_files option
-        extra_files = has_get_option(config, 'files', 'extra_files')
-        if extra_files:
-            extra_files = split_multiline(extra_files)
-            # Let's do a sanity check
-            for filename in extra_files:
-                if not os.path.exists(filename):
-                    raise DistutilsFileError(
-                        '%s from the extra_files option in setup.cfg does not '
-                        'exist' % filename)
-            # Unfortunately the only really sensible way to do this is to
-            # monkey-patch the manifest_maker class
-            @monkeypatch_method(manifest_maker)
-            def add_defaults(self, extra_files=extra_files, log=log):
-                log.info('[d2to1] running patched manifest_maker command '
-                          'with extra_files support')
-                add_defaults._orig(self)
-                self.filelist.extend(extra_files)
-
-    finally:
-        # Perform cleanup if any paths were added to sys.path
-        if package_dir:
-            sys.path.pop(0)
-
-    return kwargs
-
-
-def setup_cfg_to_setup_kwargs(config):
-    """Processes the setup.cfg options and converts them to arguments accepted
-    by setuptools' setup() function.
-    """
-
-    kwargs = {}
-
-    for arg in D1_D2_SETUP_ARGS:
-        if len(D1_D2_SETUP_ARGS[arg]) == 2:
-            # The distutils field name is different than distutils2's.
-            section, option = D1_D2_SETUP_ARGS[arg]
-
-        elif len(D1_D2_SETUP_ARGS[arg]) == 1:
-            # The distutils field name is the same thant distutils2's.
-            section = D1_D2_SETUP_ARGS[arg][0]
-            option = arg
-
-        in_cfg_value = has_get_option(config, section, option)
-        if not in_cfg_value:
-            # There is no such option in the setup.cfg
-            if arg == "long_description":
-                in_cfg_value = has_get_option(config, section,
-                                              "description_file")
-                if in_cfg_value:
-                    in_cfg_value = split_multiline(in_cfg_value)
-                    value = ''
-                    for filename in in_cfg_value:
-                        description_file = open(filename)
-                        try:
-                            value += description_file.read().strip() + '\n\n'
-                        finally:
-                            description_file.close()
-                    in_cfg_value = value
-            else:
-                continue
-
-        if arg in CSV_FIELDS:
-            in_cfg_value = split_csv(in_cfg_value)
-        if arg in MULTI_FIELDS:
-            in_cfg_value = split_multiline(in_cfg_value)
-        elif arg in BOOL_FIELDS:
-            # Provide some flexibility here...
-            if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):
-                in_cfg_value = True
-            else:
-                in_cfg_value = False
-
-        if in_cfg_value:
-            if arg in ('install_requires', 'tests_require'):
-                # Replaces PEP345-style version specs with the sort expected by
-                # setuptools
-                in_cfg_value = [_VERSION_SPEC_RE.sub(r'\1\2', pred)
-                                for pred in in_cfg_value]
-            elif arg == 'package_dir':
-                in_cfg_value = {'': in_cfg_value}
-            elif arg in ('package_data', 'data_files'):
-                data_files = {}
-                firstline = True
-                prev = None
-                for line in in_cfg_value:
-                    if '=' in line:
-                        key, value = line.split('=', 1)
-                        key, value = (key.strip(), value.strip())
-                        if key in data_files:
-                            # Multiple duplicates of the same package name;
-                            # this is for backwards compatibility of the old
-                            # format prior to d2to1 0.2.6.
-                            prev = data_files[key]
-                            prev.extend(value.split())
-                        else:
-                            prev = data_files[key.strip()] = value.split()
-                    elif firstline:
-                        raise DistutilsOptionError(
-                            'malformed package_data first line %r (misses '
-                            '"=")' % line)
-                    else:
-                        prev.extend(line.strip().split())
-                    firstline = False
-                if arg == 'data_files':
-                    # the data_files value is a pointlessly different structure
-                    # from the package_data value
-                    data_files = list(data_files.items())
-                in_cfg_value = data_files
-            elif arg == 'cmdclass':
-                cmdclass = {}
-                dist = Distribution()
-                for cls in in_cfg_value:
-                    cls = resolve_name(cls)
-                    cmd = cls(dist)
-                    cmdclass[cmd.get_command_name()] = cls
-                in_cfg_value = cmdclass
-
-        kwargs[arg] = in_cfg_value
-
-    return kwargs
-
-
-def register_custom_compilers(config):
-    """Handle custom compilers; this has no real equivalent in distutils, where
-    additional compilers could only be added programmatically, so we have to
-    hack it in somehow.
-    """
-
-    compilers = has_get_option(config, 'global', 'compilers')
-    if compilers:
-        compilers = split_multiline(compilers)
-        for compiler in compilers:
-            compiler = resolve_name(compiler)
-
-            # In distutils2 compilers these class attributes exist; for
-            # distutils1 we just have to make something up
-            if hasattr(compiler, 'name'):
-                name = compiler.name
-            else:
-                name = compiler.__name__
-            if hasattr(compiler, 'description'):
-                desc = compiler.description
-            else:
-                desc = 'custom compiler %s' % name
-
-            module_name = compiler.__module__
-            # Note; this *will* override built in compilers with the same name
-            # TODO: Maybe display a warning about this?
-            cc = distutils.ccompiler.compiler_class
-            cc[name] = (module_name, compiler.__name__, desc)
-
-            # HACK!!!!  Distutils assumes all compiler modules are in the
-            # distutils package
-            sys.modules['distutils.' + module_name] = sys.modules[module_name]
-
-
-def get_extension_modules(config):
-    """Handle extension modules"""
-
-    EXTENSION_FIELDS = ("sources",
-                        "include_dirs",
-                        "define_macros",
-                        "undef_macros",
-                        "library_dirs",
-                        "libraries",
-                        "runtime_library_dirs",
-                        "extra_objects",
-                        "extra_compile_args",
-                        "extra_link_args",
-                        "export_symbols",
-                        "swig_opts",
-                        "depends")
-
-    ext_modules = []
-    for section in config:
-        if ':' in section:
-            labels = section.split(':', 1)
-        else:
-            # Backwards compatibility for old syntax; don't use this though
-            labels = section.split('=', 1)
-        labels = [l.strip() for l in labels]
-        if (len(labels) == 2) and (labels[0] == 'extension'):
-            ext_args = {}
-            for field in EXTENSION_FIELDS:
-                value = has_get_option(config, section, field)
-                # All extension module options besides name can have multiple
-                # values
-                if not value:
-                    continue
-                value = split_multiline(value)
-                if field == 'define_macros':
-                    macros = []
-                    for macro in value:
-                        macro = macro.split('=', 1)
-                        if len(macro) == 1:
-                            macro = (macro[0].strip(), None)
-                        else:
-                            macro = (macro[0].strip(), macro[1].strip())
-                        macros.append(macro)
-                    value = macros
-                ext_args[field] = value
-            if ext_args:
-                if 'name' not in ext_args:
-                    ext_args['name'] = labels[1]
-                ext_modules.append(Extension(ext_args.pop('name'),
-                                             **ext_args))
-    return ext_modules
-
-
-def get_entry_points(config):
-    """Process the [entry_points] section of setup.cfg to handle setuptools
-    entry points.  This is, of course, not a standard feature of
-    distutils2/packaging, but as there is not currently a standard alternative
-    in packaging, we provide support for them.
-    """
-
-    if not 'entry_points' in config:
-        return {}
-
-    return dict((option, split_multiline(value))
-                for option, value in config['entry_points'].items())
-
-
-def wrap_commands(kwargs):
-    dist = Distribution()
-
-    # This should suffice to get the same config values and command classes
-    # that the actual Distribution will see (not counting cmdclass, which is
-    # handled below)
-    dist.parse_config_files()
-
-    for cmd, _ in dist.get_command_list():
-        hooks = {}
-        for opt, val in dist.get_option_dict(cmd).items():
-            val = val[1]
-            if opt.startswith('pre_hook.') or opt.startswith('post_hook.'):
-                hook_type, alias = opt.split('.', 1)
-                hook_dict = hooks.setdefault(hook_type, {})
-                hook_dict[alias] = val
-        if not hooks:
-            continue
-
-        if 'cmdclass' in kwargs and cmd in kwargs['cmdclass']:
-            cmdclass = kwargs['cmdclass'][cmd]
-        else:
-            cmdclass = dist.get_command_class(cmd)
-
-        new_cmdclass = wrap_command(cmd, cmdclass, hooks)
-        kwargs.setdefault('cmdclass', {})[cmd] = new_cmdclass
-
-
-def wrap_command(cmd, cmdclass, hooks):
-    def run(self, cmdclass=cmdclass):
-        self.run_command_hooks('pre_hook')
-        cmdclass.run(self)
-        self.run_command_hooks('post_hook')
-
-    return type(cmd, (cmdclass, object),
-                {'run': run, 'run_command_hooks': run_command_hooks,
-                 'pre_hook': hooks.get('pre_hook'),
-                 'post_hook': hooks.get('post_hook')})
-
-
-def run_command_hooks(cmd_obj, hook_kind):
-    """Run hooks registered for that command and phase.
-
-    *cmd_obj* is a finalized command object; *hook_kind* is either
-    'pre_hook' or 'post_hook'.
-    """
-
-    if hook_kind not in ('pre_hook', 'post_hook'):
-        raise ValueError('invalid hook kind: %r' % hook_kind)
-
-    hooks = getattr(cmd_obj, hook_kind, None)
-
-    if hooks is None:
-        return
-
-    for hook in hooks.values():
-        if isinstance(hook, str):
-            try:
-                hook_obj = resolve_name(hook)
-            except ImportError:
-                err = sys.exc_info()[1] # For py3k
-                raise DistutilsModuleError('cannot find hook %s: %s' %
-                                           (hook,err))
-        else:
-            hook_obj = hook
-
-        if not hasattr(hook_obj, '__call__'):
-            raise DistutilsOptionError('hook %r is not callable' % hook)
-
-        log.info('running %s %s for command %s',
-                 hook_kind, hook, cmd_obj.get_command_name())
-
-        try :
-            hook_obj(cmd_obj)
-        except:
-            e = sys.exc_info()[1]
-            log.error('hook %s raised exception: %s\n' % (hook, e))
-            log.error(traceback.format_exc())
-            sys.exit(1)
-
-
-def has_get_option(config, section, option):
-    if section in config and option in config[section]:
-        return config[section][option]
-    elif section in config and option.replace('_', '-') in config[section]:
-        return config[section][option.replace('_', '-')]
-    else:
-        return False
-
-
-def split_multiline(value):
-    """Special behaviour when we have a multi line options"""
-
-    value = [element for element in
-             (line.strip() for line in value.split('\n'))
-             if element]
-    return value
-
-
-def split_csv(value):
-    """Special behaviour when we have a comma separated options"""
-
-    value = [element for element in
-             (chunk.strip() for chunk in value.split(','))
-             if element]
-    return value
-
-
-def monkeypatch_method(cls):
-    """A function decorator to monkey-patch a method of the same name on the
-    given class.
-    """
-
-    def wrapper(func):
-        orig = getattr(cls, func.__name__, None)
-        if orig and not hasattr(orig, '_orig'):  # Already patched
-            setattr(func, '_orig', orig)
-            setattr(cls, func.__name__, func)
-        return func
-
-    return wrapper
-
-
-# The following classes are used to hack Distribution.command_options a bit
-class DefaultGetDict(defaultdict):
-    """Like defaultdict, but the get() method also sets and returns the default
-    value.
-    """
-
-    def get(self, key, default=None):
-        if default is None:
-            default = self.default_factory()
-        return super(DefaultGetDict, self).setdefault(key, default)
-
-
-class IgnoreDict(dict):
-    """A dictionary that ignores any insertions in which the key is a string
-    matching any string in `ignore`.  The ignore list can also contain wildcard
-    patterns using '*'.
-    """
-
-    def __init__(self, ignore):
-        self.__ignore = re.compile(r'(%s)' % ('|'.join(
-                                   [pat.replace('*', '.*')
-                                    for pat in ignore])))
-
-    def __setitem__(self, key, val):
-        if self.__ignore.match(key):
-            return
-        super(IgnoreDict, self).__setitem__(key, val)
diff --git a/required_pkgs/d2to1/d2to1/zestreleaser.py b/required_pkgs/d2to1/d2to1/zestreleaser.py
deleted file mode 100644
index a2b6632..0000000
--- a/required_pkgs/d2to1/d2to1/zestreleaser.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""zest.releaser entry points to support projects using distutils2-like
-setup.cfg files.  The only actual functionality this adds is to update the
-version option in a setup.cfg file, if it exists.  If setup.cfg does not exist,
-or does not contain a version option, then this does nothing.
-
-TODO: d2to1 theoretically supports using a different filename for setup.cfg;
-this does not support that.  We could hack in support, though I'm not sure how
-useful the original functionality is to begin with (and it might be removed) so
-we ignore that for now.
-
-TODO: There exists a proposal
-(http://mail.python.org/pipermail/distutils-sig/2011-March/017628.html) to add
-a 'version-from-file' option (or something of the like) to distutils2; if this
-is added then support for it should be included here as well.
-"""
-
-
-import logging
-import os
-
-from .extern.six import print_
-from .extern.six import moves as m
-ConfigParser = m.configparser.ConfigParser
-
-
-logger = logging.getLogger(__name__)
-
-
-
-def update_setupcfg_version(filename, version):
-    """Opens the given setup.cfg file, locates the version option in the
-    [metadata] section, updates it to the new version.
-    """
-
-    setup_cfg = open(filename).readlines()
-    current_section = None
-    updated = False
-
-    for idx, line in enumerate(setup_cfg):
-        m = ConfigParser.SECTCRE.match(line)
-        if m:
-            if current_section == 'metadata':
-                # We already parsed the entire metadata section without finding
-                # a version line, and are now moving into a new section
-                break
-            current_section = m.group('header')
-            continue
-
-        if '=' not in line:
-            continue
-
-        opt, val = line.split('=', 1)
-        opt, val = opt.strip(), val.strip()
-        if current_section == 'metadata' and opt == 'version':
-            setup_cfg[idx] = 'version = %s\n' % version
-            updated = True
-            break
-
-    if updated:
-        open(filename, 'w').writelines(setup_cfg)
-        logger.info("Set %s's version to %r" % (os.path.basename(filename),
-                                                version))
-
-
-def prereleaser_middle(data):
-    filename = os.path.join(data['workingdir'], 'setup.cfg')
-    if os.path.exists(filename):
-        update_setupcfg_version(filename, data['new_version'])
-
-
-def releaser_middle(data):
-    """
-    releaser.middle hook to monkey-patch zest.releaser to support signed
-    tagging--currently this is the only way to do this.  Also monkey-patches to
-    disable an annoyance where zest.releaser only creates .zip source
-    distributions.  This is supposedly a workaround for a bug in Python 2.4,
-    but we don't care about Python 2.4.
-    """
-
-    import os
-    import sys
-
-    from zest.releaser.git import Git
-    from zest.releaser.release import Releaser
-
-    # Copied verbatim from zest.releaser, but with the cmd string modified to
-    # use the -s option to create a signed tag
-    def _my_create_tag(self, version):
-        msg = "Tagging %s" % (version,)
-        cmd = 'git tag -s %s -m "%s"' % (version, msg)
-        if os.path.isdir('.git/svn'):
-            print_("\nEXPERIMENTAL support for git-svn tagging!\n")
-            cur_branch = open('.git/HEAD').read().strip().split('/')[-1]
-            print_("You are on branch %s." % (cur_branch,))
-            if cur_branch != 'master':
-                print_("Only the master branch is supported for git-svn "
-                       "tagging.")
-                print_("Please tag yourself.")
-                print_("'git tag' needs to list tag named %s." % (version,))
-                sys.exit()
-            cmd = [cmd]
-            local_head = open('.git/refs/heads/master').read()
-            trunk = open('.git/refs/remotes/trunk').read()
-            if local_head != trunk:
-                print_("Your local master diverges from trunk.\n")
-                # dcommit before local tagging
-                cmd.insert(0, 'git svn dcommit')
-            # create tag in svn
-            cmd.append('git svn tag -m "%s" %s' % (msg, version))
-        return cmd
-
-    # Similarly copied from zer.releaser to support use of 'v' in front
-    # of the version number
-    def _my_make_tag(self):
-        from zest.releaser import utils
-        from os import system
-
-        if self.data['tag_already_exists']:
-            return
-        cmds = self.vcs.cmd_create_tag(self.data['version'])
-        if not isinstance(cmds, list):
-            cmds = [cmds]
-        if len(cmds) == 1:
-            print_("Tag needed to proceed, you can use the following command:")
-        for cmd in cmds:
-            print_(cmd)
-            if utils.ask("Run this command"):
-                print_(system(cmd))
-            else:
-                # all commands are needed in order to proceed normally
-                print_("Please create a tag for %s yourself and rerun." % \
-                        (self.data['version'],))
-                sys.exit()
-        if not self.vcs.tag_exists('v' + self.data['version']):
-            print_("\nFailed to create tag %s!" % (self.data['version'],))
-            sys.exit()
-
-    # Normally all this does is to return '--formats=zip', which is currently
-    # hard-coded as an option to always add to the sdist command; they ought to
-    # make this actually optional
-    def _my_sdist_options(self):
-        return ''
-
-    Git.cmd_create_tag = _my_create_tag
-    Releaser._make_tag = _my_make_tag
-    Releaser._sdist_options = _my_sdist_options
-
-
-def postreleaser_before(data):
-    """
-    Fix the irritating .dev0 default appended to new development versions by
-    zest.releaser to just append ".dev" without the "0".
-    """
-
-    data['dev_version_template'] = '%(new_version)s.dev'
-
-
-def postreleaser_middle(data):
-    filename = os.path.join(data['workingdir'], 'setup.cfg')
-    if os.path.exists(filename):
-        update_setupcfg_version(filename, data['dev_version'])
diff --git a/required_pkgs/d2to1/ez_setup.py b/required_pkgs/d2to1/ez_setup.py
deleted file mode 100644
index 8f11fa2..0000000
--- a/required_pkgs/d2to1/ez_setup.py
+++ /dev/null
@@ -1,264 +0,0 @@
-#!python
-"""Bootstrap setuptools installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from ez_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import tempfile
-import tarfile
-import optparse
-import subprocess
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-DEFAULT_VERSION = "0.9.8"
-DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
-
-def _python_cmd(*args):
-    args = (sys.executable,) + args
-    return subprocess.call(args) == 0
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Setuptools')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Setuptools egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-
-    # Remove previously-imported pkg_resources if present (see
-    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
-    if 'pkg_resources' in sys.modules:
-        del sys.modules['pkg_resources']
-
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        import pkg_resources
-    except ImportError:
-        return _do_download(version, download_base, to_dir, download_delay)
-    try:
-        pkg_resources.require("setuptools>=" + version)
-        return
-    except pkg_resources.VersionConflict:
-        e = sys.exc_info()[1]
-        if was_imported:
-            sys.stderr.write(
-            "The required version of setuptools (>=%s) is not available,\n"
-            "and can't be installed while this script is running. Please\n"
-            "install a more recent version first, using\n"
-            "'easy_install -U setuptools'."
-            "\n\n(Currently using %r)\n" % (version, e.args[0]))
-            sys.exit(2)
-        else:
-            del pkg_resources, sys.modules['pkg_resources']    # reload ok
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    except pkg_resources.DistributionNotFound:
-        return _do_download(version, download_base, to_dir,
-                            download_delay)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download setuptools from a specified location and return its filename
-
-    `version` should be a valid setuptools version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "setuptools-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the setuptools package
-    """
-    install_args = []
-    if options.user_install:
-        if sys.version_info < (2, 6):
-            log.warn("--user requires Python 2.6 or later")
-            raise SystemExit(1)
-        install_args.append('--user')
-    return install_args
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the setuptools package')
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main(version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    tarball = download_setuptools(download_base=options.download_base)
-    return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/required_pkgs/d2to1/setup.cfg b/required_pkgs/d2to1/setup.cfg
deleted file mode 100644
index 6db07c6..0000000
--- a/required_pkgs/d2to1/setup.cfg
+++ /dev/null
@@ -1,52 +0,0 @@
-[metadata]
-name = d2to1
-version = 0.2.13.dev
-author = Erik M. Bray
-author-email = embray at stsci.edu
-summary = Allows using distutils2-like setup.cfg files for a package's metadata with a distribute/setuptools setup.py
-description-file = 
-	README.rst
-	CHANGES.rst
-home-page = http://pypi.python.org/pypi/d2to1
-requires-dist = setuptools
-classifier = 
-	Development Status :: 5 - Production/Stable
-	Environment :: Plugins
-	Framework :: Setuptools Plugin
-	Intended Audience :: Developers
-	License :: OSI Approved :: BSD License
-	Operating System :: OS Independent
-	Programming Language :: Python
-	Programming Language :: Python :: 3
-	Topic :: Software Development :: Build Tools
-	Topic :: Software Development :: Libraries :: Python Modules
-	Topic :: System :: Archiving :: Packaging
-
-[files]
-packages = 
-	d2to1
-	d2to1.extern
-extra_files = 
-	CHANGES.rst
-	LICENSE
-	ez_setup.py
-
-[backwards_compat]
-zip-safe = False
-tests-require = nose
-
-[entry_points]
-distutils.setup_keywords = 
-	d2to1 = d2to1.core:d2to1
-zest.releaser.prereleaser.middle = 
-	d2_version = d2to1.zestreleaser:prereleaser_middle
-zest.releaser.postreleaser.middle = 
-	d2_version = d2to1.zestreleaser:postreleaser_middle
-
-[test]
-test-suite = nose.collector
-
-[zest.releaser]
-releaser.middle = d2to1.zestreleaser.releaser_middle
-postreleaser.before = d2to1.zestreleaser.postreleaser_before
-
diff --git a/required_pkgs/d2to1/setup.py b/required_pkgs/d2to1/setup.py
deleted file mode 100755
index 74a10d4..0000000
--- a/required_pkgs/d2to1/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-try:
-    from setuptools import setup
-except ImportError:
-    from ez_setup import use_setuptools
-    use_setuptools()
-    from setuptools import setup
-
-# d2to1 basically installs itself!  See setup.cfg for the project metadata.
-from d2to1.util import cfg_to_args
-
-
-setup(**cfg_to_args())
diff --git a/required_pkgs/d2to1/tox.ini b/required_pkgs/d2to1/tox.ini
deleted file mode 100644
index d20127b..0000000
--- a/required_pkgs/d2to1/tox.ini
+++ /dev/null
@@ -1,11 +0,0 @@
-[tox]
-envlist = py25,py26,py27,py32,py33
-
-[testenv]
-deps =
-    nose
-    numpy
-    setuptools-subversion
-commands =
-    python setup.py test
-sitepackages = True
diff --git a/required_pkgs/stsci.distutils b/required_pkgs/stsci.distutils
new file mode 160000
index 0000000..8cfac18
--- /dev/null
+++ b/required_pkgs/stsci.distutils
@@ -0,0 +1 @@
+Subproject commit 8cfac18a276205afe90d67513de9af00eb1547c7
diff --git a/required_pkgs/stsci.distutils/CHANGES.txt b/required_pkgs/stsci.distutils/CHANGES.txt
deleted file mode 100644
index c9a6b2c..0000000
--- a/required_pkgs/stsci.distutils/CHANGES.txt
+++ /dev/null
@@ -1,171 +0,0 @@
-Changelog
-===========
-
-0.3.8 (unreleased)
-------------------
-
-- Nothing changed yet.
-
-
-0.3.7 (2013-12-23)
-------------------
-
-- Avoid using ``Popen.stdout`` directly in the version.py SVN revision
-  auto-update script to avoid possible ResourceWarnings on Python >= 3.2.
-  See https://github.com/spacetelescope/PyFITS/issues/45
-
-
-0.3.6 (2013-11-21)
-------------------
-
-- Fixed a syntax error in Python 3 that was introduced in 0.3.5.  This
-  could occur very early in the setup such that it bailed before even 2to3
-  could run on the rest of the package.
-
-
-0.3.5 (2013-11-18)
-------------------
-
-- Fixed an obscure issue that could occur when trying to install with
-  easy_install on Python 2 systems that have lib2to3 installed but have never
-  used it.
-
-
-0.3.4 (2013-07-31)
-------------------
-
-- Updated the check for ``__loader__`` added in v0.3.3 to only perform
-  that check on Python >= 3.3, since the issue doesn't apply to older
-  Python versions.
-
-
-0.3.3 (2013-07-25)
-------------------
-
-- Updated the import-time SVN revision update mechanism in the ``version.py``
-  module generated by the ``version_setup_hook`` to avoid running when not in
-  a dev version of the package.  This saves time on importing released
-  packages when installed on users' systems.
-
-- Added a workaround to a bug on Python 3.3 that could cause stsci.distutils
-  to crash during installation.
-
-
-0.3.2 (2013-03-27)
-------------------
-
-- Fixed a bug in the version hook that could occur if the svnversion command
-  fails.
-
-- Updated the template for the version.py module generated by the version hook
-  so that ``from .version import *`` will work for applications.
-
-- Added a ``__vdate__`` variable in version.py which may contain a release
-  date string by specifying a ``vdate`` option in the ``[metadata]`` section
-  of setup.cfg.
-
-- Added a ``stsci_distutils_version`` variable in version.py containing the
-  version of stsci.distutils used to generate the file--useful primarily for
-  debugging purposes.
-
-- Version 0.3.1 added a new zest.releaser hooks to ensure that source
-  distributes are created as tar.gz files instead of zip files--this was left
-  out of the changelog for 0.3.1.
-
-- The tar.gz zest.releaser hook is updated in 0.3.2 to only run on stsci
-  packages.
-
-
-0.3.1 (2012-06-28)
-------------------
-
-- Fixed a bug where console output from svn-related programs was assumed to be
-  ascii, leading to possible crashes on non-English systems.
-
-
-0.3 (2012-02-20)
-----------------
-
-- The ``glob_data_files`` hook became a pre-command hook for the install_data
-  command instead of being a setup-hook.  This is to support the additional
-  functionality of requiring data_files with relative destination paths to be
-  install relative to the package's install path (i.e. site-packages).
-
-- Dropped support for and deprecated the easier_install custom command.
-  Although it should still work, it probably won't be used anymore for
-  stsci_python packages.
-
-- Added support for the ``build_optional_ext`` command, which replaces/extends
-  the default ``build_ext`` command.  See the README for more details.
-
-- Added the ``tag_svn_revision`` setup_hook as a replacement for the
-  setuptools-specific tag_svn_revision option to the egg_info command.  This
-  new hook is easier to use than the old tag_svn_revision option: It's
-  automatically enabled by the presence of ``.dev`` in the version string, and
-  disabled otherwise.
-
-- The ``svn_info_pre_hook`` and ``svn_info_post_hook`` have been replaced with
-  ``version_pre_command_hook`` and ``version_post_command_hook`` respectively.
-  However, a new ``version_setup_hook``, which has the same purpose, has been
-  added.  It is generally easier to use and will give more consistent results
-  in that it will run every time setup.py is run, regardless of which command
-  is used.  ``stsci.distutils`` itself uses this hook--see the `setup.cfg` file
-  and `stsci/distutils/__init__.py` for example usage.
-
-- Instead of creating an `svninfo.py` module, the new ``version_`` hooks create
-  a file called `version.py`.  In addition to the SVN info that was included
-  in `svninfo.py`, it includes a ``__version__`` variable to be used by the
-  package's `__init__.py`.  This allows there to be a hard-coded
-  ``__version__`` variable included in the source code, rather than using
-  pkg_resources to get the version.
-
-- In `version.py`, the variables previously named ``__svn_version__`` and
-  ``__full_svn_info__`` are now named ``__svn_revision__`` and
-  ``__svn_full_info__``.
-
-- Fixed a bug when using stsci.distutils in the installation of other packages
-  in the ``stsci.*`` namespace package.  If stsci.distutils was not already
-  installed, and was downloaded automatically by distribute through the
-  setup_requires option, then ``stsci.distutils`` would fail to import.  This
-  is because the way the namespace package (nspkg) mechanism currently works,
-  all packages belonging to the nspkg *must* be on the import path at initial
-  import time.
-
-  So when installing stsci.tools, for example, if ``stsci.tools`` is imported
-  from within the source code at install time, but before ``stsci.distutils``
-  is downloaded and added to the path, the ``stsci`` package is already
-  imported and can't be extended to include the path of ``stsci.distutils``
-  after the fact.  The easiest way of dealing with this, it seems, is to
-  delete ``stsci`` from ``sys.modules``, which forces it to be reimported, now
-  the its ``__path__`` extended to include ``stsci.distutil``'s path.
-
-- Added zest.releaser hooks for tweaking the development version string
-  template, and for uploading new releases to STScI's local package index.
-
-
-0.2.2 (2011-11-09)
-------------------
-
-- Fixed check for the issue205 bug on actual setuptools installs; before it
-  only worked on distribute.  setuptools has the issue205 bug prior to version
-  0.6c10.
-
-- Improved the fix for the issue205 bug, especially on setuptools.
-  setuptools, prior to 0.6c10, did not back of sys.modules either before
-  sandboxing, which causes serious problems.  In fact, it's so bad that it's
-  not enough to add a sys.modules backup to the current sandbox: It's in fact
-  necessary to monkeypatch setuptools.sandbox.run_setup so that any subsequent
-  calls to it also back up sys.modules.
-
-
-0.2.1 (2011-09-02)
-------------------
-
-- Fixed the dependencies so that setuptools is requirement but 'distribute'
-  specifically.  Previously installation could fail if users had plain
-  setuptools installed and not distribute
-
-0.2 (2011-08-23)
-------------------
-
-- Initial public release
diff --git a/required_pkgs/stsci.distutils/LICENSE.txt b/required_pkgs/stsci.distutils/LICENSE.txt
deleted file mode 100644
index 7e8019a..0000000
--- a/required_pkgs/stsci.distutils/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/required_pkgs/stsci.distutils/MANIFEST.in b/required_pkgs/stsci.distutils/MANIFEST.in
deleted file mode 100644
index cc2f756..0000000
--- a/required_pkgs/stsci.distutils/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-include CHANGES.txt
-include tox.ini
diff --git a/required_pkgs/stsci.distutils/README.txt b/required_pkgs/stsci.distutils/README.txt
deleted file mode 100644
index 9f454f6..0000000
--- a/required_pkgs/stsci.distutils/README.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-Introduction
-============
-This package contains utilities used to package some of STScI's Python
-projects; specifically those projects that comprise stsci_python_ and
-Astrolib_.
-
-It currently consists mostly of some setup_hook scripts meant for use with
-`distutils2/packaging`_ and/or d2to1_, and a customized easy_install command
-meant for use with distribute_.
-
-This package is not meant for general consumption, though it might be worth
-looking at for examples of how to do certain things with your own packages, but
-YMMV.
-
-Features
-========
-
-Hook Scripts
-------------
-Currently the main features of this package are a couple of setup_hook scripts.
-In distutils2, a setup_hook is a script that runs at the beginning of any
-pysetup command, and can modify the package configuration read from setup.cfg.
-There are also pre- and post-command hooks that only run before/after a
-specific setup command (eg. build_ext, install) is run.
-
-stsci.distutils.hooks.use_packages_root
-'''''''''''''''''''''''''''''''''''''''
-If using the ``packages_root`` option under the ``[files]`` section of
-setup.cfg, this hook will add that path to ``sys.path`` so that modules in your
-package can be imported and used in setup.  This can be used even if
-``packages_root`` is not specified--in this case it adds ``''`` to
-``sys.path``.
-
-stsci.distutils.hooks.version_setup_hook
-''''''''''''''''''''''''''''''''''''''''
-Creates a Python module called version.py which currently contains four
-variables:
-
-* ``__version__`` (the release version)
-* ``__svn_revision__`` (the SVN revision info as returned by the ``svnversion``
-  command)
-* ``__svn_full_info__`` (as returned by the ``svn info`` command)
-* ``__setup_datetime__`` (the date and time that setup.py was last run).
-
-These variables can be imported in the package's ``__init__.py`` for degugging
-purposes.  The version.py module will *only* be created in a package that
-imports from the version module in its ``__init__.py``.  It should be noted
-that this is generally preferable to writing these variables directly into
-``__init__.py``, since this provides more control and is less likely to
-unexpectedly break things in ``__init__.py``.
-
-stsci.distutils.hooks.version_pre_command_hook
-''''''''''''''''''''''''''''''''''''''''''''''
-Identical to version_setup_hook, but designed to be used as a pre-command
-hook.
-
-stsci.distutils.hooks.version_post_command_hook
-'''''''''''''''''''''''''''''''''''''''''''''''
-The complement to version_pre_command_hook.  This will delete any version.py
-files created during a build in order to prevent them from cluttering an SVN
-working copy (note, however, that version.py is *not* deleted from the build/
-directory, so a copy of it is still preserved).  It will also not be deleted
-if the current directory is not an SVN working copy.  For example, if source
-code extracted from a source tarball it will be preserved.
-
-stsci.distutils.hooks.tag_svn_revision
-''''''''''''''''''''''''''''''''''''''
-A setup_hook to add the SVN revision of the current working copy path to the
-package version string, but only if the version ends in .dev.
-
-For example, ``mypackage-1.0.dev`` becomes ``mypackage-1.0.dev1234``.  This is
-in accordance with the version string format standardized by PEP 386.
-
-This should be used as a replacement for the ``tag_svn_revision`` option to
-the egg_info command.  This hook is more compatible with packaging/distutils2,
-which does not include any VCS support.  This hook is also more flexible in
-that it turns the revision number on/off depending on the presence of ``.dev``
-in the version string, so that it's not automatically added to the version in
-final releases.
-
-This hook does require the ``svnversion`` command to be available in order to
-work.  It does not examine the working copy metadata directly.
-
-stsci.distutils.hooks.numpy_extension_hook
-''''''''''''''''''''''''''''''''''''''''''
-This is a pre-command hook for the build_ext command.  To use it, add a
-``[build_ext]`` section to your setup.cfg, and add to it::
-
-    pre-hook.numpy-extension-hook = stsci.distutils.hooks.numpy_extension_hook
-
-This hook must be used to build extension modules that use Numpy.   The primary
-side-effect of this hook is to add the correct numpy include directories to
-`include_dirs`.  To use it, add 'numpy' to the 'include-dirs' option of each
-extension module that requires numpy to build.  The value 'numpy' will be
-replaced with the actual path to the numpy includes.
-
-stsci.distutils.hooks.is_display_option
-'''''''''''''''''''''''''''''''''''''''
-This is not actually a hook, but is a useful utility function that can be used
-in writing other hooks.  Basically, it returns ``True`` if setup.py was run
-with a "display option" such as --version or --help.  This can be used to
-prevent your hook from running in such cases.
-
-stsci.distutils.hooks.glob_data_files
-'''''''''''''''''''''''''''''''''''''
-A pre-command hook for the install_data command.  Allows filename wildcards as
-understood by ``glob.glob()`` to be used in the data_files option.  This hook
-must be used in order to have this functionality since it does not normally
-exist in distutils.
-
-This hook also ensures that data files are installed relative to the package
-path.  data_files shouldn't normally be installed this way, but the
-functionality is required for a few special cases.
-
-
-Commands
---------
-build_optional_ext
-''''''''''''''''''
-This serves as an optional replacement for the default built_ext command,
-which compiles C extension modules.  Its purpose is to allow extension modules
-to be *optional*, so that if their build fails the rest of the package is
-still allowed to be built and installed.  This can be used when an extension
-module is not definitely required to use the package.
-
-To use this custom command, add::
-
-    commands = stsci.distutils.command.build_optional_ext.build_optional_ext
-
-under the ``[global]`` section of your package's setup.cfg.  Then, to mark
-an individual extension module as optional, under the setup.cfg section for
-that extension add::
-
-    optional = True
-
-Optionally, you may also add a custom failure message by adding::
-
-    fail_message = The foobar extension module failed to compile.
-                   This could be because you lack such and such headers.
-                   This package will still work, but such and such features
-                   will be disabled.
-
-
-.. _stsci_python: http://www.stsci.edu/resources/software_hardware/pyraf/stsci_python
-.. _Astrolib: http://www.scipy.org/AstroLib/
-.. _distutils2/packaging: http://distutils2.notmyidea.org/
-.. _d2to1: http://pypi.python.org/pypi/d2to1
-.. _distribute: http://pypi.python.org/pypi/distribute
diff --git a/required_pkgs/stsci.distutils/distribute_setup.py b/required_pkgs/stsci.distutils/distribute_setup.py
deleted file mode 100644
index 8f5b063..0000000
--- a/required_pkgs/stsci.distutils/distribute_setup.py
+++ /dev/null
@@ -1,515 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.28"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-    finally:
-        os.chdir(old_wd)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>=" + version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unkown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Removing elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    if not os.access(pkg_info, os.W_OK):
-        log.warn("Don't have permissions to write %s, skipping", pkg_info)
-
-    log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
-    _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install') + 1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index + 1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools', replacement=False)
-            )
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools')
-        )
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patched done.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    _cmd = ['-c', 'install', '--single-version-externally-managed']
-    if sys.argv[:3] == _cmd:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(argv):
-    install_args = []
-    user_install = '--user' in argv
-    if user_install and sys.version_info < (2, 6):
-        log.warn("--user requires Python 2.6 or later")
-        raise SystemExit(1)
-    if user_install:
-        install_args.append('--user')
-    return install_args
-
-
-def main(argv, version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball, _build_install_args(argv))
-
-
-if __name__ == '__main__':
-    main(sys.argv[1:])
diff --git a/required_pkgs/stsci.distutils/docs/Makefile b/required_pkgs/stsci.distutils/docs/Makefile
deleted file mode 100644
index a4427eb..0000000
--- a/required_pkgs/stsci.distutils/docs/Makefile
+++ /dev/null
@@ -1,153 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/stscidistutils.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/stscidistutils.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/stscidistutils"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/stscidistutils"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/required_pkgs/stsci.distutils/docs/source/api.rst b/required_pkgs/stsci.distutils/docs/source/api.rst
deleted file mode 100644
index 2e28715..0000000
--- a/required_pkgs/stsci.distutils/docs/source/api.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-API documentation
-=================
-
-stsci.distutils.hooks
----------------------
-.. automodule:: stsci.distutils.hooks
-   :members:
-
-stsci.distutils.svnutils
-------------------------
-.. automodule:: stsci.distutils.svnutils
-   :members:
-
-stsci.distutils.versionutils
-----------------------------
-.. automodule:: stsci.distutils.versionutils
-   :members:
diff --git a/required_pkgs/stsci.distutils/docs/source/changelog.rst b/required_pkgs/stsci.distutils/docs/source/changelog.rst
deleted file mode 100644
index 61c2860..0000000
--- a/required_pkgs/stsci.distutils/docs/source/changelog.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../../CHANGES.txt
diff --git a/required_pkgs/stsci.distutils/docs/source/conf.py b/required_pkgs/stsci.distutils/docs/source/conf.py
deleted file mode 100644
index bf1bbab..0000000
--- a/required_pkgs/stsci.distutils/docs/source/conf.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# stsci.distutils documentation build configuration file, created by
-# sphinx-quickstart on Wed Jan 16 15:48:41 2013.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-from stsci.sphinxext.conf import *
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions += []
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'stsci.distutils'
-copyright = u'2013, Erik Bray, Mark Sienkiewicz, Christine Slocum'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.3.2'
-# The full version, including alpha/beta/rc tags.
-release = '0.3.2'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-#html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'stscidistutilsdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'stscidistutils.tex', u'stsci.distutils Documentation',
-   u'Erik Bray, Mark Sienkiewicz, Christine Slocum', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'stscidistutils', u'stsci.distutils Documentation',
-     [u'Erik Bray, Mark Sienkiewicz, Christine Slocum'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-  ('index', 'stscidistutils', u'stsci.distutils Documentation',
-   u'Erik Bray, Mark Sienkiewicz, Christine Slocum', 'stscidistutils', 'One line description of project.',
-   'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
diff --git a/required_pkgs/stsci.distutils/docs/source/index.rst b/required_pkgs/stsci.distutils/docs/source/index.rst
deleted file mode 100644
index e34484b..0000000
--- a/required_pkgs/stsci.distutils/docs/source/index.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-.. stsci.distutils documentation master file, created by
-   sphinx-quickstart on Wed Jan 16 15:48:41 2013.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-:tocdepth: 3
-
-*******************************************
-Welcome to stsci.distutils's documentation!
-*******************************************
-
-.. include:: ../../README.txt
-
-Appendix
-========
-
-.. toctree::
-   :maxdepth: 2
-
-   api
-   changelog
-
diff --git a/required_pkgs/stsci.distutils/setup.cfg b/required_pkgs/stsci.distutils/setup.cfg
deleted file mode 100644
index 999f7a0..0000000
--- a/required_pkgs/stsci.distutils/setup.cfg
+++ /dev/null
@@ -1,45 +0,0 @@
-[metadata]
-name = stsci.distutils
-version = 0.3.8.dev
-author = Erik M. Bray
-author-email = embray at stsci.edu
-home-page = http://www.stsci.edu/resources/software_hardware/stsci_python
-summary = distutils/packaging-related utilities used by some of STScI's packages
-description-file = 
-    README.txt
-    CHANGES.txt
-requires-python = >=2.5
-requires-dist = 
-    setuptools
-    d2to1 (>=0.2.5)
-classifier = 
-    Development Status :: 3 - Alpha
-    Intended Audience :: Developers
-    License :: OSI Approved :: BSD License
-    Programming Language :: Python
-    Topic :: Scientific/Engineering
-    Topic :: Software Development :: Build Tools
-    Topic :: Software Development :: Libraries :: Python Modules
-    Topic :: System :: Archiving :: Packaging
-
-[files]
-packages = 
-    stsci
-    stsci.distutils
-    stsci.distutils.command
-    stsci.distutils.tests
-
-[global]
-setup-hooks = 
-    stsci.distutils.hooks.tag_svn_revision
-    stsci.distutils.hooks.version_setup_hook
-
-[backwards_compat]
-use-2to3 = True
-zip-safe = False
-
-[entry_points]
-zest.releaser.releaser.before =
-    fix_sdist_format = stsci.distutils.release:fix_sdist_format
-zest.releaser.postreleaser.before =
-    fix_dev_version_template = stsci.distutils.release:fix_dev_version_template
diff --git a/required_pkgs/stsci.distutils/setup.py b/required_pkgs/stsci.distutils/setup.py
deleted file mode 100755
index a41bdfc..0000000
--- a/required_pkgs/stsci.distutils/setup.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-
-try:
-    from setuptools import setup
-except ImportError:
-    from distribute_setup import use_setuptools
-    use_setuptools()
-    from setuptools import setup
-
-import sys
-
-from pkg_resources import get_distribution, Requirement
-
-
-# If stsci.distutils is being used to install another package in the stsci
-# namespace package, we may need to first re-import the stsci package so that
-# all the entries (including the current path) are added to stsci.__path__
-# Deleting 'stsci' from sys.modules will force such a re-import.
-if 'stsci' in sys.modules:
-    del sys.modules['stsci']
-
-
-# This is a workaround for http://bugs.python.org/setuptools/issue20; most
-# packages that have this package as a setup-requirement also have d2to1 as
-# a setup-requirement, which can lead to bugginess.
-# See also http://mail.python.org/pipermail/distutils-sig/2011-May/017812.html
-# for a description of the problem (in my example, package_A is d2to1 and
-# package_B is stsci.distutils).
-# This issue was fixed in distribute 0.6.17 and in setuptools 0.6c10, but
-# leaving in support for older versions for now.
-requirements = [Requirement.parse('setuptools<0.6c10'),
-                Requirement.parse('distribute<0.6.19')]
-# Distribution will actually convert a requirement for any setuptools version
-# to a requirement for distribute, so if distribute is in use the first
-# requirement is useless.  setuptools does something similar: yes, setuptools
-# and distribute are actually antagonistic toward each other--ridiculous.
-if requirements[0].key == requirements[1].key:
-    del requirements[0]
-try:
-    # Note: If distribute is installed get_distribution('setuptools') returns
-    # the installed distribute distribution
-    has_issue205 = any([get_distribution('setuptools') in req
-                        for req in requirements])
-except:
-    has_issue205 = False
-
-if has_issue205:
-    import sys
-    from pkg_resources import working_set
-    from setuptools import sandbox
-    from setuptools.command import easy_install
-
-    # Monkey patch setuptools so that subsequent calls to run_setup also
-    # have this patch:
-    _old_run_setup = sandbox.run_setup
-    def run_setup(setup_script, args):
-        save_entries = working_set.entries[:]
-        save_entry_keys = working_set.entry_keys.copy()
-        save_by_key = working_set.by_key.copy()
-        save_modules = sys.modules.copy()
-        try:
-            _old_run_setup(setup_script, args)
-        finally:
-            working_set.entries = save_entries
-            working_set.entry_keys = save_entry_keys
-            working_set.by_key = save_by_key
-            sys.modules.update(save_modules)
-            for key in list(sys.modules):
-                if key not in save_modules:
-                    del sys.modules[key]
-    sandbox.run_setup = run_setup
-    easy_install.run_setup = run_setup
-
-    # Patch the current call to run_setup
-    save_entries = working_set.entries[:]
-    save_entry_keys = working_set.entry_keys.copy()
-    save_by_key = working_set.by_key.copy()
-    save_modules = sys.modules.copy()
-try:
-    setup(
-        setup_requires=['d2to1>=0.2.9'],
-        namespace_packages=['stsci'], packages=['stsci'],
-        d2to1=True,
-    )
-finally:
-    if has_issue205:
-        working_set.entries = save_entries
-        working_set.entry_keys = save_entry_keys
-        working_set.by_key = save_by_key
-        sys.modules.update(save_modules)
-        for key in list(sys.modules):
-            if key not in save_modules:
-                del sys.modules[key]
diff --git a/required_pkgs/stsci.distutils/stsci/__init__.py b/required_pkgs/stsci.distutils/stsci/__init__.py
deleted file mode 100644
index e6e3521..0000000
--- a/required_pkgs/stsci.distutils/stsci/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-try:
-    # As long as we're using setuptools/distribute, we need to do this the
-    # setuptools way or else pkg_resources will throw up unncessary and
-    # annoying warnings (even though the namespace mechanism will still
-    # otherwise work without it).
-    # Get rid of this as soon as setuptools/distribute is dead.
-    __import__('pkg_resources').declare_namespace(__name__)
-except ImportError:
-    pass
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/__init__.py b/required_pkgs/stsci.distutils/stsci/distutils/__init__.py
deleted file mode 100644
index 74d23cd..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-try:
-    from .version import (__version__, __svn_revision__, __svn_full_info__,
-                          __setup_datetime__)
-except ImportError:
-    # If this happens, I hope this import is happening in the
-    # stsci.distutils setup.py because we are about to use
-    # stsci.distutils.hooks to install stsci.distutils
-    __version__ = 'unspecified during initial install'
-    __svn_revision__ = ''
-    __svn_full_info__ = ''
-    __setup_datetime__ = None
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/astutils.py b/required_pkgs/stsci.distutils/stsci/distutils/astutils.py
deleted file mode 100644
index 97dc06d..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/astutils.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""AST Visitors
-
-Currently only uses one for collecting a list of import statements.
-Unfortunately two versions of this have to be implemented: One for 2.6 and up
-and a different version for 2.5.
-"""
-
-
-import os
-import sys
-
-from distutils import log
-
-try:
-    import ast # Python >= 2.6
-
-    def walk(filename, visitor):
-        """Generate an AST for the given filename and walk over it using
-        the given visitor instance.
-        """
-
-        filename = os.path.abspath(filename)
-
-        try:
-            tree = ast.parse(open(filename, 'r').read())
-        except SyntaxError:
-            if sys.version_info[0] < 3:
-                e = sys.exc_info()[1]
-                log.warn('SyntaxError while parsing file %s: %s' %
-                         (filename, str(e)))
-                return
-            # We're probably in Python 3 and looking at a file intended for
-            # Python 2.  Otherwise there's an unintended SyntaxError in the
-            # file, so there are bigger problems anyways
-            try:
-                import lib2to3.refactor
-
-                tool = StringRefactoringTool(
-                    lib2to3.refactor.get_fixers_from_package('lib2to3.fixes'))
-                tool.refactor_file(filename, write=True)
-                tree = ast.parse(tool.refactored[filename])
-            except ImportError:
-                # Without 2to3 we can't do much more.
-                # TODO: Issue a warning?
-                return
-
-        visitor.visit(tree)
-
-
-    class ImportVisitor(ast.NodeVisitor):
-        def __init__(self):
-            self.imports = set()
-            self.importfroms = set()
-
-        def visit_Import(self, node):
-            for name in node.names:
-                self.imports.add((name.name, name.asname))
-
-        def visit_ImportFrom(self, node):
-            for name in node.names:
-                self.importfroms.add((node.module, name.name, name.asname))
-
-except ImportError:
-    import compiler
-
-    def walk(filename, visitor):
-        tree = compiler.parseFile(filename)
-        compiler.walk(tree, visitor)
-
-    class ImportVisitor(compiler.visitor.ASTVisitor):
-        def __init__(self):
-            self.imports = set()
-            self.importfroms = set()
-
-        def visitImport(self, node):
-            for name in node.names:
-                self.imports.add(name)
-
-        def visitFrom(self, node):
-            for name in node.names:
-                self.importfroms.add((node.modname, name[0], name[1]))
-
-
-if sys.version_info[0] >= 3:
-    try:
-        import lib2to3.refactor
-
-        class StringRefactoringTool(lib2to3.refactor.RefactoringTool):
-            """A RefactoringTool that saves refactored files as strings in the
-            self.refactored dict rather than outputting to actual files.
-
-            This is used in case we're running in Python 3 and need to refactor
-            a file before parsing its syntax tree.
-            """
-
-            def __init__(self, fixer_names, options=None, explicit=None):
-                super(StringRefactoringTool, self).__init__(fixer_names,
-                                                            options,
-                                                            explicit)
-                self.refactored = {}
-
-            def write_file(self, new_text, filename, old_text, encoding=None):
-                self.refactored[filename] = new_text
-
-    except ImportError:
-        pass
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/command/__init__.py b/required_pkgs/stsci.distutils/stsci/distutils/command/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/command/build_optional_ext.py b/required_pkgs/stsci.distutils/stsci/distutils/command/build_optional_ext.py
deleted file mode 100644
index 378fe56..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/command/build_optional_ext.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from distutils import log
-from distutils.command.build_ext import build_ext
-from distutils.errors import DistutilsError, CCompilerError, CompileError
-from distutils.util import strtobool
-
-try:
-    from ConfigParser import ConfigParser
-except ImportError:
-    from configparser import ConfigParser
-
-class build_optional_ext(build_ext):
-    """This is a version of the build_ext command that allows specific
-    extensions to be marked as 'optional'.  If an optional extension fails to
-    build, a warning message is displayed but the build does not cancel.
-
-    It should be noted that this functionality already exists in the Python3
-    versions of distutils and packaging, but we must provide backwards
-    compatibility for it.
-    """
-
-    command_name = 'build_ext'
-
-    def _find_optional_extensions(self, extensions):
-        """Reads the setup.cfg to determine which extensions were set as
-        'optional'.  Optionally, developers may also provide a warning message
-        to display (otherwise a default message is used).  This is one way in
-        which this command improves on the existing functionality on Python 3.
-        """
-
-        # TODO: However, support for the 'optional' extension attribute should
-        # be included in d2to1, since it's a legitimate backwards compatibility
-        # issue.  But until I do another release it will have to be supported
-        # here.
-
-        cfg = ConfigParser()
-        try:
-            cfg.read('setup.cfg')
-        except Exception as e:
-            log.warn('Failed to read setup.cfg: %s; proceeding as though '
-                     'there are no optional extensions' % e)
-            return
-
-        # Map extension names to extension objects
-        extensions = dict((ext.name, ext) for ext in extensions)
-
-        for section in cfg.sections():
-            if not section.startswith('extension='):
-                continue
-
-            # The extension name can be specified with the 'name' option, but
-            # if that's missing the name is taken from the section header for
-            # now
-            if cfg.has_option(section, 'name'):
-                name = cfg.get(section, 'name')
-            else:
-                _, name = section.split('=', 1)
-
-            if name not in extensions:
-                # Could happen, for example, if a setup_hook disabled some
-                # extensions
-                continue
-
-            ext = extensions[name]
-
-            if cfg.has_option(section, 'optional'):
-                ext._optional = strtobool(cfg.get(section, 'optional'))
-            else:
-                ext._optional = False
-
-            if cfg.has_option(section, 'fail_message'):
-                ext._fail_message = cfg.get(section, 'fail_message')
-
-    def check_extensions_list(self, extensions):
-        build_ext.check_extensions_list(self, extensions)
-        self._find_optional_extensions(extensions)
-
-    def build_extension(self, ext):
-        try:
-            build_ext.build_extension(self, ext)
-        except (CCompilerError, DistutilsError, CompileError) as e:
-            if not hasattr(ext, '_optional') or not ext._optional:
-                raise
-            log.warn('building optional extension "%s" failed: %s' %
-                     (ext.name, e))
-            if hasattr(ext, '_fail_message'):
-                log.warn(ext._fail_message)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/command/easier_install.py b/required_pkgs/stsci.distutils/stsci/distutils/command/easier_install.py
deleted file mode 100644
index 3bb1d21..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/command/easier_install.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import os
-
-try:
-    from ConfigParser import ConfigParser
-except ImportError:
-    # This is necessary for when stsci.distutils is being bootstrapped by other
-    # setup.pys in stsci_python--otherwise the ConfigParser import would be
-    # converted by d2to1
-    from configparser import ConfigParser
-from distutils import log
-
-import pkg_resources
-
-from setuptools.command.easy_install import easy_install
-from setuptools.package_index import PackageIndex
-
-
-def distro_from_setup_cfg(filename):
-    """
-    Read a source checkout's distutils2 setup.cfg and create a Distribution for
-    that checkout.
-
-    filename can either be the path to the setup.cfg itself, or checkout
-    directory containing the setup.cfg.
-    """
-
-    if os.path.isdir(filename):
-        path = filename
-        filename = os.path.join(filename, 'setup.cfg')
-        if not os.path.exists(filename):
-            return None
-    else:
-        path, basename = os.path.split(filename)
-        if basename != 'setup.cfg':
-            return None
-    cfg = ConfigParser()
-    cfg.read(filename)
-    if not cfg.has_option('metadata', 'name'):
-        return None
-    name = cfg.get('metadata', 'name')
-    if cfg.has_option('metadata', 'version'):
-        version = cfg.get('metadata', 'version')
-    else:
-        version = None
-    return pkg_resources.Distribution(
-               location=path, project_name=name, version=version,
-               precedence=pkg_resources.CHECKOUT_DIST)
-
-
-class LocalSourcesPackageIndex(PackageIndex):
-    """
-    Like PackageIndex, but can also install packages from local source
-    checkouts, the locations of which are added via add_find_links().
-
-    Although PackageIndex supports installing for source distributions on the
-    local filesystem, they must be in a tar/zip/etc.  This allows installing
-    from an existing source checkout on the local filesystem.
-    """
-
-    def process_filename(self, fn, nested=False):
-        PackageIndex.process_filename(self, fn, nested)
-        dist = distro_from_setup_cfg(fn)
-        if dist:
-            self.add(dist)
-
-    def fetch_distribution(self, requirement, tmpdir, force_scan=False,
-            source=False, develop_ok=False, local_index=None):
-        distribute_req = pkg_resources.Requirement.parse('distribute>=0.6.14')
-        if pkg_resources.get_distribution('distribute') in distribute_req:
-            # The local_index parameter is only in distribute>=0.6.14
-            dist = PackageIndex.fetch_distribution(self, requirement, tmpdir,
-                                                   force_scan, source,
-                                                   develop_ok, local_index)
-        else:
-            dist = PackageIndex.fetch_distribution(self, requirement, tmpdir,
-                                                   force_scan, source,
-                                                   develop_ok)
-        if dist:
-            log.info('Using %s from %s' % (dist, dist.location))
-        return dist
-
-
-class easier_install(easy_install):
-    """
-    Extension to the easy_install command that uses LocalSourcesPackageIndex as
-    its default PackageIndex implementation.
-    """
-
-    command_name = 'easy_install'
-
-    create_index = LocalSourcesPackageIndex
-
-    def process_distribution(self, requirement, dist, deps=True, *info):
-        """This version of process_distribution will force the package index to
-        search for local distributions before going out to PyPI when processing
-        a package's dependency.
-
-        It will already do this for the first dependency, but not for
-        subsequent dependencies--something I would consider a bug.
-        """
-
-        if self.package_index.to_scan is None:
-            self.package_index.to_scan = []
-        return easy_install.process_distribution(self, requirement, dist, deps,
-                                                 *info)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/hooks.py b/required_pkgs/stsci.distutils/stsci/distutils/hooks.py
deleted file mode 100644
index a6317a0..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/hooks.py
+++ /dev/null
@@ -1,476 +0,0 @@
-from __future__ import with_statement
-
-import datetime
-import glob
-import os
-import string
-import sys
-
-from distutils import log
-
-
-try:
-    from packaging.util import split_multiline
-except ImportError:
-    try:
-        from distutils2.util import split_multiline
-    except ImportError:
-        from d2to1.util import split_multiline
-
-
-try:
-    # python 2
-    reload
-except NameError:
-    # python 3
-    from imp import reload
-
-if sys.version_info[0] < 3:
-    string_types = basestring
-else:
-    string_types = str
-
-from .svnutils import get_svn_info, get_svn_version
-from .versionutils import (package_uses_version_py, clean_version_py,
-                           update_setup_datetime, VERSION_PY_TEMPLATE)
-
-# For each version.py that we create, we will note the version of
-# stsci.distutils (this package) that created it.  The problem is
-# when the package being installed is stsci.distutils -- we don't
-# know the version yet.  So, if we can't import the version (because
-# it does not exist yet), we declare it to be None and special case
-# it later.
-try :
-    from . import version as my_version
-except ImportError :
-    my_version = None
-
-
-def is_display_option(ignore=None):
-    """A hack to test if one of the arguments passed to setup.py is a display
-    argument that should just display a value and exit.  If so, don't bother
-    running this hook (this capability really ought to be included with
-    distutils2).
-
-    Optionally, ignore may contain a list of display options to ignore in this
-    check.  Each option in the ignore list must contain the correct number of
-    dashes.
-    """
-
-    from setuptools.dist import Distribution
-
-    # If there were no arguments in argv (aside from the script name) then this
-    # is an implied display opt
-    if len(sys.argv) < 2:
-        return True
-
-    display_opts = ['--command-packages', '--help', '-h']
-
-    for opt in Distribution.display_options:
-        display_opts.append('--' + opt[0])
-
-    for arg in sys.argv:
-        if arg in display_opts and arg not in ignore:
-            return True
-
-    return False
-
-
-# TODO: With luck this can go away soon--packaging now supports adding the cwd
-# to sys.path for running setup_hooks.  But it also needs to support adding
-# packages_root.  Also, it currently does not support adding cwd/packages_root
-# to sys.path for pre/post-command hooks, so that needs to be fixed.
-def use_packages_root(config):
-    """
-    Adds the path specified by the 'packages_root' option, or the current path
-    if 'packages_root' is not specified, to sys.path.  This is particularly
-    useful, for example, to run setup_hooks or add custom commands that are in
-    your package's source tree.
-
-    Use this when the root of your package source tree is not in
-    the same directory with the setup.py
-
-    Config Usage::
-
-        [files]
-        packages_root = lib
-        # for installing pkgname from lib/pkgname/*.py
-
-        [global]
-        setup_hooks = stsci.distutils.hooks.use_packages_root
-
-    """
-
-    if 'files' in config and 'packages_root' in config['files']:
-        root = config['files']['packages_root']
-    else:
-        root = ''
-
-    if root not in sys.path:
-        if root and sys.path[0] == '':
-            sys.path.insert(1, root)
-        else:
-            sys.path.insert(0, root)
-
-    # Reload the stsci namespace package in case any new paths can be added to
-    # it from the new sys.path entry; depending on how the namespace packages
-    # are installed this may fail--specifically if it's using the old
-    # setuptools-based .pth approach there is not importable package called
-    # 'stsci' that can be imported by itself.
-    if 'stsci' in sys.modules:
-        mod = sys.modules['stsci']
-        if sys.version_info[:2] >= (3, 3) and not hasattr(mod, '__loader__'):
-            # Workaround for Python bug #17099 on Python 3.3, where reload()
-            # crashes if a module doesn't have an __loader__ attribute
-            del sys.modules['stsci']
-            try:
-                import stsci
-            except ImportError:
-                pass
-        else:
-            try :
-                reload(sys.modules['stsci'])
-            except ImportError:
-                # doesn't seem to bother anything when this reload() fails
-                pass
-
-
-def tag_svn_revision(config):
-    """
-    A setup_hook to add the SVN revision of the current working copy path to
-    the package version string, but only if the version ends in .dev.
-
-    For example, ``mypackage-1.0.dev`` becomes ``mypackage-1.0.dev1234``.  This
-    is in accordance with the version string format standardized by PEP 386.
-
-    This should be used as a replacement for the ``tag_svn_revision`` option to
-    the egg_info command.  This hook is more compatible with
-    packaging/distutils2, which does not include any VCS support.  This hook is
-    also more flexible in that it turns the revision number on/off depending on
-    the presence of ``.dev`` in the version string, so that it's not
-    automatically added to the version in final releases.
-
-    This hook does require the ``svnversion`` command to be available in order
-    to work.  It does not examine the working copy metadata directly.
-
-
-    Config Usage::
-
-        [global]
-        setup_hooks = stsci.distutils.hooks.tag_svn_revision
-
-    You should write exactly this in your package's ``__init__.py``::
-
-        from .version import *
-
-    """
-
-    if 'metadata' in config and 'version' in config['metadata']:
-        metadata = config['metadata']
-        version = metadata['version']
-
-        # Don't add an svn revision unless the version ends with .dev
-        if not version.endswith('.dev'):
-            return
-
-        # First try to get the revision by checking for it in an existing
-        # .version module
-        package_dir = config.get('files', {}).get('packages_root', '')
-        packages = config.get('files', {}).get('packages', '')
-        packages = split_multiline(packages)
-        rev = None
-        for package in packages:
-            version_py = package_uses_version_py(package_dir, package)
-            if not version_py:
-                continue
-            try:
-                mod = __import__(package + '.version',
-                                 fromlist='__svn_revision__')
-            except ImportError:
-                mod = None
-            if mod is not None and hasattr(mod, '__svn_revision__'):
-                rev = mod.__svn_revision__
-                break
-
-        # Cleanup
-        for modname in list(sys.modules):
-            if modname == package or modname.startswith(package + '.'):
-                del sys.modules[modname]
-
-        if rev is None:
-            # A .version module didn't exist or was incomplete; try calling
-            # svnversion directly
-            rev = get_svn_version()
-
-        if not rev:
-            return
-        if ':' in rev:
-            rev, _ = rev.split(':', 1)
-        while rev and rev[-1] not in string.digits:
-            rev = rev[:-1]
-        try:
-            rev = int(rev)
-        except (ValueError, TypeError):
-            return
-
-        metadata['version'] ='%s%d' % (version, rev)
-
-
-def _version_hook(function_name, package_dir, packages, name, version, vdate):
-    """This command hook creates an version.py file in each package that
-    requires it.  This is by determining if the package's ``__init__.py`` tries
-    to import or import from the version module.
-
-    version.py will not be created in packages that don't use it.  It should
-    only be used by the top-level package of the project.
-
-    Don't use this function directly--instead use :func:`version_setup_hook` or
-    :func:`version_pre_command_hook` which know how to retrieve the required
-    metadata depending on the context they are run in.
-
-    Not called directly from the config file.  See :func:`version_setup_hook`.
-
-    """
-
-    # Strip any revision info from version; that will be handled separately
-    if '-' in version:
-        version = version.split('-', 1)[0]
-
-    for package in packages:
-        version_py = package_uses_version_py(package_dir, package)
-        if not version_py:
-            continue
-
-        rev = get_svn_version()
-        if ((not rev or not rev[0] in string.digits) and
-                os.path.exists(version_py)):
-            # If were unable to determine an SVN revision and the version.py
-            # already exists, just update the __setup_datetime__ and leave the
-            # rest of the file untouched
-            update_setup_datetime(version_py)
-            return
-        elif rev is None:
-            rev = 'Unable to determine SVN revision'
-
-        svn_info = get_svn_info()
-
-        # Wrap version, rev, and svn_info in str() to ensure that Python 2
-        # unicode literals aren't written, which will break things in Python 3
-        template_variables = {
-                'hook_function': function_name,
-                'name': name,
-                'version': str(version),
-                'vdate': str(vdate),
-                'svn_revision': str(rev),
-                'svn_full_info': str(svn_info),
-                'setup_datetime': datetime.datetime.now(),
-        }
-
-        # my_version is version.py for the stsci.distutils package.
-        # It can be None if we are called during the install of
-        # stsci.distutils; we are creating the version.py, so it was
-        # not available to import yet.  If this is what is happening,
-        # we construct it specially.
-        if my_version is None :
-            if  package == 'stsci.distutils' :
-                template_variables['stsci_distutils_version'] = version
-            else:
-                # It should never happen that version.py does not
-                # exist when we are installing any other package.
-                raise RuntimeError('Internal consistency error')
-        else :
-            template_variables['stsci_distutils_version'] = \
-                    my_version.__version__
-
-        with open(version_py, 'w') as f:
-            f.write(VERSION_PY_TEMPLATE % template_variables)
-
-
-def version_setup_hook(config):
-    """Creates a Python module called version.py which contains these variables:
-
-    * ``__version__`` (the release version)
-    * ``__svn_revision__`` (the SVN revision info as returned by the ``svnversion``
-      command)
-    * ``__svn_full_info__`` (as returned by the ``svn info`` command)
-    * ``__setup_datetime__`` (the date and time that setup.py was last run).
-    * ``__vdate__`` (the release date)
-
-    These variables can be imported in the package's ``__init__.py`` for
-    degugging purposes.  The version.py module will *only* be created in a
-    package that imports from the version module in its ``__init__.py``.  It
-    should be noted that this is generally preferable to writing these
-    variables directly into ``__init__.py``, since this provides more control
-    and is less likely to unexpectedly break things in ``__init__.py``.
-
-    Config Usage::
-
-        [global]
-        setup-hooks = stsci.distutils.hooks.version_setup_hook
-
-    You should write exactly this in your package's ``__init__.py``::
-
-        from .version import *
-
-    """
-
-    if is_display_option(ignore=['--version']):
-        return
-
-    name = config['metadata'].get('name')
-    version = config['metadata'].get('version', '0.0.0')
-    vdate = config['metadata'].get('vdate', 'unspecified')
-    package_dir = config.get('files', {}).get('packages_root', '')
-    packages = config.get('files', {}).get('packages', '')
-
-    packages = split_multiline(packages)
-
-    _version_hook(__name__ + '.version_setup_hook', package_dir, packages,
-                 name, version, vdate)
-
-
-def version_pre_command_hook(command_obj):
-    """
-    .. deprecated:: 0.3
-        Use :func:`version_setup_hook` instead; it's generally safer to
-        check/update the version.py module on every setup.py run instead of on
-        specific commands.
-
-    This command hook creates an version.py file in each package that requires
-    it.  This is by determining if the package's ``__init__.py`` tries to
-    import or import from the version module.
-
-    version.py will not be created in packages that don't use it.  It should
-    only be used by the top-level package of the project.
-    """
-
-    if is_display_option():
-        return
-
-    package_dir = command_obj.distribution.package_dir.get('', '.')
-    packages = command_obj.distribution.packages
-    name = command_obj.distribution.metadata.name
-    version = command_obj.distribution.metadata.version
-
-    _version_hook(__name__ + '.version_pre_command_hook',package_dir, packages,
-                 name, version, vdate=None)
-
-
-def version_post_command_hook(command_obj):
-    """
-    .. deprecated:: 0.3
-        This hook was meant to complement :func:`version_pre_command_hook`,
-        also deprecated.
-
-    Cleans up a previously generated version.py in order to avoid
-    clutter.
-
-    Only removes the file if we're in an SVN working copy and the file is not
-    already under version control.
-    """
-
-    package_dir = command_obj.distribution.package_dir.get('', '.')
-    packages = command_obj.distribution.packages
-
-    for package in packages:
-        clean_version_py(package_dir, package)
-
-
-def numpy_extension_hook(command_obj):
-    """A distutils2 pre-command hook for the build_ext command needed for
-    building extension modules that use NumPy.
-
-    To use this hook, add 'numpy' to the list of include_dirs in setup.cfg
-    section for an extension module.  This hook will replace 'numpy' with the
-    necessary numpy header paths in the include_dirs option for that extension.
-
-    Note: Although this function uses numpy, stsci.distutils does not depend on
-    numpy.  It is up to the distribution that uses this hook to require numpy
-    as a dependency.
-
-    Config Usage::
-
-        [extension=mypackage.extmod]
-        sources =
-            foo.c
-            bar.c
-        include_dirs = numpy
-
-        [build_ext]
-        pre-hook.numpy-extension = stsci.distutils.hooks.numpy_extension_hook
-    """
-
-    command_name = command_obj.get_command_name()
-    if command_name != 'build_ext':
-        log.warn('%s is meant to be used with the build_ext command only; '
-                 'it is not for use with the %s command.' %
-                 (__name__, command_name))
-    try:
-        import numpy
-    except ImportError:
-        # It's virtually impossible to automatically install numpy through
-        # setuptools; I've tried.  It's not pretty.
-        # Besides, we don't want users complaining that our software doesn't
-        # work just because numpy didn't build on their system.
-        sys.stderr.write('\n\nNumpy is required to build this package.\n'
-                         'Please install Numpy on your system first.\n\n')
-        sys.exit(1)
-
-    includes = [numpy.get_include()]
-    #includes = [numpy.get_numarray_include(), numpy.get_include()]
-    for extension in command_obj.extensions:
-        if 'numpy' not in extension.include_dirs:
-            continue
-        idx = extension.include_dirs.index('numpy')
-        for inc in includes:
-            extension.include_dirs.insert(idx, inc)
-        extension.include_dirs.remove('numpy')
-
-
-def glob_data_files(command_obj):
-    """A pre-command hook for the install_data command allowing wildcard
-    patterns to be used in the data_files option.
-
-    Also ensures that data files with relative paths as their targets are
-    installed relative install_lib.
-
-    Config Usage::
-
-        [files]
-        data_files =
-            target_directory = source_directory/*.foo
-            other_target_directory = other_source_directory/*
-
-        [install_data]
-        pre-hook.glob-data-files = stsci.distutils.hooks.glob_data_files
-
-
-    """
-
-    command_name = command_obj.get_command_name()
-    if command_name != 'install_data':
-        log.warn('%s is meant to be used with the install_data command only; '
-                 'it is not for use with the %s command.' %
-                 (__name__, command_name))
-
-    data_files = command_obj.data_files
-
-    for idx, val in enumerate(data_files[:]):
-        if isinstance(val, string_types):
-            # Support the rare, deprecated case where just a filename is given
-            filenames = glob.glob(val)
-            del data_files[idx]
-            data_files.extend(filenames)
-            continue
-
-        dest, filenames = val
-        filenames = sum((glob.glob(item) for item in filenames), [])
-        data_files[idx] = (dest, filenames)
-
-    # Ensure the correct install dir; this is the default behavior for
-    # installing with distribute, but when using
-    # --single-version-externally-managed we need to to tweak this
-    install_cmd = command_obj.get_finalized_command('install')
-    if command_obj.install_dir == install_cmd.install_data:
-        install_lib_cmd = command_obj.get_finalized_command('install_lib')
-        command_obj.install_dir = install_lib_cmd.install_dir
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/release.py b/required_pkgs/stsci.distutils/stsci/distutils/release.py
deleted file mode 100644
index 80c9aec..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/release.py
+++ /dev/null
@@ -1,195 +0,0 @@
-from __future__ import print_function
-
-"""Hooks for zest.releaser specifically for STScI software"""
-
-
-import glob
-import os
-import shutil
-import sys
-
-if sys.version_info[0] < 3:
-    from ConfigParser import ConfigParser
-    input = raw_input
-else:
-    from configParser import ConfigParser
-
-from setuptools.dist import Distribution
-from zest.releaser.utils import ask
-
-
-DEFAULT_PACKAGE_INDEX_PATH = '/eng/ssb/web/download/packages'
-PACKAGE_INDEX_URL = 'http://stsdas.stsci.edu/download/packages/index'
-
-
-def is_stsci_project(workingdir):
-    """
-    Returns True if the product being released is from STScI and is using the
-    d2to1 + stsci.distutils build/install platform.
-
-    This is determined via some basic introspection of the project layout;
-    namely that it contains a setup.cfg, and that the author-email value
-    contains '@stsci.edu'.  It's ham-fisted but it should do for now.
-    """
-
-    setup_cfg = os.path.join(workingdir, 'setup.cfg')
-    if not os.path.exists(setup_cfg):
-        return False
-
-    cfg = ConfigParser()
-    cfg.read(setup_cfg)
-    if cfg.has_option('metadata', 'author-email'):
-        author_email = cfg.get('metadata', 'author-email')
-    elif cfg.has_option('metadata', 'author_email'):
-        author_email = cfg.get('metadata', 'author_email')
-    else:
-        author_email = ''
-
-    return '@stsci.edu' in author_email
-
-
-def fix_dev_version_template(data):
-    """
-    A postreleaser.before hook to change the dev_version_template from the
-    annoying default of 'x.y.z.dev0' to just 'x.y.z.dev' without the 0.
-    """
-
-    if not is_stsci_project(data['workingdir']):
-        return
-
-    data['dev_version_template'] = '%(new_version)s.dev'
-
-
-def fix_sdist_format(data):
-    """
-    Recent versions of zest.releaser have an annoyance that it creates .zip
-    sdists instead of .tar.gz.  This is supposedly to work around a bug in
-    Python 2.4 with .tar.gz sdists, but none of our software supports Python
-    2.4 anyways.
-
-    Unfortunately the only way to disable this behavior, for now, is with
-    monkey-patching zest.releaser.
-    """
-
-    if not is_stsci_project(data['workingdir']):
-        return
-
-    from zest.releaser.release import Releaser
-
-    def _my_sdist_options(self):
-        return ''
-
-    Releaser._sdist_options = _my_sdist_options
-
-
-# TODO: This package index is no longer being maintained, so this hook can
-# be removed in the next version or so.  I don't think anyone is using it
-# though.
-def add_to_stsci_package_index(data):
-    """
-    A releaser.after hook to copy the source distribution to STScI's local
-    package index and update the index using basketweaver.
-    """
-
-    if not is_stsci_project(data['workingdir']):
-        return
-
-    if not data['tagdir'] or not os.path.exists(data['tagdir']):
-        # Do nothing if a tag checkout was not performed
-        return
-
-    if not ask('Copy source package to STScI package index'):
-        return
-
-    package_path = DEFAULT_PACKAGE_INDEX_PATH
-    if not os.path.exists(package_path):
-        package_path = ''
-
-    question = 'Path to package directory'
-    if package_path:
-        # A default exists; let the user know
-        question += ' [%s]' % package_path
-    question += ': '
-
-    answer = ''
-    while not answer:
-        try:
-            answer = input(question).strip()
-            if not answer:
-                if package_path:
-                    # The user simple pressed enter, so use the supplied
-                    # default
-                    answer = package_path
-                else:
-                    continue
-            if not os.path.exists(answer):
-                print ('The supplied path %s does not exist.  Please enter a '
-                       'different path or press Ctrl-C to cancel.' % answer)
-            if not os.access(answer, os.W_OK):
-                print ('The supplied path %s is not writeable.  Either change '
-                       'the permissions of the directory or have someone '
-                       'grant you access and try again, enter a different '
-                       'directory, or press Ctrl-C to cancel.' % answer)
-            package_path = answer
-            break
-            # The default was not supplied, so keep asking
-        except KeyboardInterrupt:
-            return
-
-    # A tag checkout was made and an sdist created, this is where it would be
-    # (the sdist is a .zip on Windows, a .tar.gz elsewhere--normally this
-    # should be .tar.gz; don't make releases on Windows)
-    sdist_file = ''
-    while not sdist_file:
-        try:
-            sdist_file = glob.glob(os.path.join(data['tagdir'], 'dist',
-                                                '*.tar.gz'))[0]
-        except IndexError:
-            try:
-                sdist_file = glob.glob(os.path.join(data['tagdir'], 'dist',
-                                                    '*.zip'))[0]
-            except IndexError:
-                try:
-                    print (
-                        "Could not find a source distribution in %s; did you "
-                        "do a source checkout for upload?  If possible, try "
-                        "to cd to %s and manually create a source "
-                        "distribution by running `python setup.py sdist`.  "
-                        "Then press enter to try again (or hit Ctrl-C to "
-                        "cancel).  Go ahead, I'll wait..." %
-                        (data['tagdir'], data['tagdir']))
-                    input()
-                except KeyboardInterrupt:
-                    return
-
-    # Almost ready go to--now we just need to check if basketweaver is
-    # available, and get it if not.
-    try:
-        import basketweaver.makeindex
-    except ImportError:
-        # Use setuptools' machinery to fetch a package and add it to the path;
-        # we could do this without using setuptools directly, but it would
-        # basically end up reimplementing much of the same code.
-        dist = Distribution({'dependency_links': [PACKAGE_INDEX_URL]})
-        try:
-            dist.fetch_build_eggs(['basketweaver'])
-        except:
-            # There are so many things that could possibly go wrong here...
-            print ('Failed to get basketweaver, which is required to rebuild '
-                   'the package index.  To manually complete the release, '
-                   'install basketweaver manually, then copy %s into %s, cd '
-                   'to %s, and then run `makeindex *`, where makeindex is the '
-                   'command installed by basketweaver.' %
-                   (sdist_file, package_path, package_path))
-        import basketweaver.makeindex
-
-    # Now we should have everything we need...
-    shutil.copy(sdist_file, package_path)
-    old_cwd = os.getcwd()
-    os.chdir(package_path)
-    try:
-        basketweaver.makeindex.main(glob.glob('*'))
-    finally:
-        os.chdir(old_cwd)
-
-    print ('Finished adding package to %s.' % PACKAGE_INDEX_URL)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/svnutils.py b/required_pkgs/stsci.distutils/stsci/distutils/svnutils.py
deleted file mode 100644
index b2bf332..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/svnutils.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""Functions for getting and saving SVN info for distribution."""
-
-
-from __future__ import with_statement
-
-import os
-import subprocess
-
-def get_svn_version(path='.'):
-    """Uses ``svnversion`` to get just the latest revision at the given
-    path.
-    """
-
-    try:
-        pipe = subprocess.Popen(['svnversion', path], stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-    except OSError:
-        return None
-
-    if pipe.wait() != 0:
-        return None
-
-    return pipe.stdout.read().decode('latin1').strip()
-
-
-def get_svn_info(path='.'):
-    """Uses ``svn info`` to get the full information about the working copy at
-    the given path.
-    """
-
-    path = os.path.abspath(path)
-
-    try:
-        pipe = subprocess.Popen(['svn', 'info', path], stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-        # stderr is redirected in order to squelch it.  Later Python versions
-        # have subprocess.DEVNULL for this purpose, but it's not available in
-        # 2.5
-    except OSError:
-        return 'unknown'
-
-    if pipe.wait() != 0:
-        return 'unknown'
-
-    lines = []
-    for line in pipe.stdout.readlines():
-        line = line.decode('latin1').strip()
-        if not line:
-            continue
-        if line.startswith('Path:'):
-            line = 'Path: %s' % os.path.basename(path)
-        lines.append(line)
-
-    if not lines:
-        return 'unknown'
-
-    return '\n'.join(lines)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/__init__.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/__init__.py
deleted file mode 100644
index c94fdfc..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-
-import nose
-
-from .util import reload, rmtree
-
-
-TESTPACKAGE_URL = ('https://svn.stsci.edu/svn/ssb/stsci_python/'
-                   'stsci.distutils/trunk/stsci/distutils/tests/testpackage')
-TESTPACKAGE_REV = '17597'  # The last known 'good' revision of this package
-
-
-class StsciDistutilsTestCase(object):
-    @classmethod
-    def setup_class(cls):
-        cls.wc_dir = tempfile.mkdtemp(prefix='stsci-distutils-test-')
-        try:
-            p = subprocess.Popen(['svn', '-r', TESTPACKAGE_REV,
-                                  '--non-interactive', '--trust-server-cert',
-                                  'checkout', TESTPACKAGE_URL, cls.wc_dir],
-                                  stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-        except OSError as e:
-            raise nose.SkipTest('svn unavailable to checkout out test '
-                                'package: %s' % e)
-
-        if p.wait() != 0:
-            raise nose.SkipTest('svn failed to check out the test package: '
-                                '%s; tests will not be able to run' %
-                                p.stderr.read().decode('latin1'))
-
-    @classmethod
-    def teardown_class(cls):
-        rmtree(cls.wc_dir)
-
-    def setup(self):
-        self.temp_dir = tempfile.mkdtemp(prefix='stsci-distutils-test-')
-        self.package_dir = os.path.join(self.temp_dir, 'testpackage')
-        shutil.copytree(self.wc_dir, self.package_dir)
-        self.oldcwd = os.getcwd()
-        os.chdir(self.package_dir)
-
-        # We need to manually add the test package's path to the stsci
-        # package's __path__ since it's already been imported.
-        if 'stsci' in sys.modules:
-            # Clean the existing __path__ up
-            reload(sys.modules['stsci'])
-            sys.modules['stsci'].__path__.insert(
-                0, os.path.join(self.package_dir, 'stsci'))
-
-    def teardown(self):
-        os.chdir(self.oldcwd)
-        # Remove stsci.testpackage from sys.modules so that it can be freshly
-        # re-imported by the next test
-        for k in list(sys.modules):
-            if k == 'stsci.testpackage' or k.startswith('stsci.testpackage.'):
-                del sys.modules[k]
-        rmtree(self.temp_dir)
-
-    def run_setup(self, *args):
-        return self._run_cmd(sys.executable, ('setup.py',) + args)
-
-    def run_svn(self, *args):
-        return self._run_cmd('svn', args)
-
-    def _run_cmd(self, cmd, args):
-        """
-        Runs a command, with the given argument list, in the root of the test
-        working copy--returns the stdout and stderr streams and the exit code
-        from the subprocess.
-        """
-
-        os.chdir(self.package_dir)
-        p = subprocess.Popen([cmd] + list(args), stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-
-        streams = tuple(s.decode('latin1').strip() for s in p.communicate())
-
-        return (streams) + (p.returncode,)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/test_commands.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/test_commands.py
deleted file mode 100644
index 5b4ef9f..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/test_commands.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import with_statement
-
-import os
-import sys
-
-try:
-    import numpy
-except ImportError:
-    numpy = None
-
-from nose import SkipTest
-
-from . import StsciDistutilsTestCase
-from .util import get_compiler_command, open_config
-
-
-class TestCommands(StsciDistutilsTestCase):
-    def test_build_optional_ext(self):
-        if numpy is None:
-            raise SkipTest("numpy is required to run this test")
-        # The test extension in the test package is already configured to be
-        # "optional" by default--we'll do one test build to make sure that goes
-        # smoothly
-        compiler_cmd = get_compiler_command()
-
-        _, _, exit_code = self.run_setup('build')
-
-        # Make sure the build went successfully; a zero exit code should be
-        # good enough for our purposes
-        assert exit_code == 0
-
-        # Now let's try breaking the build
-        with open(os.path.join('src', 'testext.c'), 'a') as f:
-            f.write('1/0')
-
-        # We leave off the exit status from the compiler--in most cases it will
-        # say "exit status 1" but that can't be guaranteed for all compilers
-        msg = ('building optional extension "stsci.testpackage.testext" '
-               'failed: command \'%s\' failed with exit status' % compiler_cmd)
-        # Prior to Python 2.7, distutils.log output everything to stdout; now
-        # warnings and errors are output to stderr
-        if sys.version_info[:2] < (2, 7):
-            stderr, _, exit_code = self.run_setup('build', '--force')
-        else:
-            _, stderr, exit_code = self.run_setup('build', '--force')
-        assert exit_code == 0
-        assert stderr.splitlines()[-1].startswith(msg)
-
-        # Test a custom fail message
-        with open_config('setup.cfg') as cfg:
-            cfg.set('extension=stsci.testpackage.testext', 'fail_message',
-                    'Custom fail message.')
-
-        if sys.version_info[:2] < (2, 7):
-            stderr, _, exit_code = self.run_setup('build', '--force')
-        else:
-            _, stderr, exit_code = self.run_setup('build', '--force')
-        assert exit_code == 0
-        assert stderr.splitlines()[-1] == 'Custom fail message.'
-
-        # Finally, make sure the extension is *not* treated as optional if not
-        # marked as such in the config
-        with open_config('setup.cfg') as cfg:
-            cfg.remove_option('extension=stsci.testpackage.testext',
-                              'optional')
-
-        # This error message comes out on stderr for all Python versions AFAICT
-        msg = "error: command '%s' failed with exit status" % compiler_cmd
-        _, stderr, exit_code = self.run_setup('build', '--force')
-        assert exit_code != 0
-        assert stderr.splitlines()[-1].startswith(msg)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/test_hooks.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/test_hooks.py
deleted file mode 100644
index b2fc5ed..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/test_hooks.py
+++ /dev/null
@@ -1,256 +0,0 @@
-from __future__ import with_statement
-
-
-import glob
-import os
-import sys
-import tarfile
-import time
-import zipfile
-
-from datetime import datetime
-from setuptools import Distribution
-
-try:
-    import numpy
-except ImportError:
-    numpy = None
-
-from nose import SkipTest
-
-from . import StsciDistutilsTestCase, TESTPACKAGE_REV
-from .util import reload, get_compiler_command, open_config, rmtree
-
-
-VERSION = '0.1.dev' + TESTPACKAGE_REV
-
-
-class TestHooks(StsciDistutilsTestCase):
-    def test_setup_py_version(self):
-        """
-        Test that the `./setupy.py --version` command returns the correct
-        value without balking.
-        """
-
-        self.run_setup('egg_info')
-        stdout, _, _ = self.run_setup('--version')
-        assert stdout == VERSION
-
-    def test_version_with_rev(self):
-        """Test that the version string contains the correct SVN revision."""
-
-        # Build the package
-        self.run_setup('build')
-        self.run_setup('sdist')
-
-        import stsci.testpackage
-
-        assert hasattr(stsci.testpackage, '__version__')
-        assert stsci.testpackage.__version__ == VERSION
-
-        assert hasattr(stsci.testpackage, '__svn_revision__')
-        assert stsci.testpackage.__svn_revision__ == TESTPACKAGE_REV
-
-        filenames = [os.path.join('dist',
-                                  'stsci.testpackage-%s.%s' % (VERSION, ext))
-                     for ext in ('tar.gz', 'zip')]
-
-        assert os.path.exists(filenames[0]) or os.path.exists(filenames[1])
-
-    def test_release_version(self):
-        """
-        Ensure that the SVN revision is not appended to release versions
-        (i.e. not ending with '.dev'.
-        """
-
-        with open_config('setup.cfg') as cfg:
-            cfg.set('metadata', 'version', '0.1')
-
-        self.run_setup('egg_info')
-        stdout, _, _ = self.run_setup('--version')
-        assert stdout == '0.1'
-
-    def test_inline_svn_update(self):
-        """Test version.py's capability of updating the SVN info at runtime."""
-
-        self.run_setup('build')
-
-        import stsci.testpackage
-
-        assert hasattr(stsci.testpackage, '__svn_revision__')
-        assert stsci.testpackage.__svn_revision__ == TESTPACKAGE_REV
-
-        with open('TEST', 'w') as f:
-            # Create an empty file
-            pass
-
-        self.run_svn('add', 'TEST')
-        # The working copy has been modified, so now svnversion (which is used
-        # to generate __svn_revision__) should be the revision + 'M'
-        reload(stsci.testpackage.version)
-        reload(stsci.testpackage)
-
-        assert stsci.testpackage.__svn_revision__ == TESTPACKAGE_REV + 'M'
-
-    def test_setup_datetime(self):
-        """
-        Test that the setup datetime is present, and is updated by subsequent
-        setup.py runs.
-        """
-
-        # Build the package
-        self.run_setup('build')
-
-        import stsci.testpackage
-
-        assert hasattr(stsci.testpackage, '__setup_datetime__')
-        prev = stsci.testpackage.__setup_datetime__
-        now = datetime.now()
-        # Rebuild
-        # So that there's less chance for ambiguity
-        time.sleep(1)
-        self.run_setup('build')
-
-        reload(stsci.testpackage.version)
-        reload(stsci.testpackage)
-
-        import stsci.testpackage
-
-        assert hasattr(stsci.testpackage, '__setup_datetime__')
-        assert stsci.testpackage.__setup_datetime__ > now
-        assert stsci.testpackage.__setup_datetime__ > prev
-
-    def test_numpy_extension_hook(self):
-        """Test basic functionality of the Numpy extension hook."""
-
-        if numpy is None:
-            raise SkipTest("numpy is required to run this test")
-
-        compiler_cmd = get_compiler_command()
-
-        stdout, _, _ = self.run_setup('build')
-        for line in stdout.splitlines():
-            # Previously this used shlex.split(), but that's broken for unicode
-            # strings prior to Python 3.x, and it doesn't matter too much since
-            # we only care about the first argument
-            args = line.split()
-            if not args:
-                continue
-            if args[0] != compiler_cmd:
-                continue
-
-            # The first output from the compiler should be an attempt to
-            # compile a c file to an object, so that should include all the
-            # include paths.  This is of course not universally true, but it
-            # should hold true for this test case
-            for path in [numpy.get_include()]:
-            #for path in [numpy.get_include(), numpy.get_numarray_include()]:
-                assert '-I' + path in args
-            break
-
-        # And for the heck of it, let's ensure that this doesn't happen if
-        # 'numpy' is not listed in include_dirs
-        with open_config('setup.cfg') as cfg:
-            cfg.remove_option('extension=stsci.testpackage.testext',
-                              'include_dirs')
-
-        rmtree('build')
-
-        stdout, _, _ = self.run_setup('build')
-        for line in stdout.splitlines():
-            args = line.split()
-            if not args:
-                continue
-            if args[0] != compiler_cmd:
-                continue
-            for path in [numpy.get_include()]:
-            #for path in [numpy.get_include(), numpy.get_numarray_include()]:
-                assert '-I' + path not in args
-
-    def test_glob_data_files(self):
-        """
-        Test the glob_data_files hook by ensuring that all the correct data
-        files are included in the source distribution, and that they are
-        installed to the correct location in the package.
-        """
-
-        data_files = os.path.join('stsci', 'testpackage', 'data_files')
-
-        # First test the source distribution
-        self.run_setup('sdist')
-
-        # There can be only one
-        try:
-            tf = glob.glob(os.path.join('dist', '*.tar.gz'))[0]
-        except IndexError:
-            # No tar.gz files found?  On Windows sdist creates a .zip file, so
-            # let's look for that
-            tf = glob.glob(os.path.join('dist', '*.zip'))[0]
-            # If that failed then I don't know--I guess the test should fail
-
-        if tf.endswith('.tar.gz'):
-            tf = tarfile.open(tf)
-            # Tarfiles created by sdist kindly place all contents in a
-            # top-level directory with the same name as the file minus
-            # extension, so as to kindly not bomb you when you extract it.  But
-            # we don't care about that top level directory
-            names = ['/'.join(p.split('/')[1:]) for p in tf.getnames()]
-        else:
-            with zipfile.ZipFile(tf) as zf:
-                names = ['/'.join(p.split('/')[1:]) for p in zf.namelist()]
-
-        # Sdists should place the data_files at the root, just like in the
-        # normal source layout; even files that aren't normally installed
-        # should be included
-        for filename in ['a.txt', 'b.txt', 'c.rst']:
-            # Don't use os.path.join -- zipfile/tarfile always return paths
-            # with / as path sep
-            assert ('data_files/' + filename) in names
-
-        # Now we test that data_files go to the right place in various install
-        # schemes
-        def get_install_lib(args):
-            # This helper uses the distutils/setuptools machinery to determine
-            # where a command will install files based on the arguments passed
-            # to setup.py
-            dist = Distribution({'script_args': args})
-            dist.parse_command_line()
-            install_cmd = dist.get_command_obj('install')
-            install_cmd.ensure_finalized()
-            return install_cmd.install_lib
-
-        def test_install_scheme(args):
-            if numpy is None:
-                raise SkipTest("numpy is required to run this test")
-            # This general code should work to test the files in a variety of
-            # install schemes depending on args
-            if os.path.exists('temp'):
-                rmtree('temp')
-            install_lib = get_install_lib(args)
-            os.makedirs(install_lib)
-            old_pythonpath = os.environ.get('PYTHONPATH')
-            # For a setuptools/easy_install-stype install to an alternate
-            # prefix we have to have the new install dir on the PYTHONPATH or
-            # easy_install will balk
-            os.environ['PYTHONPATH'] = (
-                install_lib + os.pathsep +
-                (old_pythonpath if old_pythonpath else ''))
-
-            try:
-                self.run_setup(*(args + ['--record=installed.txt']))
-            finally:
-                if old_pythonpath is not None:
-                    os.environ['PYTHONPATH'] = old_pythonpath
-
-            found_files = 0
-            with open('installed.txt') as f:
-                # installed.txt, however, contains OS-specific paths
-                for line in f:
-                    for name in ['a.txt', 'b.txt', 'c.rst']:
-                        if line.strip().endswith(os.sep + name):
-                            found_files += 1
-            assert found_files == 2
-
-        test_install_scheme(['install', '--prefix=temp'])
-        test_install_scheme(['install', '--root=temp'])
-        test_install_scheme(['install', '--install-lib=temp'])
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/CHANGES.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/CHANGES.txt
deleted file mode 100644
index 709b9d4..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/CHANGES.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-Changelog
-===========
-
-0.3 (unreleased)
-------------------
-
-- The ``glob_data_files`` hook became a pre-command hook for the install_data
-  command instead of being a setup-hook.  This is to support the additional
-  functionality of requiring data_files with relative destination paths to be
-  install relative to the package's install path (i.e. site-packages).
-
-- Dropped support for and deprecated the easier_install custom command.
-  Although it should still work, it probably won't be used anymore for
-  stsci_python packages.
-
-- Added support for the ``build_optional_ext`` command, which replaces/extends
-  the default ``build_ext`` command.  See the README for more details.
-
-- Added the ``tag_svn_revision`` setup_hook as a replacement for the
-  setuptools-specific tag_svn_revision option to the egg_info command.  This
-  new hook is easier to use than the old tag_svn_revision option: It's
-  automatically enabled by the presence of ``.dev`` in the version string, and
-  disabled otherwise.
-
-- The ``svn_info_pre_hook`` and ``svn_info_post_hook`` have been replaced with
-  ``version_pre_command_hook`` and ``version_post_command_hook`` respectively.
-  However, a new ``version_setup_hook``, which has the same purpose, has been
-  added.  It is generally easier to use and will give more consistent results
-  in that it will run every time setup.py is run, regardless of which command
-  is used.  ``stsci.distutils`` itself uses this hook--see the `setup.cfg` file
-  and `stsci/distutils/__init__.py` for example usage.
-
-- Instead of creating an `svninfo.py` module, the new ``version_`` hooks create
-  a file called `version.py`.  In addition to the SVN info that was included
-  in `svninfo.py`, it includes a ``__version__`` variable to be used by the
-  package's `__init__.py`.  This allows there to be a hard-coded
-  ``__version__`` variable included in the source code, rather than using
-  pkg_resources to get the version.
-
-- In `version.py`, the variables previously named ``__svn_version__`` and
-  ``__full_svn_info__`` are now named ``__svn_revision__`` and
-  ``__svn_full_info__``.
-
-- Fixed a bug when using stsci.distutils in the installation of other packages
-  in the ``stsci.*`` namespace package.  If stsci.distutils was not already
-  installed, and was downloaded automatically by distribute through the
-  setup_requires option, then ``stsci.distutils`` would fail to import.  This
-  is because the way the namespace package (nspkg) mechanism currently works,
-  all packages belonging to the nspkg *must* be on the import path at initial
-  import time.
-
-  So when installing stsci.tools, for example, if ``stsci.tools`` is imported
-  from within the source code at install time, but before ``stsci.distutils``
-  is downloaded and added to the path, the ``stsci`` package is already
-  imported and can't be extended to include the path of ``stsci.distutils``
-  after the fact.  The easiest way of dealing with this, it seems, is to
-  delete ``stsci`` from ``sys.modules``, which forces it to be reimported, now
-  the its ``__path__`` extended to include ``stsci.distutil``'s path.
-
-
-0.2.2 (2011-11-09)
-------------------
-
-- Fixed check for the issue205 bug on actual setuptools installs; before it
-  only worked on distribute.  setuptools has the issue205 bug prior to version
-  0.6c10.
-
-- Improved the fix for the issue205 bug, especially on setuptools.
-  setuptools, prior to 0.6c10, did not back of sys.modules either before
-  sandboxing, which causes serious problems.  In fact, it's so bad that it's
-  not enough to add a sys.modules backup to the current sandbox: It's in fact
-  necessary to monkeypatch setuptools.sandbox.run_setup so that any subsequent
-  calls to it also back up sys.modules.
-
-
-0.2.1 (2011-09-02)
-------------------
-
-- Fixed the dependencies so that setuptools is requirement but 'distribute'
-  specifically.  Previously installation could fail if users had plain
-  setuptools installed and not distribute
-
-0.2 (2011-08-23)
-------------------
-
-- Initial public release
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/LICENSE.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/LICENSE.txt
deleted file mode 100644
index 7e8019a..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/MANIFEST.in b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/MANIFEST.in
deleted file mode 100644
index cdc95ea..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-include data_files/*
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/README.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/README.txt
deleted file mode 100644
index 4f00d32..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/README.txt
+++ /dev/null
@@ -1,148 +0,0 @@
-Introduction
-============
-This package contains utilities used to package some of STScI's Python
-projects; specifically those projects that comprise stsci_python_ and
-Astrolib_.
-
-It currently consists mostly of some setup_hook scripts meant for use with
-`distutils2/packaging`_ and/or d2to1_, and a customized easy_install command
-meant for use with distribute_.
-
-This package is not meant for general consumption, though it might be worth
-looking at for examples of how to do certain things with your own packages, but
-YMMV.
-
-Features
-========
-
-Hook Scripts
-------------
-Currently the main features of this package are a couple of setup_hook scripts.
-In distutils2, a setup_hook is a script that runs at the beginning of any
-pysetup command, and can modify the package configuration read from setup.cfg.
-There are also pre- and post-command hooks that only run before/after a
-specific setup command (eg. build_ext, install) is run.
-
-stsci.distutils.hooks.use_packages_root
-'''''''''''''''''''''''''''''''''''''''
-If using the ``packages_root`` option under the ``[files]`` section of
-setup.cfg, this hook will add that path to ``sys.path`` so that modules in your
-package can be imported and used in setup.  This can be used even if
-``packages_root`` is not specified--in this case it adds ``''`` to
-``sys.path``.
-
-stsci.distutils.hooks.version_setup_hook
-''''''''''''''''''''''''''''''''''''''''
-Creates a Python module called version.py which currently contains four
-variables:
-
-* ``__version__`` (the release version)
-* ``__svn_revision__`` (the SVN revision info as returned by the ``svnversion``
-  command)
-* ``__svn_full_info__`` (as returned by the ``svn info`` command)
-* ``__setup_datetime__`` (the date and time that setup.py was last run).
-
-These variables can be imported in the package's `__init__.py` for degugging
-purposes.  The version.py module will *only* be created in a package that
-imports from the version module in its `__init__.py`.  It should be noted that
-this is generally preferable to writing these variables directly into
-`__init__.py`, since this provides more control and is less likely to
-unexpectedly break things in `__init__.py`.
-
-stsci.distutils.hooks.version_pre_command_hook
-''''''''''''''''''''''''''''''''''''''''''''''
-Identical to version_setup_hook, but designed to be used as a pre-command
-hook.
-
-stsci.distutils.hooks.version_post_command_hook
-'''''''''''''''''''''''''''''''''''''''''''''''
-The complement to version_pre_command_hook.  This will delete any version.py
-files created during a build in order to prevent them from cluttering an SVN
-working copy (note, however, that version.py is *not* deleted from the build/
-directory, so a copy of it is still preserved).  It will also not be deleted
-if the current directory is not an SVN working copy.  For example, if source
-code extracted from a source tarball it will be preserved.
-
-stsci.distutils.hooks.tag_svn_revision
-''''''''''''''''''''''''''''''''''''''
-A setup_hook to add the SVN revision of the current working copy path to the
-package version string, but only if the version ends in .dev.
-
-For example, ``mypackage-1.0.dev`` becomes ``mypackage-1.0.dev1234``.  This is
-in accordance with the version string format standardized by PEP 386.
-
-This should be used as a replacement for the ``tag_svn_revision`` option to
-the egg_info command.  This hook is more compatible with packaging/distutils2,
-which does not include any VCS support.  This hook is also more flexible in
-that it turns the revision number on/off depending on the presence of ``.dev``
-in the version string, so that it's not automatically added to the version in
-final releases.
-
-This hook does require the ``svnversion`` command to be available in order to
-work.  It does not examine the working copy metadata directly.
-
-stsci.distutils.hooks.numpy_extension_hook
-''''''''''''''''''''''''''''''''''''''''''
-This is a pre-command hook for the build_ext command.  To use it, add a
-``[build_ext]`` section to your setup.cfg, and add to it::
-
-    pre-hook.numpy-extension-hook = stsci.distutils.hooks.numpy_extension_hook
-
-This hook must be used to build extension modules that use Numpy.   The primary
-side-effect of this hook is to add the correct numpy include directories to
-`include_dirs`.  To use it, add 'numpy' to the 'include-dirs' option of each
-extension module that requires numpy to build.  The value 'numpy' will be
-replaced with the actual path to the numpy includes.
-
-stsci.distutils.hooks.is_display_option
-'''''''''''''''''''''''''''''''''''''''
-This is not actually a hook, but is a useful utility function that can be used
-in writing other hooks.  Basically, it returns ``True`` if setup.py was run
-with a "display option" such as --version or --help.  This can be used to
-prevent your hook from running in such cases.
-
-stsci.distutils.hooks.glob_data_files
-'''''''''''''''''''''''''''''''''''''
-A pre-command hook for the install_data command.  Allows filename wildcards as
-understood by ``glob.glob()`` to be used in the data_files option.  This hook
-must be used in order to have this functionality since it does not normally
-exist in distutils.
-
-This hook also ensures that data files are installed relative to the package
-path.  data_files shouldn't normally be installed this way, but the
-functionality is required for a few special cases.
-
-
-Commands
---------
-build_optional_ext
-''''''''''''''''''
-This serves as an optional replacement for the default built_ext command,
-which compiles C extension modules.  Its purpose is to allow extension modules
-to be *optional*, so that if their build fails the rest of the package is
-still allowed to be built and installed.  This can be used when an extension
-module is not definitely required to use the package.
-
-To use this custom command, add::
-
-    commands = stsci.distutils.command.build_optional_ext.build_optional_ext
-
-under the ``[global]`` section of your package's setup.cfg.  Then, to mark
-an individual extension module as optional, under the setup.cfg section for
-that extension add::
-
-    optional = True
-
-Optionally, you may also add a custom failure message by adding::
-
-    fail_message = The foobar extension module failed to compile.
-                   This could be because you lack such and such headers.
-                   This package will still work, but such and such features
-                   will be disabled.
-
-
-.. _stsci_python: http://www.stsci.edu/resources/software_hardware/pyraf/stsci_python
-.. _Astrolib: http://www.scipy.org/AstroLib/
-.. _distutils2/packaging: http://distutils2.notmyidea.org/
-.. _d2to1: http://pypi.python.org/pypi/d2to1
-.. _distribute: http://pypi.python.org/pypi/distribute
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/a.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/a.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/b.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/b.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/c.rst b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/data_files/c.rst
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/distribute_setup.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/distribute_setup.py
deleted file mode 100644
index bbb6f3c..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/distribute_setup.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.19"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-    finally:
-        os.chdir(old_wd)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>="+version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unkown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Removing elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install')+1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index+1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patched done.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def main(argv, version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
-
-if __name__ == '__main__':
-    main(sys.argv[1:])
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.cfg b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.cfg
deleted file mode 100644
index 39448ad..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.cfg
+++ /dev/null
@@ -1,49 +0,0 @@
-[metadata]
-name = stsci.testpackage
-version = 0.1.dev
-author = Erik M. Bray
-author-email = embray at stsci.edu
-home-page = http://www.stsci.edu/resources/software_hardware/stsci_python
-summary = Test package for testing stsci.distutils
-description-file =
-    README.txt
-    CHANGES.txt
-requires-python = >=2.5
-
-requires-dist =
-    setuptools
-    d2to1 (>=0.2.5)
-
-classifier =
-    Development Status :: 3 - Alpha
-    Intended Audience :: Developers
-    License :: OSI Approved :: BSD License
-    Programming Language :: Python
-    Topic :: Scientific/Engineering
-    Topic :: Software Development :: Build Tools
-    Topic :: Software Development :: Libraries :: Python Modules
-    Topic :: System :: Archiving :: Packaging
-
-[files]
-packages =
-    stsci
-    stsci.testpackage
-package_data = stsci.testpackage = package_data/*.txt
-data_files = stsci/testpackage/data_files = data_files/*.txt
-
-[extension=stsci.testpackage.testext]
-sources = src/testext.c
-include_dirs = numpy
-optional = True
-
-[global]
-setup-hooks =
-    stsci.distutils.hooks.tag_svn_revision
-    stsci.distutils.hooks.version_setup_hook
-commands = stsci.distutils.command.build_optional_ext.build_optional_ext
-
-[build_ext]
-pre-hook.numpy-extension-hook = stsci.distutils.hooks.numpy_extension_hook
-
-[install_data]
-pre-hook.glob-data-files = stsci.distutils.hooks.glob_data_files
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.py
deleted file mode 100755
index a1f68a5..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/setup.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-
-try:
-    from setuptools import setup
-except ImportError:
-    from distribute_setup import use_setuptools
-    use_setuptools()
-    from setuptools import setup
-
-setup(
-    setup_requires=['d2to1>=0.2.5', 'stsci.distutils>=0.3.dev'],
-    namespace_packages=['stsci'], packages=['stsci'],
-    dependency_links=['http://stsdas.stsci.edu/download/packages'],
-    d2to1=True,
-    use_2to3=True,
-    zip_safe=False,
-)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/src/testext.c b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/src/testext.c
deleted file mode 100644
index 872d43c..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/src/testext.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <Python.h>
-
-
-static PyMethodDef TestextMethods[] = {
-    {NULL, NULL, 0, NULL}
-};
-
-
-#if PY_MAJOR_VERSION >=3
-static struct PyModuleDef testextmodule = {
-    PyModuleDef_HEAD_INIT,
-    "testext",
-    -1,
-    TestextMethods
-};
-
-PyObject*
-PyInit_testext(void)
-{
-    return PyModule_Create(&testextmodule);
-}
-#else
-PyMODINIT_FUNC
-inittestext(void)
-{
-    Py_InitModule("testext", TestextMethods);
-}
-#endif
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/__init__.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/__init__.py
deleted file mode 100644
index e6e3521..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-try:
-    # As long as we're using setuptools/distribute, we need to do this the
-    # setuptools way or else pkg_resources will throw up unncessary and
-    # annoying warnings (even though the namespace mechanism will still
-    # otherwise work without it).
-    # Get rid of this as soon as setuptools/distribute is dead.
-    __import__('pkg_resources').declare_namespace(__name__)
-except ImportError:
-    pass
-__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/__init__.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/__init__.py
deleted file mode 100644
index 9f599d6..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-try:
-    from .version import (__version__, __svn_revision__, __svn_full_info__,
-                          __setup_datetime__)
-except ImportError:
-    __version__ = ''
-    __svn_revision__ = ''
-    __svn_full_info__ = ''
-    __setup_datetime__ = None
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/package_data/1.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/package_data/1.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/package_data/2.txt b/required_pkgs/stsci.distutils/stsci/distutils/tests/testpackage/stsci/testpackage/package_data/2.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/tests/util.py b/required_pkgs/stsci.distutils/stsci/distutils/tests/util.py
deleted file mode 100644
index 8c3befd..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/tests/util.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from __future__ import with_statement
-
-import contextlib
-import os
-import shutil
-import stat
-
-try:
-    reload = reload
-except NameError:
-    from imp import reload
-
-try:
-    from ConfigParser import ConfigParser
-except ImportError:
-    from configparser import ConfigParser
-
-from distutils.ccompiler import new_compiler
-from distutils.msvccompiler import MSVCCompiler
-from distutils.sysconfig import customize_compiler, get_config_vars
-
-
- at contextlib.contextmanager
-def open_config(filename):
-    cfg = ConfigParser()
-    cfg.read(filename)
-    yield cfg
-    with open(filename, 'w') as fp:
-        cfg.write(fp)
-
-
-def get_compiler_command():
-    """
-    Returns the name of the executable used by the default compiler on the
-    system used by distutils to build C extensions.
-    """
-
-    get_config_vars()
-    compiler = new_compiler()
-    customize_compiler(compiler)
-    if isinstance(compiler, MSVCCompiler):
-        compiler.initialize()
-        # Normally the compiler path will be quoted as it contains spaces
-        return '"%s"' % compiler.cc
-    return compiler.compiler[0]
-
-
-def rmtree(path):
-    """
-    shutil.rmtree() with error handler for 'access denied' from trying to
-    delete read-only files.
-    """
-
-    def onerror(func, path, exc_info):
-        if not os.access(path, os.W_OK):
-            os.chmod(path, stat.S_IWUSR)
-            func(path)
-        else:
-            raise
-
-    return shutil.rmtree(path, onerror=onerror)
diff --git a/required_pkgs/stsci.distutils/stsci/distutils/versionutils.py b/required_pkgs/stsci.distutils/stsci/distutils/versionutils.py
deleted file mode 100644
index 8429db1..0000000
--- a/required_pkgs/stsci.distutils/stsci/distutils/versionutils.py
+++ /dev/null
@@ -1,195 +0,0 @@
-"""Utilities for dealing with package version info.
-
-See also stsci.distutils.svnutils which specifically deals with adding SVN
-info to version.py modules.
-"""
-
-
-from __future__ import with_statement
-
-import datetime
-import os
-import subprocess
-
-from .astutils import ImportVisitor, walk
-
-
-VERSION_PY_TEMPLATE = """
-\"\"\"This is an automatically generated file created by %(hook_function)s.
-Do not modify this file by hand.
-\"\"\"
-
-__all__ = ['__version__', '__vdate__', '__svn_revision__', '__svn_full_info__',
-           '__setup_datetime__']
-
-import datetime
-
-__version__ = %(version)r
-__vdate__ = %(vdate)r
-__svn_revision__ = %(svn_revision)r
-__svn_full_info__ = %(svn_full_info)r
-__setup_datetime__ = %(setup_datetime)r
-
-# what version of stsci.distutils created this version.py
-stsci_distutils_version = %(stsci_distutils_version)r
-
-if '.dev' in __version__:
-    def update_svn_info():
-        \"\"\"Update the SVN info if running out of an SVN working copy.\"\"\"
-
-        import os
-        import string
-        import subprocess
-
-        global __svn_revision__
-        global __svn_full_info__
-
-        path = os.path.abspath(os.path.dirname(__file__))
-
-        run_svnversion = True
-
-        try:
-            pipe = subprocess.Popen(['svn', 'info', path],
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            stdout, _ = pipe.communicate()
-            if pipe.returncode == 0:
-                lines = []
-                for line in stdout.splitlines():
-                    line = line.decode('latin1').strip()
-                    if not line:
-                        continue
-                    lines.append(line)
-
-                if not lines:
-                    __svn_full_info__ = ['unknown']
-                else:
-                    __svn_full_info__ = lines
-            else:
-                run_svnversion = False
-        except OSError:
-            run_svnversion = False
-
-        if run_svnversion:
-            # If updating the __svn_full_info__ succeeded then use its output
-            # to find the base of the working copy and use svnversion to get
-            # the svn revision.
-            for line in __svn_full_info__:
-                if line.startswith('Working Copy Root Path'):
-                    path = line.split(':', 1)[1].strip()
-                    break
-
-            try:
-                pipe = subprocess.Popen(['svnversion', path],
-                                        stdout=subprocess.PIPE,
-                                        stderr=subprocess.PIPE)
-                stdout, _ = pipe.communicate()
-                if pipe.returncode == 0:
-                    stdout = stdout.decode('latin1').strip()
-                    if stdout and stdout[0] in string.digits:
-                        __svn_revision__ = stdout
-            except OSError:
-                pass
-
-        # Convert __svn_full_info__ back to a string
-        if isinstance(__svn_full_info__, list):
-            __svn_full_info__ = '\\n'.join(__svn_full_info__)
-
-
-    update_svn_info()
-    del update_svn_info
-"""[1:]
-
-
-def package_uses_version_py(package_root, package, module_name='version'):
-    """Determines whether or not a version.py module should exist in the given
-    package.  Returns the full path to the version.py module, regardless of
-    whether it already exists.  Otherwise returns False.
-
-    This works by checking whether or not there are any imports from the
-    'version' module in the package's ``__init__.py``.
-
-    You should write this in your package's ``__init__.py``::
-
-        from .version import *
-
-    """
-
-    pdir = os.path.join(package_root, *(package.split('.')))
-    init = os.path.join(pdir, '__init__.py')
-    if not os.path.exists(init):
-        raise Exception('Not a valid package - no __init__.py')
-
-    try:
-        visitor = ImportVisitor()
-        walk(init, visitor)
-    except:
-        raise SyntaxError('Not able to parse %s' % init)
-
-    found = False
-    # Check the import statements parsed from the file for an import of or
-    # from the svninfo module in this package
-    for imp in visitor.imports:
-        if imp[0] in (module_name, '.'.join((package, module_name))):
-            found = True
-            break
-    for imp in visitor.importfroms:
-        mod = imp[0]
-        name = imp[1]
-        if (mod in (module_name, '.'.join((package, module_name))) or
-            (mod == package and name == module_name)):
-            found = True
-            break
-
-    if not found:
-        return False
-
-    return os.path.join(pdir, module_name + '.py')
-
-
-def clean_version_py(package_dir, package):
-    """Removes the generated version.py module from a package, but only if
-    we're in an SVN working copy.
-    """
-
-    pdir = os.path.join(package_root, *(package.split('.')))
-    version_py = os.path.join(pdir, 'version.py')
-    if not os.path.exists(svninfo):
-        return
-
-    try:
-        pipe = subprocess.Popen(['svn', 'status', svninfo],
-                                stdout=subprocess.PIPE,
-                                stderr=subprocess.PIPE)
-    except OSError:
-        return
-
-    if pipe.wait() != 0:
-        return
-
-    # TODO: Maybe don't assume ASCII here.  Find out the best way to handle
-    # this.
-    if not pipe.stdout.read().decode('latin1').startswith('?'):
-        return
-
-    os.remove(version_py)
-
-
-def update_setup_datetime(filename='version.py'):
-    """Update the version.py with the last time a setup command was run."""
-
-    if not os.path.exists(filename):
-        return
-
-    d = datetime.datetime.now()
-
-    lines = []
-    with open(filename, 'r') as f:
-        lines = f.readlines()
-
-    with open(filename, 'w') as f:
-        for line in lines:
-            if not line.startswith('__setup_datetime__'):
-                f.write(line)
-            else:
-                f.write('__setup_datetime__ = %r\n' % d)
diff --git a/required_pkgs/stsci.distutils/tox.ini b/required_pkgs/stsci.distutils/tox.ini
deleted file mode 100644
index 8b9771d..0000000
--- a/required_pkgs/stsci.distutils/tox.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[tox]
-envlist = py25,py26,py27,py32,py33
-
-[testenv]
-deps =
-    nose
-    numpy
-    setuptools-subversion
-commands =
-    python setup.py clean -a
-    python setup.py build
-    python setup.py nosetests
-sitepackages = True
diff --git a/required_pkgs/stsci.tools b/required_pkgs/stsci.tools
new file mode 160000
index 0000000..a2f7f46
--- /dev/null
+++ b/required_pkgs/stsci.tools
@@ -0,0 +1 @@
+Subproject commit a2f7f4618e3975a857c8c65a6fa1d49d8c66d482
diff --git a/required_pkgs/stsci.tools/LICENSE.txt b/required_pkgs/stsci.tools/LICENSE.txt
deleted file mode 100644
index 7e8019a..0000000
--- a/required_pkgs/stsci.tools/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
diff --git a/required_pkgs/stsci.tools/doc/Makefile b/required_pkgs/stsci.tools/doc/Makefile
deleted file mode 100644
index db44b5e..0000000
--- a/required_pkgs/stsci.tools/doc/Makefile
+++ /dev/null
@@ -1,89 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html      to make standalone HTML files"
-	@echo "  dirhtml   to make HTML files named index.html in directories"
-	@echo "  pickle    to make pickle files"
-	@echo "  json      to make JSON files"
-	@echo "  htmlhelp  to make HTML files and a HTML help project"
-	@echo "  qthelp    to make HTML files and a qthelp project"
-	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  changes   to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck to check all external links for integrity"
-	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytools.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytools.qhc"
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-	      "run these through (pdf)latex."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/required_pkgs/stsci.tools/doc/make.bat b/required_pkgs/stsci.tools/doc/make.bat
deleted file mode 100644
index 3c85599..0000000
--- a/required_pkgs/stsci.tools/doc/make.bat
+++ /dev/null
@@ -1,113 +0,0 @@
- at ECHO OFF
-
-REM Command file for Sphinx documentation
-
-set SPHINXBUILD=sphinx-build
-set BUILDDIR=build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
-if NOT "%PAPER%" == "" (
-	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
-)
-
-if "%1" == "" goto help
-
-if "%1" == "help" (
-	:help
-	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html      to make standalone HTML files
-	echo.  dirhtml   to make HTML files named index.html in directories
-	echo.  pickle    to make pickle files
-	echo.  json      to make JSON files
-	echo.  htmlhelp  to make HTML files and a HTML help project
-	echo.  qthelp    to make HTML files and a qthelp project
-	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  changes   to make an overview over all changed/added/deprecated items
-	echo.  linkcheck to check all external links for integrity
-	echo.  doctest   to run all doctests embedded in the documentation if enabled
-	goto end
-)
-
-if "%1" == "clean" (
-	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
-	del /q /s %BUILDDIR%\*
-	goto end
-)
-
-if "%1" == "html" (
-	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
-	goto end
-)
-
-if "%1" == "dirhtml" (
-	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
-	goto end
-)
-
-if "%1" == "pickle" (
-	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
-	echo.
-	echo.Build finished; now you can process the pickle files.
-	goto end
-)
-
-if "%1" == "json" (
-	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
-	echo.
-	echo.Build finished; now you can process the JSON files.
-	goto end
-)
-
-if "%1" == "htmlhelp" (
-	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
-	echo.
-	echo.Build finished; now you can run HTML Help Workshop with the ^
-.hhp project file in %BUILDDIR%/htmlhelp.
-	goto end
-)
-
-if "%1" == "qthelp" (
-	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
-	echo.
-	echo.Build finished; now you can run "qcollectiongenerator" with the ^
-.qhcp project file in %BUILDDIR%/qthelp, like this:
-	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\pytools.qhcp
-	echo.To view the help file:
-	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\pytools.ghc
-	goto end
-)
-
-if "%1" == "latex" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	echo.
-	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "changes" (
-	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
-	echo.
-	echo.The overview file is in %BUILDDIR%/changes.
-	goto end
-)
-
-if "%1" == "linkcheck" (
-	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
-	echo.
-	echo.Link check complete; look for any errors in the above output ^
-or in %BUILDDIR%/linkcheck/output.txt.
-	goto end
-)
-
-if "%1" == "doctest" (
-	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
-	echo.
-	echo.Testing of doctests in the sources finished, look at the ^
-results in %BUILDDIR%/doctest/output.txt.
-	goto end
-)
-
-:end
diff --git a/required_pkgs/stsci.tools/doc/source/analysis.rst b/required_pkgs/stsci.tools/doc/source/analysis.rst
deleted file mode 100644
index 00d8e4a..0000000
--- a/required_pkgs/stsci.tools/doc/source/analysis.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-**********************
-Data Analysis Routines
-**********************
-These modules provide basic data analysis or data fitting functionality.
-
-linefit
-=======
-.. automodule:: stsci.tools.linefit
-   :members:
-
-nmpfit
-======   
-.. automodule:: stsci.tools.nmpfit
-   :members:
-   
-xyinterp
-========
-.. automodule:: stsci.tools.xyinterp
-   :members:
-
-gfit
-====
-.. automodule:: stsci.tools.gfit
-   :members:
-   
-Image Combination Modules
-*************************
-The `numcombine` module serves as a limited replacement for IRAF's 'imcombine' task.
-
-.. automodule:: stsci.image.numcombine
-   :members:
-   :undoc-members:
diff --git a/required_pkgs/stsci.tools/doc/source/asnutil.rst b/required_pkgs/stsci.tools/doc/source/asnutil.rst
deleted file mode 100644
index 0c4e3b2..0000000
--- a/required_pkgs/stsci.tools/doc/source/asnutil.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-*******************************
-Association File Interpretation
-*******************************
-Association files serve as FITS tables which relate a set of input files to the generation of calibrated or combined products.  
-
-.. automodule:: stsci.tools.asnutil
-   :members:
-   :undoc-members:
-
diff --git a/required_pkgs/stsci.tools/doc/source/basicutils.rst b/required_pkgs/stsci.tools/doc/source/basicutils.rst
deleted file mode 100644
index 1b7fca5..0000000
--- a/required_pkgs/stsci.tools/doc/source/basicutils.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-************************
-General Python Utilities
-************************
-The modules and functions described here provide support for many of the most general operations used through out STScI_Python. 
-
-.. automodule:: stsci.tools.fileutil
-   :members:
-   :undoc-members:
-
-.. automodule:: stsci.tools.parseinput
-   :members:
-   :undoc-members:
-   
-.. automodule:: stsci.tools.irafglob
-   :members:
-   :undoc-members:
-   
-
-STScI_Python Help Support
-*************************
-The `versioninfo` module reports the version information for a defined set of packages installed as part of STScI_Python which can be sent in as part of a help call.  This information can then be used to help identify what software has been installed so that the source of the reported problem can be more easily identified.
-   
-.. automodule:: stsci.tools.versioninfo
-   :members:
-   :undoc-members:
- 
diff --git a/required_pkgs/stsci.tools/doc/source/bitmask.rst b/required_pkgs/stsci.tools/doc/source/bitmask.rst
deleted file mode 100644
index 00be52c..0000000
--- a/required_pkgs/stsci.tools/doc/source/bitmask.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-*********************************************************
-Utility functions for handling bit masks and mask arrays.
-*********************************************************
-
-.. moduleauthor:: Mihai Cara <help at stsci.edu>
-
-.. currentmodule:: stsci.tools.bitmask
-
-.. automodule:: stsci.tools.bitmask
-   :members:
diff --git a/required_pkgs/stsci.tools/doc/source/conf.py b/required_pkgs/stsci.tools/doc/source/conf.py
deleted file mode 100644
index 008ac5a..0000000
--- a/required_pkgs/stsci.tools/doc/source/conf.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# stsci.tools documentation build configuration file, created by
-# sphinx-quickstart on Thu Oct  7 13:09:39 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-from stsci.sphinxext.conf import *
-
-# Check Sphinx version
-import sphinx
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions += ['sphinx.ext.autodoc', 'sphinx.ext.pngmath','numpydoc',
-                'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
-                'sphinx.ext.autosummary',
-                'sphinx.ext.doctest']
-
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'stsci.tools'
-copyright = u'2010, SSB'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '2.9'
-# The full version, including alpha/beta/rc tags.
-release = '2.9'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-default_role = 'autolink'
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-add_function_parentheses = False
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'sphinxdoc'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-html_static_path = ['_static']
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'stsci.toolsdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'stsci.tools.tex', u'stsci.tools Documentation',
-   u'SSB', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/required_pkgs/stsci.tools/doc/source/convert.rst b/required_pkgs/stsci.tools/doc/source/convert.rst
deleted file mode 100644
index 325828b..0000000
--- a/required_pkgs/stsci.tools/doc/source/convert.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-********************
-Conversion Utilities
-********************
-
-Convertwaiveredfits
-*******************
-.. automodule:: stsci.tools.convertwaiveredfits
-   :members:
-   :undoc-members:
-
-ReadGEIS
-********
-.. automodule:: stsci.tools.readgeis
-   :members:
-
-Check_files
-***********
-The `check_files` module provides functions to perform verification of
-input file formats for use in betadrizzle.  This set of functions also 
-includes format conversion functions to convert GEIS or waiver-FITS HST
-images into multi-extension FITS (MEF) files. 
-
-.. automodule:: stsci.tools.check_files
-   :members:
-   :undoc-members:
-   
diff --git a/required_pkgs/stsci.tools/doc/source/fitsdiff.rst b/required_pkgs/stsci.tools/doc/source/fitsdiff.rst
deleted file mode 100644
index 8119681..0000000
--- a/required_pkgs/stsci.tools/doc/source/fitsdiff.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-********
-FITSDIFF
-********
-This module serves as a large library of helpful file operations, both for 
-I/O of files and to extract information about the files. 
-
-.. automodule:: stsci.tools.fitsdiff
-   :members:
-
diff --git a/required_pkgs/stsci.tools/doc/source/fitsutil.rst b/required_pkgs/stsci.tools/doc/source/fitsutil.rst
deleted file mode 100644
index 9fcab61..0000000
--- a/required_pkgs/stsci.tools/doc/source/fitsutil.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-********************
-FITS/Image Utilities
-********************
-These modules provide support for working with FITS images, WCS information, or conversion of images to FITS format.  
-
-.. toctree::
-   :maxdepth: 3
-   
-   stpyfits
-   fitsdiff
-   wcsutil
-   convert
-   asnutil
-
diff --git a/required_pkgs/stsci.tools/doc/source/imgutils.rst b/required_pkgs/stsci.tools/doc/source/imgutils.rst
deleted file mode 100644
index c752138..0000000
--- a/required_pkgs/stsci.tools/doc/source/imgutils.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-********************
-Image Access Modules
-********************
-These modules all provide the capability to access sections of a FITS image 
-using a scrolling buffer. 
-   
-.. automodule:: stsci.tools.imageiter
-   :members:
-   
-.. automodule:: stsci.tools.nimageiter
-   :members:
-   
-.. automodule:: stsci.tools.iterfile
-   :members:
-   
diff --git a/required_pkgs/stsci.tools/doc/source/index.rst b/required_pkgs/stsci.tools/doc/source/index.rst
deleted file mode 100644
index a701baf..0000000
--- a/required_pkgs/stsci.tools/doc/source/index.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-.. stsci.tools documentation master file, created by
-   sphinx-quickstart on Thu Oct  7 13:09:39 2010.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to stsci.tools's documentation!
-=======================================
-
-The STSCI.TOOLS package in STScI_Python provides many functions for use by multiple software packages.
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   basicutils
-   fitsutil
-   imgutils
-   analysis
-   bitmask
-
-Building a TEAL Interface for Tasks
------------------------------------
-.. toctree::
-
-   teal_guide
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
diff --git a/required_pkgs/stsci.tools/doc/source/stpyfits.rst b/required_pkgs/stsci.tools/doc/source/stpyfits.rst
deleted file mode 100644
index 876c1ae..0000000
--- a/required_pkgs/stsci.tools/doc/source/stpyfits.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-********
-STPyFITS
-********
-The `stpyfits` module serves as a layer on top of PyFITS to support the use of single-valued arrays in extensions using the NPIX/PIXVALUE convention developed at STScI. The standard PyFITS module implements the strict FITS conventions, and these single-valued arrays are not part of the FITS standard.
-
-.. automodule:: stsci.tools.stpyfits
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
diff --git a/required_pkgs/stsci.tools/doc/source/teal_guide.rst b/required_pkgs/stsci.tools/doc/source/teal_guide.rst
deleted file mode 100644
index bcab4c7..0000000
--- a/required_pkgs/stsci.tools/doc/source/teal_guide.rst
+++ /dev/null
@@ -1,356 +0,0 @@
-=====================================
-Cookbook for Building TEAL Interfaces
-=====================================
-
-.. abstract::
-   :author: Warren J. Hack, Chris Sontag, Pey Lian Lim
-   :date: 30 January 2014
-
-   The release of the Task Editor And Launcher(TEAL) with STScI_Python
-   v2.10 in June 2010 provided the tools for building powerful GUI
-   interfaces for editing the parameters of complex tasks and running those
-   tasks with minimal effort. Learning how to use something new always
-   takes a special effort, and this document provides a step-by-step
-   walkthrough of how to build TEAL interfaces for any Python task to
-   make this effort as easy as possible.
-
-------------
-Introduction
-------------
-
-The new TEAL GUI can be added to nearly any Python task that allows users to set parameters to control the operation of the task. Adding a TEAL interface to a Python task requires some minor updates to the task's code in order to allow TEAL to create and control the GUI for setting all the necessary parameters. TEAL itself relies on the `ConfigObj module`_ for the basic parameter handling functions, with additional commands for implementing enhanced logic for controlling the GUI itself b [...]
-
-This document does not assume the user has any familiarity with using configobj in any manner and as as result includes very basic information which developers with some experience with configobj can simply skip over.
-
-The development of the TEAL interface for the task `resetbits` in the `betadrizzle` package is used as an example.  More elaborate examples will be explained after the development of the TEAL interface for `resetbits` has been described.
-
-----------------------
-Building the Interface
-----------------------
-
-The order of operations provided by this document is not the only order in which these steps can be performed.  This order starts with the simplest operation then leads the developer into what needs to be done next with the least amount of iteration.
-
-
-Step 1: Defining the Parameters
-===============================
-
-The primary purpose for developing a TEAL interface is to provide a GUI which can be used to set the values for the task's parameters. This requires that the developer identify the full set of task parameters which the user will be required to provide when running the task. The signature for the task `resetbits` is::
-
-    def reset_dq_bits(input,bits,extver=None,extname='dq')
-
-These parameters now have to be described in a pair of configobj parameter files in order to define the parameters, their types and any validation that may need to be performed on the input values.
-
-Default Values for the Parameters
----------------------------------
-The first file which needs to be defined provides the default values for each parameter.  Default values can be any string or numerical value, including "" or None.
-
-This task will simply need::
-
-    _task_name_ = resetbits
-    input = "*flt.fits"
-    bits = 4096
-    extver = None
-    extname = "dq"
-
-The first line tells TEAL what task should be associated with this file. The default values for `extver` and `extname` simply match the defaults provided in the function signature. No default values were required for the other parameters, but these values were provided to support the most common usage of this task.
-
-This file needs to be saved with a filename extension of `.cfg` in a `pars/` subdirectory of the task's package. For `resetbits`, this file would be saved in the installation directory as the file::
-
-    betadrizzle/lib/pars/resetbits.cfg
-
-This file will then get installed in the directory `betadrizzle/pars/resetbits.cfg` with the instructions on how to set that up coming in the last step of this process.
-
-Parameter Validation Rules
---------------------------
-The type for the parameter values, along with the definition of any range of valid values, is defined in the second configobj file: the configobj specification (configspec) file or `cfgspc` file.  This file can also provide rules for how the GUI should respond to input values as well, turning the TEAL GUI into an active assistant for the user when editing large or complex sets of parameters.
-
-For this example, we have a very basic set of parameters to define without any advance logic required. TEAL provides validators for a wide range of parameter types, including:
-
-  * `strings`: matches any string
-        Defined using `string_kw()`
-  * `integer`: matches any integer when a value is always required
-        Defined using `integer_kw()`
-  * `integer` or `None`: matches any integer or a value of None
-        Defined using `integer_or_none_kw()`
-  * `float`: matches any floating point value, when a value is always required
-        Defined using  `float_kw()`
-  * `float` or `None`: matches any floating point value or a value of None
-        Defined using `float_or_none_kw()`
-  * `boolean`: matches boolean values - ``True`` or ``False``
-        Defined using `boolean_kw()`
-  * `option`: matches only those values provided in the list of valid options
-        Defined using `option_kw()` command with the list of valid values as a parameter
-
-ConfigObj also has support for IP addresses as input parameters, and lists or tuples of any of these basic parameter types. Information on how to use those types, though, can be found within the `ConfigObj module`_ documentation.
-
-With these available parameter types in mind, the parameters for the task can be defined in the configspec file. For the `resetbits` task, we would need::
-
-    _task_name_ = string_kw(default="resetbits")
-    input = string_kw(default="*flt.fits", comment="Input files (name, suffix, or @list)")
-    bits = integer_kw(default=4096, comment="Bit value in array to be reset to 0")
-    extver = integer_or_none_kw(default=None, comment="EXTVER for arrays to be reset")
-    extname = string_kw(default="dq", comment= "EXTNAME for arrays to be reset")
-    mode = string_kw(default="all")
-
-Each of these parameter types includes a description of the parameter as the `comment` parameter, while default values can also be set using the `default` parameter value. This configspec file would then need to be saved alongside the .cfg file we just created as::
-
-    betadrizzle/lib/pars/resetbits.cfgspc
-
-.. note:: If you find that you need or want to add logic to have the GUI respond to various parameter inputs, this can always be added later by updating the parameter definitions in this file.  A more advanced example demonstrating how this can be done is provided in later sections.
-
-
-Step 2: TEAL Functions for the Task
-===================================
-TEAL requires that a couple of functions be defined within the task in order for the GUI to know how to get the help for the task and to run the task.  The functions that need to be defined are:
-
-  * ``run(configObj)``
-      This function serves as the hook to allow the GUI to run the task
-  * ``getHelpAsString()``
-      This function returns a long string which provides the help for the task
-
-The sole input from TEAL will be a ConfigObj instance, a class which provides all the input parameters and their values after validation by the configobj validators.  This instance gets passed by TEAL to the task's ``run()`` function and needs to be interpreted by that function in order to run the task.
-
-.. note:: The ``run()`` and ``getHelpAsString()`` functions, along with the task's primary user interface function, all need to be in the module with the same name as the task, as TEAL finds the task by importing the taskname. Or, these two functions may instead be arranged as methods of a task class, if desired.
-
-Defining the Help String
-------------------------
-The help information presented by the TEAL GUI comes from the ``getHelpAsString()`` function and gets displayed in a simple ASCII window.  The definition of this function can rely on help information included in the source code as docstrings or from an entirely separate file for tasks which have a large number of parameters or require long explanations to understand how to use the task.  The example from the `resetbits` task was simply::
-
-    def getHelpAsString():
-        helpString = 'resetbits Version '+__version__+__vdate__+'\n'
-        helpString += __doc__+'\n'
-
-        return helpString
-
-This function simply relies on the module level docstring to describe how to use this task, since it is a simple enough task with only a small number of parameters.
-
-.. note:: The formatting for the docstrings or help files read in by this function can use the numpy documentation restructured text markup format to be compatible with Sphinx when automatically generating documentation on this task using Sphinx. The numpy extension results in simple enough formatting that works well in the TEAL Help window without requiring any translation. More information on this format can be found in the `Numpy Documentation`_ pages.
-
-More complex tasks may require the documentation which would be too long to comfortably fit within docstrings in the code itself.  In those cases, separate files with extended discussions formatted using the numpy restructured text (reST) markup can be written and saved using the naming convention of ```<taskname>.help``` in the same directory as the module. The function can then simply use Python file operations to read it in as a list of strings which are concatenated together and pass [...]
-
-    def getHelpAsString():
-        helpString = 'resetbits Version '+__version__+__vdate__+'\n'
-        helpString += __doc__+'\n'
-        helpString += teal.getHelpFileAsString(__taskname__,__file__)
-
-        return helpString
-
-The parameter ``__taskname__`` should already have been defined for the task and gets used to find the file on disk with the name ``__taskname__.help``. The parameter ``__file__`` specifies where the task's module has been installed with the assumption that the help file has been installed in the same directory.  The task `betadrizzle` uses separate files and can be used as an example of how this can be implemented.
-
-Defining How to Run the Task
-----------------------------
-The ConfigObj instance passed by TEAL into the module needs to be interpreted and used to run the application.  There are a couple of different models which can be used to define the interface between the ``run()`` function and the task's primary user interface function (i.e. how it would be called in a script).
-
-  #. The ``run()`` function interprets the ConfigObj instance and calls the user interface
-     function. This works well for tasks which have a small number of parameters.
-
-  #. The ``run()`` function serves as the primary driver for the task and a separate function
-     gets defined to provide a simpler interface for the user to call interactively. This
-     works well for tasks which have a large number of parameters or sets of parameters
-     defined in the ConfigObj interface.
-
-Our simple example for the task ``resetbits`` uses the first model, since it only has the 4 parameters as input. The ``run()`` function can simply be defined in this case as::
-
-    def run(configobj=None):
-        ''' Teal interface for running this code. '''
-
-        reset_dq_bits(configobj['input'],configobj['bits'],
-                      extver=configobj['extver'],extname=configobj['extname'])
-
-    def reset_dq_bits(input,bits,extver=None,extname='dq'):
-
-Interactive use of this function would use the function ``reset_dq_bits()``.  The TEAL interface would pass the parameter values in through the run function to parse out the parameters and send it to that same function as if it were run interactively.
-
-
-Step 3: Advertising TEAL-enabled Tasks
-======================================
-Any task which has a TEAL interface implemented can be advertised to users of the package through the use of a ``teal`` function: ``teal.print_tasknames()``.  This function call can be added to the package's `__init__.py` module so that everytime the package gets imported, or reloaded, interactively, it will print out a message listing all the tasks which have TEAL GUI's available for use.  This listing will not be printed out when importing the package from another task.  The `__init__. [...]
-
-    # These lines allow TEAL to print out the names of TEAL-enabled tasks
-    # upon importing this package.
-    from stsci.tools import teal
-    teal.print_tasknames(__name__, os.path.dirname(__file__))
-
-
-Step 4: Setting Up Installation
-===============================
-The additional files which have been added to the package with the task now need to be installed alongside the module for the task.  Packages in the `STScI_Python` release get installed using Python's `distutils` mechanisms defined through the ``defsetup.py`` module. This file includes a dictionary for `setupargs` that describe the package and the files which need to be installed.  This needs to be updated to include all the new files as ``data_files`` by adding the following line to the [...]
-
-  'data_files':  [(pkg+"/pars",['lib/pars/*']),( pkg, ['lib/*.help'])],
-
-This will add the ConfigObj files in the `pars/` directory to the package while copying any ``.help`` files that were added to the same directory as the module.
-
-
-Step 5: Testing the GUI
-=======================
-Upon installing the new code, the TEAL interface will be available for the task.  There are a couple of ways of starting the GUI along with a way to grab the ConfigObj instance directly without starting up the GUI at all.
-
-Running the GUI under PYRAF
----------------------------
-The TEAL GUI can be started under PYRAF as if it were a standard IRAF task with the syntax::
-
-    >>> import <package>
-    >>> epar <taskname>
-
-For example, our task ``resetbits`` was installed as part of the ``betadrizzle`` package, so we could start the GUI using::
-
-    >>> import betadrizzle
-    >>> epar resetbits
-
-The fact that this task has a valid TEAL interface can be verified by insuring that the taskname gets printed out after the `import` statement.
-
-Running the GUI using Python
-----------------------------
-Fundamentally, TEAL is a Python GUI that can be run interactively under any Python interpreter, not just PyRAF.  It can be called for our example task using the syntax::
-
-    >>> from stsci.tools import teal
-    >>> cobj = teal.teal('resetbits')
-
-Getting the ConfigObj Without Starting the GUI
-----------------------------------------------
-The function for starting the TEAL GUI, ``teal.teal()``, has a parameter to control whether or not to start the GUI at all.  The ConfigObj instance can be returned for the task without starting the GUI by using the `loadOnly` parameter. For our example task, we would use the command::
-
-    >>> cobj = teal.teal('resetbits',loadOnly=True)
-
-The output variable `cobj` can then be passed along or examined depending on what needs to be done at the time.
-
----------------
-Advanced Topics
----------------
-The topics presented here describe how to take advantage of some of TEAL's more advanced functions for controlling the behavior of the GUI and for working with complex sets of parameters.
-
-Most of the examples for these advanced topics use the ConfgObj files and code defined for betadrizzle.
-
-
-Parameter Sections
-==================
-The ConfigObj specification allows for parameters to be organized into sections of related parameters.  The parameters defined in these sections remain together in a single dictionary within the ConfigObj instance so that they can be passed into tasks or interpreted as a single unit.  Use of sections within TEAL provides for the opportunity to control the GUI's behaviors based on whether or not the parameters in a given section need to be edited by the user.
-
-A parameter section can be defined simply by providing a title using the following syntax in both the .cfg and .cfgspc files::
-
-    [<title>]
-
-In betadrizzle, multiple sections are defined within the parameter interface.  One section has been defined in the .cfg file as::
-
-    [STEP 1: STATIC MASK]
-    static = True
-    static_sig = 4.0
-
-The .cfgspc definition for this section was specified as::
-
-    [STEP 1: STATIC MASK ]
-    static = boolean_kw(default=True, triggers='_section_switch_', comment="Create static bad-pixel mask from the data?")
-    static_sig = float_kw(default=4.0, comment= "Sigma*rms below mode to clip for static mask")
-
-These two sets of definitions work together to define the 'STEP 1: STATIC MASK' parameter section within the ConfigObj instance.  A program can then access the parameters in that section using the name of the section as the index in the ConfigObj instance.  The `static` and `static_sig` parameters would be accessed as::
-
-     >>> cobj = teal.teal('betadrizzle',loadOnly=True)
-     >>> step1 = cobj['STEP 1: STATIC MASK']
-     >>> step1
-     {'static': True, 'static_sig': 4.0}
-     >>> step1['static']
-     True
-
-
-Section Triggers
-================
-The behavior of the TEAL GUI can be controlled for each section in a number of ways, primarily as variations on the behavior of turning off the ability to edit the parameters in a section based on another parameters value.  A section parameter can be defined to allow the user to explicitly specify whether or not they need to work with those parameters.  This can the control whether or not the remainder of the parameters are editable through the use of the `triggers` argument in the .cfgs [...]
-
-The supported values for the `triggers` argument currently understood by TEAL are:
-
-    * ``_section_switch_``: Activates/Deactivates the ability to edit the values of the parameters in this section
-    * ``_rule<#>_``: Runs the code in this rule (defined elsewhere in the .cfgspc file) to automatically set this parameter, and control the behavior of other parameters like section defintions as well.
-
-The example for defining the section 'STEP 1: STATIC MASK' illustrates how to use the ``_section_switch_`` trigger to control the editing of the parameters in that section.
-
-Another argument defined as ``is_set_by="_rule<#>"`` allows the user to define when this section trigger can be set by other parameters using code and logic provided by the user. The value, ``_rule<#>_`` refers to code in the specified rule (defined at the end of the `.cfgspc` file) to determine what to do. The code which will be run must be found in the configspec file itself, although that code could reference other packages which are already installed.
-
-Use of Rules
-------------
-A special section can be appended to the end of the ConfigObj files (.cfg and .cfgspc files) to define rules which can implement nearly arbitrary code to determine how the GUI should treat parameter sections or even individual parameter settings. The return value for a rule should always be a boolean value that can be used in the logic of setting parameter values.
-
-This capability has been implemented in `betadrizzle` to control whether or not whole sections of parameters are even editable (used) to safeguard the user from performing steps which need more than 1 input when only 1 input is provided. The use of the ``_rule<#>_`` trigger can be seen in the `betadrizzle` .cfgspc file::
-
-    _task_name_ = string_kw(default="betadrizzle")
-    input = string_kw(default="*flt.fits", triggers='_rule1_', comment="Input files (name, suffix, or @list)")
-
-    <other parameters removed...>
-
-    [STEP 3: DRIZZLE SEPARATE IMAGES]
-    driz_separate = boolean_kw(default=True, triggers='_section_switch_', is_set_by='_rule1_', comment= "Drizzle onto separate output images?")
-    driz_sep_outnx = float_or_none_kw(default=None, comment="Size of separate output frame's X-axis (pixels)" )
-
-    <more parameters removed, until we get to the end of the file...>
-
-    [ _RULES_ ]
-    _rule1_ = string_kw(default='', when='defaults,entry', code='from stsci.tools import check_files; ans={ True:"yes",False:"no"}; OUT = ans[check_files.countInput(VAL) > 1]')
-
-In this case, ``_rule1_`` gets defined in the special parameter section ``[_RULES_]`` and triggered upon the editing of the parameter ``input``.  The result of this logic will then automatically set the value of any section parameter with the ``is_set_by=_rule1_`` argument, such as the parameter ``driz_separate`` in the section ``[STEP 3: DRIZZLE SEPARATE IMAGES]``
-
-The rule is executed within Python via two reserved words: ``VAL``, and ``OUT``.  ``VAL`` is automatically set to the value of the parameter which was used to trigger the execution of the rule, right before the rule is executed.  ``OUT`` will be the outcome of the rule code - the way it returns data to the rule execution machinery without calling a Python `return`.
-
-For the rule itself, one can optionally state (via the ``when`` argument) when the rule will be evaluated.  The currently supported options for the ``when`` argument (used for rules only) are:
-
-   * ``init``: Evaluate the rule upon starting the GUI
-   * ``defaults``: Evaluate the rule when the parameter value changes because the user clicked the "Defaults" button
-   * ``entry``: Evaluate the rule any time the value is changed in the GUI by the user manually
-   * ``fopen``: Evaluate the rule any time a saved file is opened by the user, changing the value
-   * ``always``: Evaluate the rule under any of these circumstances
-
-These options can be provided as a comma-separated list for combinations, although care should be taken to avoid any logic problems for when the rule gets evaluated.  If a ``when`` argument is not supplied, the value of ``always`` is assumed.
-
-Tricky Rules
-------------
-A parameter can also be controlled by multiple other parameters using the same
-rule. The example below shows how to get ``par1`` to be grayed out if
-``do_step1`` and ``do_step2`` are both disabled.
-
-In the .cfgspc file::
-
-    _task_name_ = string_kw(default="mytask")
-    par1 = string_kw(default="", active_if="_rule1_", comment="Shared parameter")
-
-    <other parameters removed...>
-
-    [STEP 1: FOO]
-    do_step1 = boolean_kw(default=True, triggers='_section_switch_', triggers='_rule1_', comment="Do Step 1?")
-
-    <other parameters removed...>
-
-    [STEP 2: BAR]
-    do_step2 = boolean_kw(default=True, triggers='_section_switch_', triggers='_rule1_', comment="Do Step 2?")
-
-    <more parameters removed, until we get to the end of the file...>
-
-    [ _RULES_ ]
-
-    _rule1_ = string_kw(default='', code='import mytask; OUT = mytask.tricky_rule(NAME, VAL)')
-
-In mytask.py file::
-
-    MY_FLAGS = {'do_step1': 'yes', 'do_step2': 'yes'}
-
-    def tricky_rule(in_name, in_val):
-        global MY_FLAGS
-        MY_FLAGS[in_name] = in_val
-        if MY_FLAGS['do_step1'] == 'yes' or MY_FLAGS['do_step2'] == 'yes':
-            ans = True
-        else:
-            ans = False
-        return ans
-
-For the rule itself, each rule has access to:
-
-    * ``SCOPE``
-    * ``NAME`` - Parameter name.
-    * ``VAL`` - Parameter value.
-    * ``TEAL`` - Reference to the main TEAL object, which knows the value
-      of all of its parameters. However, ``TEAL.getValue(NAME)`` returns
-      its value *before* it is updated.
-
-To debug your tricky rule, you can add print-out lines to your rule. TEAL log
-under ``Help`` menu also shows you what it is doing.
-
-
-.. _`ConfigObj module`: http://www.voidspace.org.uk/python/configobj.html
-.. _`Numpy Documentation`: http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines
diff --git a/required_pkgs/stsci.tools/doc/source/wcsutil.rst b/required_pkgs/stsci.tools/doc/source/wcsutil.rst
deleted file mode 100644
index 082e75e..0000000
--- a/required_pkgs/stsci.tools/doc/source/wcsutil.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-*******
-WCSUTIL
-*******
-The `wcsutil` module provides a stand-alone implementation of a WCS object which provides a number of basic transformations and query methods.  Most (if not all) of these functions can be obtained from the use of the PyWCS or STWCS WCS object if those packages have been installed.
-
-.. automodule:: stsci.tools.wcsutil
-   :members:
-   :undoc-members:
diff --git a/required_pkgs/stsci.tools/ez_setup.py b/required_pkgs/stsci.tools/ez_setup.py
deleted file mode 100644
index 23ea9a2..0000000
--- a/required_pkgs/stsci.tools/ez_setup.py
+++ /dev/null
@@ -1,332 +0,0 @@
-#!/usr/bin/env python
-"""Bootstrap setuptools installation
-
-To use setuptools in your package's setup.py, include this
-file in the same directory and add this to the top of your setup.py::
-
-    from ez_setup import use_setuptools
-    use_setuptools()
-
-To require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, simply supply
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import tempfile
-import zipfile
-import optparse
-import subprocess
-import platform
-import textwrap
-import contextlib
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-DEFAULT_VERSION = "3.5.1"
-DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
-
-def _python_cmd(*args):
-    """
-    Return True if the command succeeded.
-    """
-    args = (sys.executable,) + args
-    return subprocess.call(args) == 0
-
-
-def _install(archive_filename, install_args=()):
-    with archive_context(archive_filename):
-        # installing
-        log.warn('Installing Setuptools')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-
-
-def _build_egg(egg, archive_filename, to_dir):
-    with archive_context(archive_filename):
-        # building an egg
-        log.warn('Building a Setuptools egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def get_zip_class():
-    """
-    Supplement ZipFile class to support context manager for Python 2.6
-    """
-    class ContextualZipFile(zipfile.ZipFile):
-        def __enter__(self):
-            return self
-        def __exit__(self, type, value, traceback):
-            self.close
-    return zipfile.ZipFile if hasattr(zipfile.ZipFile, '__exit__') else \
-        ContextualZipFile
-
-
- at contextlib.contextmanager
-def archive_context(filename):
-    # extracting the archive
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        with get_zip_class()(filename) as archive:
-            archive.extractall()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-        yield
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        archive = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, archive, to_dir)
-    sys.path.insert(0, egg)
-
-    # Remove previously-imported pkg_resources if present (see
-    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
-    if 'pkg_resources' in sys.modules:
-        del sys.modules['pkg_resources']
-
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=os.curdir, download_delay=15):
-    to_dir = os.path.abspath(to_dir)
-    rep_modules = 'pkg_resources', 'setuptools'
-    imported = set(sys.modules).intersection(rep_modules)
-    try:
-        import pkg_resources
-    except ImportError:
-        return _do_download(version, download_base, to_dir, download_delay)
-    try:
-        pkg_resources.require("setuptools>=" + version)
-        return
-    except pkg_resources.DistributionNotFound:
-        return _do_download(version, download_base, to_dir, download_delay)
-    except pkg_resources.VersionConflict as VC_err:
-        if imported:
-            msg = textwrap.dedent("""
-                The required version of setuptools (>={version}) is not available,
-                and can't be installed while this script is running. Please
-                install a more recent version first, using
-                'easy_install -U setuptools'.
-
-                (Currently using {VC_err.args[0]!r})
-                """).format(VC_err=VC_err, version=version)
-            sys.stderr.write(msg)
-            sys.exit(2)
-
-        # otherwise, reload ok
-        del pkg_resources, sys.modules['pkg_resources']
-        return _do_download(version, download_base, to_dir, download_delay)
-
-def _clean_check(cmd, target):
-    """
-    Run the command to download target. If the command fails, clean up before
-    re-raising the error.
-    """
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        if os.access(target, os.F_OK):
-            os.unlink(target)
-        raise
-
-def download_file_powershell(url, target):
-    """
-    Download the file at url to target using Powershell (which will validate
-    trust). Raise an exception if the command cannot complete.
-    """
-    target = os.path.abspath(target)
-    cmd = [
-        'powershell',
-        '-Command',
-        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
-    ]
-    _clean_check(cmd, target)
-
-def has_powershell():
-    if platform.system() != 'Windows':
-        return False
-    cmd = ['powershell', '-Command', 'echo test']
-    devnull = open(os.path.devnull, 'wb')
-    try:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    finally:
-        devnull.close()
-    return True
-
-download_file_powershell.viable = has_powershell
-
-def download_file_curl(url, target):
-    cmd = ['curl', url, '--silent', '--output', target]
-    _clean_check(cmd, target)
-
-def has_curl():
-    cmd = ['curl', '--version']
-    devnull = open(os.path.devnull, 'wb')
-    try:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    finally:
-        devnull.close()
-    return True
-
-download_file_curl.viable = has_curl
-
-def download_file_wget(url, target):
-    cmd = ['wget', url, '--quiet', '--output-document', target]
-    _clean_check(cmd, target)
-
-def has_wget():
-    cmd = ['wget', '--version']
-    devnull = open(os.path.devnull, 'wb')
-    try:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    finally:
-        devnull.close()
-    return True
-
-download_file_wget.viable = has_wget
-
-def download_file_insecure(url, target):
-    """
-    Use Python to download the file, even though it cannot authenticate the
-    connection.
-    """
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    src = dst = None
-    try:
-        src = urlopen(url)
-        # Read/write all in one block, so we don't create a corrupt file
-        # if the download is interrupted.
-        data = src.read()
-        dst = open(target, "wb")
-        dst.write(data)
-    finally:
-        if src:
-            src.close()
-        if dst:
-            dst.close()
-
-download_file_insecure.viable = lambda: True
-
-def get_best_downloader():
-    downloaders = [
-        download_file_powershell,
-        download_file_curl,
-        download_file_wget,
-        download_file_insecure,
-    ]
-
-    for dl in downloaders:
-        if dl.viable():
-            return dl
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
-    """
-    Download setuptools from a specified location and return its filename
-
-    `version` should be a valid setuptools version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-
-    ``downloader_factory`` should be a function taking no arguments and
-    returning a function for downloading a URL to a target.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    zip_name = "setuptools-%s.zip" % version
-    url = download_base + zip_name
-    saveto = os.path.join(to_dir, zip_name)
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        log.warn("Downloading %s", url)
-        downloader = downloader_factory()
-        downloader(url, saveto)
-    return os.path.realpath(saveto)
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the setuptools package
-    """
-    return ['--user'] if options.user_install else []
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the setuptools package')
-    parser.add_option(
-        '--insecure', dest='downloader_factory', action='store_const',
-        const=lambda: download_file_insecure, default=get_best_downloader,
-        help='Use internal, non-validating downloader'
-    )
-    parser.add_option(
-        '--version', help="Specify which version to download",
-        default=DEFAULT_VERSION,
-    )
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main():
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    archive = download_setuptools(
-        version=options.version,
-        download_base=options.download_base,
-        downloader_factory=options.downloader_factory,
-    )
-    return _install(archive, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/required_pkgs/stsci.tools/lib/stsci/__init__.py b/required_pkgs/stsci.tools/lib/stsci/__init__.py
deleted file mode 100644
index e6e3521..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
# Declare 'stsci' as a namespace package so sibling distributions
# (stsci.tools, stsci.distutils, ...) can all contribute modules to it.
try:
    # As long as we're using setuptools/distribute, we need to do this the
    # setuptools way or else pkg_resources will throw up unnecessary and
    # annoying warnings (even though the namespace mechanism will still
    # otherwise work without it).
    # Get rid of this as soon as setuptools/distribute is dead.
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # pkg_resources unavailable: fall back to the stdlib pkgutil mechanism.
    pass
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/__init__.py b/required_pkgs/stsci.tools/lib/stsci/tools/__init__.py
deleted file mode 100644
index 7defa85..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from __future__ import division  # confidence high
-
-from .version import *
-
-import stsci.tools.tester
def test(*args,**kwds):
    # Convenience entry point: run this package's test suite through
    # stsci.tools.tester, pinning modname to this package so the right
    # tests are discovered.  Extra args/kwds are passed straight through.
    stsci.tools.tester.test(modname=__name__, *args, **kwds)
-
-
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/alert.py b/required_pkgs/stsci.tools/lib/stsci/tools/alert.py
deleted file mode 100644
index f66d2dc..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/alert.py
+++ /dev/null
@@ -1,133 +0,0 @@
-####
-#       Class AlertDialog
-#
-#       Purpose
-#       -------
-#
#       AlertDialogs are widgets that allow one to pop up warnings, one-line
#       questions, etc. They return one of the standard action numbers:
-#       0 => Cancel was pressed
-#       1 => Yes was pressed
-#       2 => No was pressed
-#
-#       Standard Usage
-#       --------------
-#
-#       F = AlertDialog(widget, message)
-#       action = F.Show()
-####
-"""
-$Id: alert.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-
-from __future__ import absolute_import, division # confidence high
-
-from .dialog import *
-
class AlertDialog(ModalDialog):
    """Base modal pop-up for warnings and one-line questions.

    Show() returns one of the standard action codes:
    0 = Cancel, 1 = Yes/OK, 2 = No.  Subclasses choose the bitmap and
    which buttons appear by overriding SetupDialog().
    """

    def __init__(self, widget, msg):
        self.widget = widget
        self.msgString = msg
        Dialog.__init__(self, widget)

    def SetupDialog(self):
        # Upper frame: the bitmap on the left plus one Label per message line.
        upperFrame = Frame(self.top)
        upperFrame['relief'] = 'raised'
        upperFrame['bd'] = 1
        upperFrame.pack({'expand': 'yes', 'side': 'top', 'fill': 'both'})
        self.bitmap = Label(upperFrame)
        self.bitmap.pack({'side': 'left'})
        for msgLine in self.msgString.split("\n"):
            lineLabel = Label(upperFrame)
            lineLabel["text"] = msgLine
            lineLabel.pack({'expand': 'yes', 'side': 'top', 'anchor': 'nw',
                            'fill': 'x'})
        # Lower frame: receives the buttons created by subclasses.
        self.lowerFrame = Frame(self.top)
        self.lowerFrame['relief'] = 'raised'
        self.lowerFrame['bd'] = 1
        self.lowerFrame.pack({'expand': 'yes', 'side': 'top', 'pady': '2',
                              'fill': 'both'})

    def OkPressed(self):
        self.TerminateDialog(1)

    def CancelPressed(self):
        self.TerminateDialog(0)

    def NoPressed(self):
        self.TerminateDialog(2)

    def CreateButton(self, text, command):
        # NOTE: only the most recently created button is kept in self.button.
        self.button = Button(self.lowerFrame)
        self.button["text"] = text
        self.button["command"] = command
        self.button.pack({'expand': 'yes', 'pady': '2', 'side': 'left'})
-
-####
-#       Class ErrorDialog
-#
-#       Purpose
-#       -------
-#
-#       To pop up an error message
-####
-
class ErrorDialog(AlertDialog):
    """Modal dialog that shows an error bitmap with a single OK button.

    Show() returns 1 (OK pressed).
    """

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'error'
        self.CreateButton("OK", self.OkPressed)
-
-####
-#       Class WarningDialog
-#
-#       Purpose
-#       -------
-#
-#       To pop up a warning message.
-####
-
class WarningDialog(AlertDialog):
    """Modal dialog that shows a warning bitmap with Yes/No buttons.

    Show() returns 1 for Yes and 0 for No.
    # NOTE(review): "No" is wired to CancelPressed (code 0) rather than
    # NoPressed (code 2) — presumably so callers treat No as cancel; confirm.
    """

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'warning'
        self.CreateButton("Yes", self.OkPressed)
        self.CreateButton("No", self.CancelPressed)
-
-####
-#       Class QuestionDialog
-#
-#       Purpose
-#       -------
-#
-#       To pop up a simple question
-####
-
class QuestionDialog(AlertDialog):
    """Modal dialog posing a simple question with Yes/No/Cancel buttons.

    Show() returns 1 for Yes, 2 for No, 0 for Cancel.
    """

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'question'
        self.CreateButton("Yes", self.OkPressed)
        self.CreateButton("No", self.NoPressed)
        self.CreateButton("Cancel", self.CancelPressed)
-
-####
-#       Class MessageDialog
-#
-#       Purpose
-#       -------
-#
-#       To pop up a message.
-####
-
class MessageDialog(AlertDialog):
    """Modal dialog showing a message with a single Dismiss button.

    Show() returns 0 (Dismiss is wired to CancelPressed).
    """

    def SetupDialog(self):
        AlertDialog.SetupDialog(self)
        self.bitmap['bitmap'] = 'warning'
        self.CreateButton("Dismiss", self.CancelPressed)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/asnutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/asnutil.py
deleted file mode 100644
index 9c0f1a8..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/asnutil.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""
-A module which provides utilities for reading, writing, creating and updating
-association tables and shift files.
-
-:author: Warren Hack, Nadia Dencheva
-:version: '0.1 (2008-01-03)'
-"""
-
-from __future__ import absolute_import, division, print_function # confidence high
-
-from . import fileutil as fu
-from . import wcsutil
-import astropy
-from astropy.io import fits
-import numpy as N
-import os.path, time
-
-
-__version__ = '0.2(2015-06-23)'
-
-
def readASNTable(fname, output=None, prodonly=False):
    """
    Given a fits filename representing an association table reads in the table as a
    dictionary which can be used by pydrizzle and multidrizzle.

    An association table is a FITS binary table with 2 required columns: 'MEMNAME',
    'MEMTYPE'. It checks 'MEMPRSNT' column and removes all files for which its value is 'no'.

    Parameters
    ----------
    fname : str
        name of association table
    output : str
        name of output product - if not specified by the user,
        the first PROD-DTH name is used if present,
        if not, the first PROD-RPT name is used if present,
        if not, the rootname of the input association table is used.
    prodonly : bool
        what files should be considered as input
        if True - select only MEMTYPE=PROD* as input
        if False - select only MEMTYPE=EXP as input

    Returns
    -------
    asndict : dict
        A dictionary-like object with all the association information.

    Examples
    --------
    An association table can be read from a file using the following commands::

    >>> from stsci.tools import asnutil
    >>> asntab = asnutil.readASNTable('j8bt06010_shifts_asn.fits', prodonly=False)

    The `asntab` object can now be passed to other code to provide relationships
    between input and output images defined by the association table.

    """

    try:
        f = fits.open(fu.osfn(fname))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed.
        raise IOError("Can't open file %s\n" % fname)

    colnames = f[1].data.names
    try:
        colunits = f[1].data.units
    except AttributeError:
        # Older table interfaces may not expose per-column units.
        colunits = None

    hdr = f[0].header

    if 'MEMNAME' not in colnames or 'MEMTYPE' not in colnames:
        msg = 'Association table incomplete: required column(s) MEMNAME/MEMTYPE NOT found!'
        raise ValueError(msg)

    d = {}
    for n in colnames:
        d[n] = f[1].data.field(n)
    f.close()

    valid_input = d['MEMPRSNT'].copy()
    memtype = d['MEMTYPE'].copy()
    # Index arrays of rows whose MEMTYPE starts with each product type.
    prod_dth = (memtype.find('PROD-DTH') == 0).nonzero()[0]
    prod_rpt = (memtype.find('PROD-RPT') == 0).nonzero()[0]
    prod_crj = (memtype.find('PROD-CRJ') == 0).nonzero()[0]

    # Set output name.  Use len() for truth tests: 'if array:' raises
    # ValueError for index arrays with more than one element.
    if output is None:
        if len(prod_dth):
            output = d['MEMNAME'][prod_dth[0]]
        elif len(prod_rpt):
            output = d['MEMNAME'][prod_rpt[0]]
        elif len(prod_crj):
            output = d['MEMNAME'][prod_crj[0]]
        else:
            output = fname.split('_')[0]

    # Select which rows count as input (renamed from 'input', which
    # shadowed the builtin).
    if prodonly:
        selected = d['MEMTYPE'].find('PROD') == 0
        if len(prod_dth):
            selected[prod_dth] = False
    else:
        selected = (d['MEMTYPE'].find('EXP') == 0)
    valid_input *= selected

    for k in d:
        d[k] = d[k][valid_input]

    infiles = list(d['MEMNAME'].lower())
    if not infiles:
        print("No valid input specified")
        return None

    # Determine whether the table carries absolute shifts (XOFFSET/YOFFSET)
    # or delta shifts (XDELTA/YDELTA).
    if ('XOFFSET' in colnames and d['XOFFSET'].any()) or ('YOFFSET' in colnames and d['YOFFSET'].any()):
        abshift = True
        dshift = False
        try:
            units = colunits[colnames.index('XOFFSET')]
        except Exception:
            units = 'pixels'
        xshifts = list(d['XOFFSET'])
        yshifts = list(d['YOFFSET'])
    elif ('XDELTA' in colnames and d['XDELTA'].any()) or ('YDELTA' in colnames and d['YDELTA'].any()):
        abshift = False
        dshift = True
        try:
            units = colunits[colnames.index('XDELTA')]
        except Exception:
            units = 'pixels'
        xshifts = list(d['XDELTA'])
        yshifts = list(d['YDELTA'])
    else:
        abshift = False
        dshift = False
    members = {}

    if not abshift and not dshift:
        asndict = ASNTable(infiles, output=output)
        asndict.create()
        return asndict
    else:
        try:
            refimage = hdr['refimage']
        except KeyError:
            refimage = None
        try:
            frame = hdr['shframe']
        except KeyError:
            frame = 'input'
        # ROTATION/SCALE columns are optional; default to the identity
        # transform when absent.  (Previously 'rots'/'scales' and
        # 'rot'/'scale' could be referenced unbound, raising NameError.)
        rots = list(d['ROTATION']) if 'ROTATION' in colnames else None
        scales = list(d['SCALE']) if 'SCALE' in colnames else None

        for r in range(len(infiles)):
            rot = rots[r] if rots else 0.0
            scale = scales[r] if scales else 1.0
            members[infiles[r]] = ASNMember(row=r, dshift=dshift, abshift=abshift, rot=rot,
                                            xshift=xshifts[r], yshift=yshifts[r], scale=scale,
                                            refimage=refimage, shift_frame=frame,
                                            shift_units=units)

        asndict = ASNTable(infiles, output=output)
        asndict.create()
        asndict['members'].update(members)
        return asndict
-
-
class ASNTable(dict):
    """
    A dictionary like object which represents an association table.
    An ASNTable object looks like this::

        {'members':
                {'j8bt06nyq': {'abshift': False,
                           'dshift': True,
                           'refimage': 'j8bt06010_shifts_asn.fits[wcs]',
                           'rot': 0.0,
                           'row': 0,
                           'scale': 1.0,
                           'shift_frame': 'input',
                           'shift_units': 'pixels',
                           'xoff': 0.0,
                           'xshift': 0.0,
                           'yoff': 0.0,
                           'yshift': 0.0},
                'j8bt06nzq': {'abshift': False,
                           'dshift': True,
                           'refimage': 'j8bt06010_shifts_asn.fits[wcs]',
                           'rot': 359.99829,
                           'row': 1,
                           'scale': 1.000165,
                           'shift_frame': 'input',
                           'shift_units': 'pixels',
                           'xoff': 0.0,
                           'xshift': 0.4091132,
                           'yoff': 0.0,
                           'yshift': -0.56702018}},
                'order': ['j8bt06nyq', 'j8bt06nzq'],
                'output': 'j8bt06nyq'}

    Examples
    --------
    Creating an ASNTable object from 3 filenames and a shift file would be done using::

    >>> asnt=ASNTable([fname1,fname2,  fname3], shiftfile='shifts.txt')

    The ASNTable object would have the 'members' and 'order'
    in the association table populated based on `infiles` and `shiftfile`.

    This creates a blank association table from the ASNTable object::

    >>> asnt.create()

    """
    def __init__(self, inlist=None, output=None, shiftfile=None):
        """
        Parameters
        ----------
        inlist : list
            A list of filenames.
        output :  str
            A user specified output name or 'final'.
        shiftfile : str
            A name of a shift file, if given, the association table will be
            updated with the values in the shift file.

        """

        # NOTE(review): '== None'/'!= None' comparisons throughout this class
        # should be 'is None'/'is not None'.
        if output == None:
            if len(inlist) == 1:
                self.output = fu.buildNewRootname(inlist[0])
            else:
                self.output = 'final'
        else:
            self.output = fu.buildNewRootname(output)
            # Ensure that output name does not already contain '_drz'
            _indx = self.output.find('_drz')
            if _indx > 0:
                self.output = self.output[:_indx]

        self.order = []
        if inlist != None:
            for fn in inlist:
                if fu.findFile(fu.buildRootname(fn)):
                    self.order.append(fu.buildNewRootname(fn))
                else:
                    # This may mean corrupted asn table in which a file is listed as present
                    # when it is missing.
                    raise IOError('File %s not found.\n' %fn)
        # Initialize the dict portion; 'order'/'members' are filled by create().
        dict.__init__(self, output=self.output, order=[], members={})
        if inlist != None:
            self.input = [fu.buildRootname(f) for f in inlist]
        self.shiftfile = shiftfile

    def create(self, shiftfile=None):
        # Populate self['members'] and self['order'], optionally applying
        # shifts from a shift file.
        members = {}
        row = 0
        dshift = False
        abshift = False

        # Parse out shift file, if provided
        # NOTE(review): when the 'shiftfile' argument is given, sdict is
        # created but the member-building code below is attached only to the
        # elif branch, so 'members' stays empty in that case — looks like an
        # indentation bug; confirm intent before relying on this path.
        if shiftfile != None:
            sdict = ShiftFile(shiftfile)
        elif self.shiftfile != None:
            sdict = ShiftFile(self.shiftfile)

            shift_frame = sdict['frame']
            shift_units = sdict['units']
            refimage = sdict['refimage']
            if sdict['form']=='delta':
                dshift = True
            else:
                abshift = True

            for f in self.input:
                xshift = sdict[f][0]
                yshift = sdict[f][1]
                rot = sdict[f][2]
                scale = sdict[f][3]
                #This may not be the right thing to do, may want to keep _flt in rootname
                # to distinguish between _c0h.fits, _c0f.fits and '.c0h'
                fname = fu.buildNewRootname(f)
                members[fname] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift,
                                  yshift=yshift, scale=scale, refimage=refimage, shift_frame=shift_frame,
                                  shift_units=shift_units)
                row+=1
        else:
            # No shift file at all: every member gets default (identity) shifts.
            for f in self.input:
                # also here

                fname = fu.buildNewRootname(f)
                members[fname] = ASNMember(row=row)
                row+=1

        self['members'].update(members)
        self['order']=self.order


    def update(self, members=None, shiftfile=None, replace=False):
        # NOTE(review): this help text is assigned to an unused local rather
        # than being the method docstring.
        __help_update="""
        Update an existing association table.

        Parameters
        ----------
        members : dict
            A dictionary representing asndict['members'].
        shiftfile : str
            The name of a shift file
            If given, shiftfile will replace shifts in an asndict.
        replace : bool False(default)
            A flag which indicates whether the 'members' item
            of an association table should be updated or replaced.
            default: False
            If True, it's up to the user to replace also asndict['order']
        """
        if members and isinstance(members, dict):
            if not replace:
                # NOTE(review): update(members=members) passes 'members' as a
                # *keyword*, storing the whole dict under the key 'members'
                # inside self['members']; plain update(members) was probably
                # intended — confirm.
                self['members'].update(members=members)
            else:
                self['members'] = members
        elif shiftfile:
            members = {}
            abshift = False
            dshift = False
            row = 0
            sdict = ShiftFile(shiftfile)
            shift_frame = sdict['frame']
            shift_units = sdict['units']
            refimage = sdict['refimage']
            if sdict['form']=='delta':
                dshift = True
            else:
                abshift = True

            for f in self.order:
                fullname = fu.buildRootname(f)
                xshift = sdict[fullname][0]
                yshift = sdict[fullname][1]
                rot = sdict[fullname][2]
                scale = sdict[fullname][3]
                members[f] = ASNMember(row=row, dshift=dshift, abshift=abshift, rot=rot, xshift=xshift,
                                  yshift=yshift, scale=scale, refimage=refimage, shift_frame=shift_frame,
                                  shift_units=shift_units)
                row+=1
            self['members'].update(members)
        else:
            #print __help_update
            pass

    def write(self, output=None):
        """
        Write association table to a file.

        """
        if not output:
            outfile = self['output']+'_asn.fits'
            output = self['output']
        else:
            outfile = output

        # Delete the file if it exists.
        # NOTE(review): 'warningmsg' is built but never printed or logged.
        if os.path.exists(outfile):
            warningmsg =  "\n#########################################\n"
            warningmsg += "#                                       #\n"
            warningmsg += "# WARNING:                              #\n"
            warningmsg += "#  The existing association table,      #\n"
            warningmsg += "           " + str(outfile) + '\n'
            warningmsg += "#  is being replaced.                   #\n"
            warningmsg += "#                                       #\n"
            warningmsg += "#########################################\n\n"
        fasn = fits.HDUList()

        # Compute maximum length of MEMNAME for table column definition
        _maxlen = 0
        for _fname in self['order']:
            if len(_fname) > _maxlen: _maxlen = len(_fname)
        # Enforce a minimum size of 24
        if _maxlen < 24: _maxlen = 24
        namelen_str = str(_maxlen+2)+'A'
        self.buildPrimary(fasn, output=output)

        # Build the table columns: the input members plus one final row for
        # the output product (shifts 0, scale 1, MEMPRSNT False).
        mname = self['order'][:]
        mname.append(output)
        mtype = ['EXP-DTH' for l in self['order']]
        mtype.append('PROD-DTH')
        mprsn = [True for l in self['order']]
        mprsn.append(False)
        xoff = [self['members'][l]['xoff'] for l in self['order']]
        xoff.append(0.0)
        yoff = [self['members'][l]['yoff'] for l in self['order']]
        yoff.append(0.0)
        xsh = [self['members'][l]['xshift'] for l in self['order']]
        xsh.append(0.0)
        ysh = [self['members'][l]['yshift'] for l in self['order']]
        ysh.append(0.0)
        rot = [self['members'][l]['rot'] for l in self['order']]
        rot.append(0.0)
        scl = [self['members'][l]['scale'] for l in self['order']]
        scl.append(1.0)

        memname = fits.Column(name='MEMNAME',format=namelen_str,array=N.char.array(mname))
        memtype = fits.Column(name='MEMTYPE',format='14A',array=N.char.array(mtype))
        memprsn = fits.Column(name='MEMPRSNT', format='L', array=N.array(mprsn).astype(N.uint8))
        xoffset = fits.Column(name='XOFFSET', format='E', array=N.array(xoff))
        yoffset = fits.Column(name='YOFFSET', format='E', array=N.array(yoff))
        xdelta = fits.Column(name='XDELTA', format='E', array=N.array(xsh))
        ydelta = fits.Column(name='YDELTA', format='E', array=N.array(ysh))
        rotation = fits.Column(name='ROTATION', format='E', array=N.array(rot))
        scale = fits.Column(name='SCALE', format='E', array=N.array(scl))
        cols = fits.ColDefs([memname,memtype,memprsn,xoffset,yoffset,xdelta,ydelta,rotation,scale])
        hdu = fits.BinTableHDU.from_columns(cols)
        fasn.append(hdu)
        # NOTE(review): 'clobber' is deprecated in newer astropy in favor of
        # 'overwrite' — confirm the astropy versions this must support.
        fasn.writeto(outfile, clobber=True)
        fasn.close()
        mem0 = self['order'][0]
        refimg = self['members'][mem0]['refimage']
        if refimg != None:
            whdu = wcsutil.WCSObject(refimg)
            whdu.createReferenceWCS(outfile,overwrite=False)
            ftab = fits.open(outfile)
            ftab['primary'].header['refimage'] = outfile+"[wcs]"
            ftab.close()
        # NOTE(review): 'whdu' is unbound when refimg is None, so this 'del'
        # would raise NameError in that case — confirm and guard if needed.
        del whdu



    def buildPrimary(self, fasn, output=None):
        # Construct the PRIMARY HDU from a template header, then overwrite
        # the template values with ones taken from the first member image.
        _prihdr = fits.Header([fits.Card('SIMPLE', True, 'Fits standard'),
                    fits.Card('BITPIX  ',                    16 ,' Bits per pixel'),
                    fits.Card('NAXIS   ',                     0 ,' Number of axes'),
                    fits.Card('ORIGIN  ',  'NOAO-IRAF FITS Image Kernel July 1999' ,'FITS file originator'),
                    fits.Card('IRAF-TLM',  '18:26:13 (27/03/2000)' ,' Time of last modification'),
                    fits.Card('EXTEND  ', True ,' File may contain standard extensions'),
                    fits.Card('NEXTEND ',                     1 ,' Number of standard extensions'),
                    fits.Card('DATE    ',  '2001-02-14T20:07:57',' date this file was written (yyyy-mm-dd)'),
                    fits.Card('FILENAME',  'hr_box_asn.fits'            ,' name of file'),
                    fits.Card('FILETYPE',  'ASN_TABLE'          ,' type of data found in data file'),
                    fits.Card('TELESCOP',  'HST'                ,' telescope used to acquire data'),
                    fits.Card('INSTRUME',  'ACS   '             ,' identifier for instrument used to acquire data'),
                    fits.Card('EQUINOX ',                2000.0 ,' equinox of celestial coord. system'),
                    fits.Card('ROOTNAME',  'hr_box  '              ,' rootname of the observation set'),
                    fits.Card('PRIMESI ',  'ACS   '             ,' instrument designated as prime'),
                    fits.Card('TARGNAME',  'SIM-DITHER'                     ,'proposer\'s target name'),
                    fits.Card('RA_TARG ',                    0. ,' right ascension of the target (deg) (J2000)'),
                    fits.Card('DEC_TARG',                    0. ,' declination of the target (deg) (J2000)'),
                    fits.Card('DETECTOR',  'HRC     '           ,' detector in use: WFC, HRC, or SBC'),
                    fits.Card('ASN_ID  ',  'hr_box  '           ,' unique identifier assigned to association'),
                    fits.Card('ASN_TAB ',  'hr_box_asn.fits'         ,' name of the association table')])

        # Format time values for keywords IRAF-TLM, and DATE
        _ltime = time.localtime(time.time())
        tlm_str = time.strftime('%H:%M:%S (%d/%m/%Y)',_ltime)
        date_str = time.strftime('%Y-%m-%dT%H:%M:%S',_ltime)
        origin_str = 'FITS Version '+ astropy.__version__
        # Build PRIMARY HDU
        _hdu = fits.PrimaryHDU(header=_prihdr)
        fasn.append(_hdu)

        newhdr = fasn['PRIMARY'].header
        mem0name = self['order'][0]
        refimg = self['members'][mem0name]['refimage']
        shframe = self['members'][mem0name]['shift_frame']
        fullname = fu.buildRootname(mem0name,ext=['_flt.fits', '_c0h.fits', '_c0f.fits'])
        try:
            # Open img1 to obtain keyword values for updating template
            fimg1 = fits.open(fullname)
        except:
            # NOTE(review): if this open fails, 'fimg1' is unbound and the
            # code below raises NameError right after printing — confirm.
            print('File %s does not exist' % fullname)


        kws = ['INSTRUME', 'PRIMESI', 'TARGNAME', 'DETECTOR', 'RA_TARG', 'DEC_TARG']
        mem0hdr = fimg1['PRIMARY'].header
        default = 'UNKNOWN'
        for kw in kws:
            try:
                newhdr[kw] = mem0hdr[kw]
            except:
                newhdr[kw] = default
        fimg1.close()

        if not output:
            output = self['output']

        outfilename = fu.buildNewRootname(output, extn='_asn.fits')
        newhdr['IRAF-TLM']=tlm_str
        newhdr['DATE'] = date_str
        newhdr['ORIGIN'] = origin_str
        newhdr['ROOTNAME'] = output

        newhdr['FILENAME'] = outfilename
        newhdr['ASN_ID'] = output
        newhdr['ASN_TAB'] = outfilename
        newhdr['SHFRAME'] = (shframe, "Frame which shifts are measured")
        newhdr['REFIMAGE'] = (refimg, "Image shifts were measured from")
-
-
-
class ASNMember(dict):
    """
    A dictionary like object representing a member of an association table. It looks like this::

        'j8bt06nzq': {'abshift': False,
                  'dshift': True,
                  'refimage': 'j8bt06010_shifts_asn.fits[wcs]',
                  'rot': 359.99829,
                  'row': 1,
                  'scale': 1.000165,
                  'shift_frame': 'input',
                  'shift_units': 'pixels',
                  'xoff': 0.0,
                  'xshift': 0.4091132,
                  'yoff': 0.0,
                  'yshift': -0.56702018}

    If `abshift` is True, shifts, rotation and scale refer to absolute shifts.
    If `dshift`  is True, they are delta shifts.

    """

    def __init__(self, xoff=0.0, yoff=0.0, rot=0.0, xshift=0.0,
                 yshift=0.0, scale=1.0, dshift=False, abshift=False, refimage="", shift_frame="",
                 shift_units='pixels', row=0):
        # Every constructor argument becomes a key of the underlying dict.
        super(ASNMember, self).__init__(
            xoff=xoff, yoff=yoff, xshift=xshift, yshift=yshift, rot=rot,
            scale=scale, dshift=dshift, abshift=abshift, refimage=refimage,
            shift_frame=shift_frame, shift_units=shift_units, row=row)
-
class ShiftFile(dict):
    """
    A shift file has the following format (name, Xsh, Ysh, Rot, Scale)::

        # frame: output
        # refimage: tweak_wcs.fits[wcs]
        # form: delta
        # units: pixels
        j8bt06nyq_flt.fits    0.0  0.0    0.0    1.0
        j8bt06nzq_flt.fits    0.4091132  -0.5670202    359.9983    1.000165

    This object creates a `dict` like object representing a shift file used by Pydrizzle and Mirashift.
    """

    def __init__(self, filename="", form='delta', frame=None, units='pixels',
                 order=None, refimage=None, **kw):
        """
        :Purpose: Create a dict like ShiftFile object from a shift file on disk or from
                  variables in memory. If a file name is provided all other parameters are ignored.

        Examples
        ---------
        These examples demonstrate a couple of the most common usages.

        Read a shift file on disk using::

        >>> sdict = ShiftFile('shifts.txt')

        Pass values for the fields of the shift file and a dictionary with all files::

        >>> d={'j8bt06nyq_flt.fits': [0.0, 0.0, 0.0, 1.0],
              'j8bt06nzq_flt.fits': [0.4091132, -0.5670202, 359.9983, 1.000165]}

        >>> sdict = ShiftFile(form='absolute', frame='output', units='pixels', order=['j8bt06nyq_flt.fits',
                             'j8bt06nzq_flt.fits'], refimage='tweak_wcs.fits[wcs]', **d)

        The return value can then be used to provide the shift information to code in memory.

        Parameters
        ----------
        filename : str
            Name of shift file on disk, see above the expected format
        form : str
            Form of shifts (absolute|delta)
        frame : str
            Frame in which the shifts should be applied (input|output)
        units : str
            Units in which the shifts are measured.
        order : list
            Keeps track of the order of the files.
        refimage : str
                    name of reference image
         **d :  dict
                    keys: file names
                    values: a list:  [Xsh, Ysh, Rot, Scale]
                    The keys must match the files in the order parameter.

        Raises
        ------
        ValueError
            If reference file can't be found

        """
        ## History: This is refactored code which was initially in fileutil.py and
        ## pydrizzle: buildasn.py and updateasn.py

        dict.__init__(self, form=form, frame=frame, units=units, order=order,
                      refimage=refimage)

        if filename == "":
            self.update(kw)
        else:
            self.readShiftFile(filename)

        if not self.verifyShiftFile():
            msg = "\nReference image not found.\n "
            msg += "The keyword in the shift file has changed from 'reference' to 'refimage'.\n"
            msg += "Make sure this keyword is specified as 'refimage' in %s." % filename

            raise ValueError(msg)

    def readShiftFile(self, filename):
        """
        Reads a shift file from disk and populates a dictionary.
        """
        order = []
        fshift = open(filename, 'r')
        try:
            flines = fshift.readlines()
        finally:
            # Close even if readlines() raises.
            fshift.close()

        common = [f.strip('#').strip() for f in flines if f.startswith('#')]
        c = [line.split(': ') for line in common]

        # Keep only the recognized common-block keywords; any other '#' line
        # is an ordinary comment.  (Filtering into a new list fixes the old
        # remove-while-iterating loop, which skipped every other entry.)
        c = [l for l in c if l[0] in ['frame', 'refimage', 'form', 'units']]

        for line in c:
            line[1] = line[1].strip()
        self.update(c)

        files = [f.strip().split(' ', 1) for f in flines
                 if not (f.startswith('#') or f.strip() == '')]
        for f in files:
            order.append(f[0])

        self['order'] = order

        for f in files:
            # Check to see if filename provided is a full filename that corresponds
            # to a file on the path.  If not, try to convert given rootname into
            # a valid filename based on available files.  This may or may not
            # define the correct filename, which is why it prints out what it is
            # doing, so that the user can verify and edit the shiftfile if needed.
            #NOTE:
            # Supporting the specification of only rootnames in the shiftfile with this
            # filename expansion is NOT to be documented, but provided solely as
            # an undocumented, dangerous and not fully supported helper function for
            # some backwards compatibility.
            if not os.path.exists(f[0]):
                f[0] = fu.buildRootname(f[0])
                print('Defining filename in shiftfile as: ', f[0])

            f[1] = f[1].split()
            try:
                f[1] = [float(s) for s in f[1]]
            except ValueError:
                # Previously this built a tuple and referenced an unbound
                # loop variable; raise a proper message instead.
                raise ValueError('Cannot read in %s from shiftfile %s as float numbers'
                                 % (f[1], filename))
            msg = "At least 2 and at most 4 shift values should be provided in a shiftfile"
            if len(f[1]) < 2:
                raise ValueError(msg)
            elif len(f[1]) == 3:
                # Scale omitted: default to 1.0.
                f[1].append(1.0)
            elif len(f[1]) == 2:
                # Rotation and scale omitted: default to 0.0 and 1.0.
                f[1].extend([0.0, 1.0])
            elif len(f[1]) > 4:
                raise ValueError(msg)

        fdict = dict(files)
        self.update(fdict)


    def verifyShiftFile(self):
        """
        Verifies that the reference image exists.
        """
        return bool(self['refimage'] and fu.findFile(self['refimage']))

    def writeShiftFile(self, filename="shifts.txt"):
        """
        Writes a shift file object to a file on disk using the convention for shift file format.
        """
        lines = ['# frame: ', self['frame'], '\n',
                 '# refimage: ', self['refimage'], '\n',
                 '# form: ', self['form'], '\n',
                 '# units: ', self['units'], '\n']

        for o in self['order']:
            ss = " "
            for shift in self[o]:
                ss += str(shift) + " "
            line = str(o) + ss + "\n"
            lines.append(line)

        fshifts = open(filename, 'w')
        try:
            fshifts.writelines(lines)
        finally:
            fshifts.close()
-
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/basicpar.py b/required_pkgs/stsci.tools/lib/stsci/tools/basicpar.py
deleted file mode 100644
index 1c0a257..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/basicpar.py
+++ /dev/null
@@ -1,1630 +0,0 @@
-"""basicpar.py -- General base class for parameter objects.  Broken out
-                  from PyRAF's IrafPar class.
-
-$Id: basicpar.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-import re, sys
-from . import irafutils, minmatch
-from .irafglobals import INDEF, Verbose, yes, no
-
# Tuple of integer types for isinstance() checks: Python 3 folded the
# arbitrary-precision `long` into `int`, so only the Python 2 branch
# needs both names.
if sys.version_info[0] > 2:
    int_types = (int, )
else:
    int_types = (int, long)  # noqa: F821 -- `long` exists only on Python 2
-    
# container class used for __deepcopy__ method
class _EmptyClass:
    """Featureless shell: IrafPar.__deepcopy__ instantiates this and then
    fills in __dict__ and __class__ by hand to clone a parameter."""
    pass
-
-# -----------------------------------------------------
-# Warning (non-fatal) error.  Raise an exception if in
-# strict mode, or print a message if Verbose is on.
-# -----------------------------------------------------
-
-# Verbose (set irafglobals.py) determines
-# whether warning messages are printed when errors are found.  The
-# strict parameter to various methods and functions can be set to
-# raise an exception on errors; otherwise we do our best to work
-# around errors, only raising an exception for really serious,
-# unrecoverable problems.
-
def warning(msg, strict=0, exception=SyntaxError, level=0):
    """Report a (normally non-fatal) problem.

    In strict mode the message is raised as *exception*; otherwise it is
    written to stderr, but only when the global Verbose level exceeds
    *level*.  A trailing newline is appended if *msg* lacks one.
    """
    if strict:
        raise exception(msg)
    if Verbose > level:
        sys.stdout.flush()
        sys.stderr.write('Warning: %s' % msg)
        if msg[-1:] != '\n':
            sys.stderr.write('\n')
-
-# -----------------------------------------------------
-# basic parameter factory
-# -----------------------------------------------------
-
# type codes that map to string-valued / real-valued parameter classes
_string_types = [ 's', 'f', 'struct', 'z' ]
_real_types = [ 'r', 'd' ]

def parFactory(fields, strict=0):

    """parameter factory function

    fields is a list of the comma-separated fields (as in the .par file).
    Each entry is a string or None (indicating that field was omitted.)

    Set the strict parameter to a non-zero value to do stricter parsing
    (to find errors in the input)"""

    if len(fields) < 3 or None in fields[0:3]:
        raise SyntaxError("At least 3 fields must be given")
    ptype = fields[1]

    # Upper-case R/I select the always-strict numeric classes.
    if ptype == 'R':
        return StrictParR(fields, 1)
    if ptype == 'I':
        return StrictParI(fields, 1)
    if ptype in _string_types:
        return IrafParS(fields, strict)
    if ptype in _real_types:
        return IrafParR(fields, strict)
    if ptype == 'i':
        return IrafParI(fields, strict)
    if ptype == 'b':
        return IrafParB(fields, strict)
    if ptype == 'ar':
        return IrafParAR(fields, strict)
    if ptype == 'ai':
        return IrafParAI(fields, strict)
    if ptype == 'as':
        return IrafParAS(fields, strict)
    if ptype == 'ab':
        return IrafParAB(fields, strict)
    if ptype[:1] == 'a':
        raise SyntaxError("Cannot handle arrays of type %s" % ptype)
    raise SyntaxError("Cannot handle parameter type %s" % ptype)
-
-
-# --------------------------------------------------------
-# Publish the (simple) algorithm for combining scope+name
-# --------------------------------------------------------
-
def makeFullName(parScope, parName):
    """ Create the fully-qualified name (includes scope if used) """
    # Skip scope (and leading dot) if no scope, even in cases where scope
    # IS used for other pars in the same task.
    if not parScope:
        return parName
    return "%s.%s" % (parScope, parName)
-
-# -----------------------------------------------------
-# Set up minmatch dictionaries for parameter fields
-# -----------------------------------------------------
-
# Minimum-match dictionaries mapping (abbreviatable) field names onto
# themselves; used by IrafPar._getField / _setField for p_* lookups.
_getFieldDict = minmatch.MinMatchDict()
for _fname in ("p_name", "p_xtype", "p_type", "p_mode", "p_prompt", "p_scope",
               "p_value", "p_default", "p_filename", "p_maximum", "p_minimum"):
    _getFieldDict.add(_fname, _fname)

# Only this subset of fields may be written.
_setFieldDict = minmatch.MinMatchDict()
for _fname in ("p_prompt", "p_value", "p_filename", "p_maximum", "p_minimum",
               "p_mode", "p_scope"):
    _setFieldDict.add(_fname, _fname)
del _fname
-
-# utility function to check whether string is a parameter field
-
def isParField(s):
    """Returns true if string s appears to be a parameter field"""
    # Cheap prefix test first; only consult the minmatch dict for "p_..."
    if s[:2] != "p_":
        return False
    try:
        return s in _getFieldDict
    except minmatch.AmbiguousKeyError:
        # If ambiguous match, assume it is a parameter field.
        # An exception will doubtless be raised later, but
        # there's really no good choice here.
        return 1
-
-# basic IrafPar attributes
-# IrafPar's are protected in setattr against adding arbitrary attributes,
-# and this dictionary is used as a helper in instance initialization
-_IrafPar_attr_dict = {
-        "name" : None,
-        "type" : None,
-        "mode" : None,
-        "value" : None,
-        "min" : None,
-        "max" : None,
-        "choice" : None,
-        "choiceDict" : None,
-        "prompt" : None,
-        "flags" : 0,
-        "scope" : None,
-        }
-
-# flag bits tell whether value has been changed and
-# whether it was set on the command line.
-_changedFlag = 1
-_cmdlineFlag = 2
-
-# -----------------------------------------------------
-# IRAF parameter base class
-# -----------------------------------------------------
-
class IrafPar:

    """Non-array IRAF parameter base class"""

    def __init__(self,fields,strict=0):
        orig_len = len(fields)
        if orig_len < 3 or None in fields[0:3]:
            raise SyntaxError("At least 3 fields must be given")
        #
        # all the attributes that are going to get defined
        #
        self.__dict__.update(_IrafPar_attr_dict)
        # .par fields layout: [0]=name [1]=type [2]=mode [3]=value
        #                     [4]=min-or-choice [5]=max [6]=prompt
        self.name   = fields[0]
        self.type   = fields[1]
        self.mode   = fields[2]
        self.scope  = None # simple default; may be unused
        #
        # put fields into appropriate attributes
        #
        while len(fields) < 7: fields.append(None)
        #
        self.value = self._coerceValue(fields[3],strict)
        if fields[4] is not None and '|' in fields[4]:
            # a '|' in the min field means it is really a choice list
            self._setChoice(fields[4].strip(),strict)
            if fields[5] is not None:
                if orig_len < 7:
                    warning("Max value illegal when choice list given" +
                                    " for parameter " + self.name +
                                    " (probably missing comma)",
                                    strict)
                    # try to recover by assuming max string is prompt
                    fields[6] = fields[5]
                    fields[5] = None
                else:
                    warning("Max value illegal when choice list given" +
                            " for parameter " + self.name, strict)
        else:
            #XXX should catch ValueError exceptions here and set to null
            #XXX could also check for missing comma (null prompt, prompt
            #XXX in max field)
            if fields[4] is not None:
                self.min = self._coerceValue(fields[4],strict)
            if fields[5] is not None:
                self.max = self._coerceValue(fields[5],strict)
        if self.min not in [None, INDEF] and \
           self.max not in [None, INDEF] and self.max < self.min:
            warning("Max " + str(self.max) + " is less than minimum " + \
                    str(self.min) + " for parameter " + self.name,
                    strict)
            # swap rather than reject: IRAF-style best-effort recovery
            self.min, self.max = self.max, self.min
        if fields[6] is not None:
            self.prompt = irafutils.removeEscapes(
                                            irafutils.stripQuotes(fields[6]))
        else:
            self.prompt = ''
        #
        # check attributes to make sure they are appropriate for
        # this parameter type (e.g. some do not allow choice list
        # or min/max)
        #
        self._checkAttribs(strict)
        #
        # check parameter value to see if it is correct
        #
        try:
            self.checkValue(self.value,strict)
        except ValueError as e:
            warning("Illegal initial value for parameter\n" + str(e),
                    strict, exception=ValueError)
            # Set illegal values to None, just like IRAF
            self.value = None

    #--------------------------------------------
    # public accessor methods
    #--------------------------------------------

    def isLegal(self):
        """Returns true if current parameter value is legal"""
        try:
            # apply a stricter definition of legal here
            # fixable values have already been fixed
            # don't accept None values
            self.checkValue(self.value)
            return self.value is not None
        except ValueError:
            return 0

    def setScope(self,value=''):
        """Set scope value.  Written this way to not change the
           standard set of fields in the comma-separated list. """
        # set through dictionary to avoid extra calls to __setattr__
        self.__dict__['scope'] = value

    def setCmdline(self,value=1):
        """Set cmdline flag"""
        # set through dictionary to avoid extra calls to __setattr__
        if value:
            self.__dict__['flags'] = self.flags | _cmdlineFlag
        else:
            self.__dict__['flags'] = self.flags & ~_cmdlineFlag

    def isCmdline(self):
        """Return cmdline flag"""
        return (self.flags & _cmdlineFlag) == _cmdlineFlag

    def setChanged(self,value=1):
        """Set changed flag"""
        # set through dictionary to avoid another call to __setattr__
        if value:
            self.__dict__['flags'] = self.flags | _changedFlag
        else:
            self.__dict__['flags'] = self.flags & ~_changedFlag

    def isChanged(self):
        """Return changed flag"""
        return (self.flags & _changedFlag) == _changedFlag

    def setFlags(self,value):
        """Set all flags"""
        self.__dict__['flags'] = value

    def isLearned(self, mode=None):
        """Return true if this parameter is learned

        Hidden parameters are not learned; automatic parameters inherit
        behavior from package/cl; other parameters are learned.
        If mode is set, it determines how automatic parameters behave.
        If not set, cl.mode parameter determines behavior.
        """
        if "l" in self.mode: return 1
        if "h" in self.mode: return 0
        if "a" in self.mode:
            if mode is None: mode = 'ql' # that is, iraf.cl.mode
            if "h" in mode and "l" not in mode:
                return 0
        return 1

    #--------------------------------------------
    # other public methods
    #--------------------------------------------

    def getPrompt(self):
        """Alias for getWithPrompt() for backward compatibility"""
        return self.getWithPrompt()

    def getWithPrompt(self):
        """Interactively prompt for parameter value"""
        # Build the prompt string: first prompt line, then choices or
        # min:max range, then the current value as the default.
        if self.prompt:
            pstring = self.prompt.split("\n")[0].strip()
        else:
            pstring = self.name
        if self.choice:
            schoice = list(map(self.toString, self.choice))
            pstring = pstring + " (" + "|".join(schoice) + ")"
        elif self.min not in [None, INDEF] or \
                 self.max not in [None, INDEF]:
            pstring = pstring + " ("
            if self.min not in [None, INDEF]:
                pstring = pstring + self.toString(self.min)
            pstring = pstring + ":"
            if self.max not in [None, INDEF]:
                pstring = pstring + self.toString(self.max)
            pstring = pstring + ")"
        # add current value as default
        if self.value is not None:
            pstring = pstring + " (" + self.toString(self.value,quoted=1) + ")"
        pstring = pstring + ": "
        # don't redirect stdin/out unless redirected filehandles are also ttys
        # or unless originals are NOT ttys
        stdout = sys.__stdout__
        try:
            if sys.stdout.isatty() or not stdout.isatty():
                stdout = sys.stdout
        except AttributeError:
            pass
        stdin = sys.__stdin__
        try:
            if sys.stdin.isatty() or not stdin.isatty():
                stdin = sys.stdin
        except AttributeError:
            pass
        # print prompt, suppressing both newline and following space
        stdout.write(pstring)
        stdout.flush()
        ovalue = irafutils.tkreadline(stdin)
        value = ovalue.strip()
        # loop until we get an acceptable value
        while (1):
            try:
                # null input usually means use current value as default
                # check it anyway since it might not be acceptable
                if value == "": value = self._nullPrompt()
                self.set(value)
                # None (no value) is not acceptable value after prompt
                if self.value is not None: return
                # if not EOF, keep looping
                if ovalue == "":
                    stdout.flush()
                    raise EOFError("EOF on parameter prompt")
                print("Error: specify a value for the parameter")
            except ValueError as e:
                print(str(e))
            stdout.write(pstring)
            stdout.flush()
            ovalue = irafutils.tkreadline(stdin)
            value = ovalue.strip()

    def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
        """Return value of this parameter as a string (or in native format
        if native is non-zero.)"""

        if field and field != "p_value":
            # note p_value comes back to this routine, so shortcut that case
            return self._getField(field,native=native,prompt=prompt)

        # may prompt for value if prompt flag is set
        if prompt: self._optionalPrompt(mode)

        if index is not None:
            raise SyntaxError("Parameter "+self.name+" is not an array")

        if native:
            rv = self.value
        else:
            rv = self.toString(self.value)
        return rv

    def set(self, value, field=None, index=None, check=1):
        """Set value of this parameter from a string or other value.
        Field is optional parameter field (p_prompt, p_minimum, etc.)
        Index is optional array index (zero-based).  Set check=0 to
        assign the value without checking to see if it is within
        the min-max range or in the choice list."""

        if index is not None:
            raise SyntaxError("Parameter "+self.name+" is not an array")

        if field:
            self._setField(value,field,check=check)
        else:
            if check:
                self.value = self.checkValue(value)
            else:
                self.value = self._coerceValue(value)
            self.setChanged()

    def checkValue(self,value,strict=0):
        """Check and convert a parameter value.

        Raises an exception if the value is not permitted for this
        parameter.  Otherwise returns the value (converted to the
        right type.)
        """
        v = self._coerceValue(value,strict)
        return self.checkOneValue(v,strict)

    def checkOneValue(self,v,strict=0):
        """Checks a single value to see if it is in range or choice list

        Allows indirection strings starting with ")".  Assumes
        v has already been converted to right value by
        _coerceOneValue.  Returns value if OK, or raises
        ValueError if not OK.
        """
        if v in [None, INDEF] or (isinstance(v,str) and v[:1] == ")"):
            return v
        elif v == "":
            # most parameters treat null string as omitted value
            return None
        elif self.choice is not None and v not in self.choiceDict:
            schoice = list(map(self.toString, self.choice))
            schoice = "|".join(schoice)
            raise ValueError("Parameter %s: "
                    "value %s is not in choice list (%s)" %
                    (self.name, str(v), schoice))
        elif (self.min not in [None, INDEF] and v<self.min):
            raise ValueError("Parameter %s: "
                    "value `%s' is less than minimum `%s'" %
                    (self.name, str(v), str(self.min)))
        elif (self.max not in [None, INDEF] and v>self.max):
            raise ValueError("Parameter %s: "
                    "value `%s' is greater than maximum `%s'" %
                    (self.name, str(v), str(self.max)))
        return v

    def dpar(self, cl=1):
        """Return dpar-style executable assignment for parameter

        Default is to write CL version of code; if cl parameter is
        false, writes Python executable code instead.
        """
        sval = self.toString(self.value, quoted=1)
        if not cl:
            if sval == "": sval = "None"
        s = "%s = %s" % (self.name, sval)
        return s

    def fullName(self):
        """ Return the fully-qualified name (inclues scope if used) """
        return makeFullName(self.scope, self.name) # scope can be None or ''

    def pretty(self,verbose=0):
        """Return pretty list description of parameter"""
        # split prompt lines and add blanks in later lines to align them
        plines = self.prompt.split('\n')
        for i in range(len(plines)-1): plines[i+1] = 32*' ' + plines[i+1]
        plines = '\n'.join(plines)
        namelen = min(len(self.name), 12)
        pvalue = self.get(prompt=0,lpar=1)
        alwaysquoted = ['s', 'f', '*gcur', '*imcur', '*ukey', 'pset']
        if self.type in alwaysquoted and self.value is not None: pvalue = '"' + pvalue + '"'
        if self.mode == "h":
            # hidden parameters are shown in parentheses, IRAF lpar style
            s = "%13s = %-15s %s" % ("("+self.name[:namelen],
                                    pvalue+")", plines)
        else:
            s = "%13s = %-15s %s" % (self.name[:namelen],
                                    pvalue, plines)
        if not verbose: return s

        if self.choice is not None:
            # verbose mode appends the choice list, wrapped at 80 columns
            s = s + "\n" + 32*" " + "|"
            nline = 33
            for i in range(len(self.choice)):
                sch = str(self.choice[i]) + "|"
                s = s + sch
                nline = nline + len(sch) + 1
                if nline > 80:
                    s = s + "\n" + 32*" " + "|"
                    nline = 33
        elif self.min not in [None, INDEF] or self.max not in [None, INDEF]:
            s = s + "\n" + 32*" "
            if self.min not in [None, INDEF]:
                s = s + str(self.min) + " <= "
            s = s + self.name
            if self.max not in [None, INDEF]:
                s = s + " <= " + str(self.max)
        return s

    def save(self, dolist=0):
        """Return .par format string for this parameter

        If dolist is set, returns fields as a list of strings.  Default
        is to return a single string appropriate for writing to a file.
        """
        quoted = not dolist
        fields = 7*[""]
        fields[0] = self.name
        fields[1] = self.type
        fields[2] = self.mode
        fields[3] = self.toString(self.value,quoted=quoted)
        if self.choice is not None:
            schoice = list(map(self.toString, self.choice))
            schoice.insert(0,'')
            schoice.append('')
            fields[4] = repr('|'.join(schoice))
        elif self.min not in [None,INDEF]:
            fields[4] = self.toString(self.min,quoted=quoted)
        if self.max not in [None,INDEF]:
            fields[5] = self.toString(self.max,quoted=quoted)
        if self.prompt:
            if quoted:
                sprompt = repr(self.prompt)
            else:
                sprompt = self.prompt
            # prompt can have embedded newlines (which are printed)
            sprompt = sprompt.replace(r'\012', '\n')
            sprompt = sprompt.replace(r'\n', '\n')
            fields[6] = sprompt
        # delete trailing null parameters
        for i in [6,5,4]:
            if fields[i] != "": break
            del fields[i]
        if dolist:
            return fields
        else:
            return ','.join(fields)

    #--------------------------------------------
    # special methods to give desired object syntax
    #--------------------------------------------

    # allow parameter object to be used in arithmetic expression

    # NOTE(review): __coerce__ is a Python-2-only hook, and the builtin
    # coerce() was removed in Python 3 -- under Python 3 this method is
    # never invoked and would raise NameError if called directly.
    def __coerce__(self, other):
        return coerce(self.get(native=1), other)

    # fields are accessible as attributes

    def __getattr__(self,field):
        if field[:1] == '_':
            raise AttributeError(field)
        try:
            return self._getField(field, native=1)
        except SyntaxError as e:
            if field in _IrafPar_attr_dict:
                # handle odd-ball case of new code accessing par's new
                # attr (e.g. scope), with old-code-cached version of par
                return _IrafPar_attr_dict[field] # return unused default
            else:
                raise AttributeError(str(e))

    def __setattr__(self,attr,value):
        # don't allow any new parameters to be added
        if attr in self.__dict__:
            self.__dict__[attr] = value
        elif isParField(attr):
            #XXX should check=0 be used here?
            self._setField(value, attr)
        else:
            raise AttributeError("No attribute %s for parameter %s" %
                    (attr, self.name))

    def __deepcopy__(self, memo):
        """Deep copy of this parameter object"""
        new = _EmptyClass()
        # shallow copy of dictionary suffices for most attributes
        new.__dict__ = self.__dict__.copy()
        # value, choice may be lists of atomic items
        if isinstance(self.value, list):
            new.value = list(self.value)
        if isinstance(self.choice, list):
            new.choice = list(self.choice)
        # choiceDict is OK with shallow copy because it will
        # always be reset if choices change
        new.__class__ = self.__class__
        return new

    def __getstate__(self):
        """Return state info for pickle"""
        # choiceDict gets reconstructed
        if self.choice is None:
            return self.__dict__
        else:
            d = self.__dict__.copy()
            d['choiceDict'] = None
            return d

    def __setstate__(self, state):
        """Restore state info from pickle"""
        self.__dict__ = state
        if self.choice is not None:
            self._setChoiceDict()

    def __str__(self):
        """Return readable description of parameter"""
        s = "<" + self.__class__.__name__ + " " + self.name + " " + self.type
        s = s + " " + self.mode + " " + repr(self.value)
        if self.choice is not None:
            schoice = list(map(self.toString, self.choice))
            s = s + " |" + "|".join(schoice) + "|"
        else:
            s = s + " " + repr(self.min) + " " + repr(self.max)
        s = s + ' "' + self.prompt + '">'
        return s

    #--------------------------------------------
    # private methods -- may be used by subclasses, but should
    # not be needed outside this module
    #--------------------------------------------

    def _checkAttribs(self,strict=0):
        # by default no restrictions on attributes
        pass

    def _setChoice(self,s,strict=0):
        """Set choice parameter from string s"""
        clist = _getChoice(s,strict)
        self.choice = list(map(self._coerceValue, clist))
        self._setChoiceDict()

    def _setChoiceDict(self):
        """Create dictionary for choice list"""
        # value is name of choice parameter (same as key)
        self.choiceDict = {}
        for c in self.choice: self.choiceDict[c] = c

    def _nullPrompt(self):
        """Returns value to use when answer to prompt is null string"""
        # most parameters just keep current default (even if None)
        return self.value

    def _optionalPrompt(self, mode):
        """Interactively prompt for parameter if necessary

        Prompt for value if
        (1) mode is hidden but value is undefined or bad, or
        (2) mode is query and value was not set on command line
        Never prompt for "u" mode parameters, which are local variables.
        """
        if (self.mode == "h") or (self.mode == "a" and mode == "h"):
            # hidden parameter
            if not self.isLegal():
                self.getWithPrompt()
        elif self.mode == "u":
            # "u" is a special mode used for local variables in CL scripts
            # They should never prompt under any circumstances
            if not self.isLegal():
                raise ValueError(
                                "Attempt to access undefined local variable `%s'" %
                                self.name)
        else:
            # query parameter
            if self.isCmdline()==0:
                self.getWithPrompt()

    def _getPFilename(self,native,prompt):
        """Get p_filename field for this parameter

        Same as get for non-list params
        """
        return self.get(native=native,prompt=prompt)

    def _getPType(self):
        """Get underlying datatype for this parameter

        Just self.type for normal params
        """
        return self.type

    def _getField(self, field, native=0, prompt=1):
        """Get a parameter field value"""
        try:
            # expand field name using minimum match
            field = _getFieldDict[field]
        except KeyError as e:
            # re-raise the exception with a bit more info
            raise SyntaxError("Cannot get field " + field +
                    " for parameter " + self.name + "\n" + str(e))
        if field == "p_value":
            # return value of parameter
            # Note that IRAF returns the filename for list parameters
            # when p_value is used.  I consider this a bug, and it does
            # not appear to be used by any cl scripts or SPP programs
            # in either IRAF or STSDAS.  It is also in conflict with
            # the IRAF help documentation.  I am making p_value exactly
            # the same as just a simple CL parameter reference.
            return self.get(native=native,prompt=prompt)
        elif field == "p_name": return self.name
        elif field == "p_xtype": return self.type
        elif field == "p_type": return self._getPType()
        elif field == "p_mode": return self.mode
        elif field == "p_prompt": return self.prompt
        elif field == "p_scope": return self.scope
        elif field == "p_default" or field == "p_filename":
            # these all appear to be equivalent -- they just return the
            # current PFilename of the parameter (which is the same as the value
            # for non-list parameters, and is the filename for list parameters)
            return self._getPFilename(native,prompt)
        elif field == "p_maximum":
            if native:
                return self.max
            else:
                return self.toString(self.max)
        elif field == "p_minimum":
            if self.choice is not None:
                if native:
                    return self.choice
                else:
                    schoice = list(map(self.toString, self.choice))
                    return "|" + "|".join(schoice) + "|"
            else:
                if native:
                    return self.min
                else:
                    return self.toString(self.min)
        else:
            # XXX unimplemented fields:
            # p_length: maximum string length in bytes -- what to do with it?
            raise RuntimeError("Program bug in IrafPar._getField()\n" +
                    "Requested field " + field + " for parameter " + self.name)

    def _setField(self, value, field, check=1):
        """Set a parameter field value"""
        try:
            # expand field name using minimum match
            field = _setFieldDict[field]
        except KeyError as e:
            raise SyntaxError("Cannot set field " + field +
                    " for parameter " + self.name + "\n" + str(e))
        if field == "p_prompt":
            self.prompt = irafutils.removeEscapes(irafutils.stripQuotes(value))
        elif field == "p_value":
            self.set(value,check=check)
        elif field == "p_filename":
            # this is only relevant for list parameters (*imcur, *gcur, etc.)
            self.set(value,check=check)
        elif field == "p_scope":
            self.scope = value
        elif field == "p_maximum":
            self.max = self._coerceOneValue(value)
        elif field == "p_minimum":
            # a '|' in the minimum field really means a choice list
            if isinstance(value,str) and '|' in value:
                self._setChoice(irafutils.stripQuotes(value))
            else:
                self.min = self._coerceOneValue(value)
        elif field == "p_mode":
            # not doing any type or value checking here -- setting mode is
            # rare, so assume that it is being done correctly
            self.mode = irafutils.stripQuotes(value)
        else:
            raise RuntimeError("Program bug in IrafPar._setField()" +
                    "Requested field " + field + " for parameter " + self.name)

    def _coerceValue(self,value,strict=0):
        """Coerce parameter to appropriate type

        Should accept None or null string.
        """
        return self._coerceOneValue(value,strict)

    def _coerceOneValue(self,value,strict=0):
        """Coerce a scalar parameter to the appropriate type

        Default implementation simply prevents direct use of base class.
        Should accept None or null string.
        """
        raise NotImplementedError("class IrafPar cannot be used directly")
-
-
-# -----------------------------------------------------
-# IRAF array parameter base class
-# -----------------------------------------------------
-
-class IrafArrayPar(IrafPar):
-
-    """IRAF array parameter class"""
-
-    def __init__(self,fields,strict=0):
-        orig_len = len(fields)
-        if orig_len < 3:
-            raise SyntaxError("At least 3 fields must be given")
-        #
-        # all the attributes that are going to get defined
-        #
-        self.__dict__.update(_IrafPar_attr_dict)
-        self.name   = fields[0]
-        self.type   = fields[1]
-        self.mode   = fields[2]
-        self.__dict__['shape'] = None
-        #
-        # for array parameters, dimensions follow mode field
-        # and values come from fields after prompt
-        #
-        if len(fields)<4 or fields[3] is None:
-            raise ValueError("Missing dimension field for array parameter")
-        ndim = int(fields[3])
-        if len(fields) < 4+2*ndim:
-            raise ValueError("Missing array shape fields for array parameter")
-        shape = []
-        array_size = 1
-        for i in range(ndim):
-            shape.append(int(fields[4+2*i]))
-            array_size = array_size*shape[-1]
-        self.shape = tuple(shape)
-        nvstart = 7+2*ndim
-        fields.extend([""]*(nvstart-len(fields)))
-        fields.extend([None]*(nvstart+array_size-len(fields)))
-        if len(fields) > nvstart+array_size:
-            raise SyntaxError("Too many values for array" +
-                    " for parameter " + self.name)
-        #
-        self.value = [None]*array_size
-        self.value = self._coerceValue(fields[nvstart:],strict)
-        if fields[nvstart-3] is not None and '|' in fields[nvstart-3]:
-            self._setChoice(fields[nvstart-3].strip(),strict)
-            if fields[nvstart-2] is not None:
-                if orig_len < nvstart:
-                    warning("Max value illegal when choice list given" +
-                                    " for parameter " + self.name +
-                                    " (probably missing comma)",
-                                    strict)
-                    # try to recover by assuming max string is prompt
-                    #XXX risky -- all init values might be off by one
-                    fields[nvstart-1] = fields[nvstart-2]
-                    fields[nvstart-2] = None
-                else:
-                    warning("Max value illegal when choice list given" +
-                            " for parameter " + self.name, strict)
-        else:
-            self.min = self._coerceOneValue(fields[nvstart-3],strict)
-            self.max = self._coerceOneValue(fields[nvstart-2],strict)
-        if fields[nvstart-1] is not None:
-            self.prompt = irafutils.removeEscapes(
-                                            irafutils.stripQuotes(fields[nvstart-1]))
-        else:
-            self.prompt = ''
-        if self.min not in [None, INDEF] and \
-           self.max not in [None, INDEF] and self.max < self.min:
-            warning("Maximum " + str(self.max) + " is less than minimum " + \
-                    str(self.min) + " for parameter " + self.name,
-                    strict)
-            self.min, self.max = self.max, self.min
-        #
-        # check attributes to make sure they are appropriate for
-        # this parameter type (e.g. some do not allow choice list
-        # or min/max)
-        #
-        self._checkAttribs(strict)
-        #
-        # check parameter value to see if it is correct
-        #
-        try:
-            self.checkValue(self.value,strict)
-        except ValueError as e:
-            warning("Illegal initial value for parameter\n" + str(e),
-                    strict, exception=ValueError)
-            # Set illegal values to None, just like IRAF
-            self.value = None
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def save(self, dolist=0):
-        """Return .par format string for this parameter
-
-        If dolist is set, returns fields as a list of strings.  Default
-        is to return a single string appropriate for writing to a file.
-        """
-        quoted = not dolist
-        array_size = 1
-        for d in self.shape:
-            array_size = d*array_size
-        ndim = len(self.shape)
-        fields = (7+2*ndim+len(self.value))*[""]
-        fields[0] = self.name
-        fields[1] = self.type
-        fields[2] = self.mode
-        fields[3] = str(ndim)
-        next = 4
-        for d in self.shape:
-            fields[next] = str(d); next += 1
-            fields[next] = '1';    next += 1
-        nvstart = 7+2*ndim
-        if self.choice is not None:
-            schoice = list(map(self.toString, self.choice))
-            schoice.insert(0,'')
-            schoice.append('')
-            fields[nvstart-3] = repr('|'.join(schoice))
-        elif self.min not in [None,INDEF]:
-            fields[nvstart-3] = self.toString(self.min,quoted=quoted)
-        # insert an escaped line break before min field
-        if quoted:
-            fields[nvstart-3] = '\\\n' + fields[nvstart-3]
-        if self.max not in [None,INDEF]:
-            fields[nvstart-2] = self.toString(self.max,quoted=quoted)
-        if self.prompt:
-            if quoted:
-                sprompt = repr(self.prompt)
-            else:
-                sprompt = self.prompt
-            # prompt can have embedded newlines (which are printed)
-            sprompt = sprompt.replace(r'\012', '\n')
-            sprompt = sprompt.replace(r'\n', '\n')
-            fields[nvstart-1] = sprompt
-        for i in range(len(self.value)):
-            fields[nvstart+i] = self.toString(self.value[i],quoted=quoted)
-        # insert an escaped line break before value fields
-        if dolist:
-            return fields
-        else:
-            fields[nvstart] = '\\\n' + fields[nvstart]
-            return ','.join(fields)
-
-    def dpar(self, cl=1):
-        """Return dpar-style executable assignment for parameter
-
-        Default is to write CL version of code; if cl parameter is
-        false, writes Python executable code instead.  Note that
-        dpar doesn't even work for arrays in the CL, so we just use
-        Python syntax here.
-        """
-        sval = list(map(self.toString, self.value, len(self.value)*[1]))
-        for i in range(len(sval)):
-            if sval[i] == "":
-                sval[i] = "None"
-        s = "%s = [%s]" % (self.name, ', '.join(sval))
-        return s
-
-    def get(self, field=None, index=None, lpar=0, prompt=1, native=0, mode="h"):
-        """Return value of this parameter as a string (or in native format
-        if native is non-zero.)"""
-
-        if field: return self._getField(field,native=native,prompt=prompt)
-
-        # may prompt for value if prompt flag is set
-        #XXX should change _optionalPrompt so we prompt for each element of
-        #XXX the array separately?  I think array parameters are
-        #XXX not useful as non-hidden params.
-
-        if prompt: self._optionalPrompt(mode)
-
-        if index is not None:
-            sumindex = self._sumindex(index)
-            try:
-                if native:
-                    return self.value[sumindex]
-                else:
-                    return self.toString(self.value[sumindex])
-            except IndexError:
-                # should never happen
-                raise SyntaxError("Illegal index [" + repr(sumindex) +
-                        "] for array parameter " + self.name)
-        elif native:
-            # return object itself for an array because it is
-            # indexable, can have values assigned, etc.
-            return self
-        else:
-            # return blank-separated string of values for array
-            return str(self)
-
-    def set(self, value, field=None, index=None, check=1):
-        """Set value of this parameter from a string or other value.
-        Field is optional parameter field (p_prompt, p_minimum, etc.)
-        Index is optional array index (zero-based).  Set check=0 to
-        assign the value without checking to see if it is within
-        the min-max range or in the choice list."""
-        if index is not None:
-            sumindex = self._sumindex(index)
-            try:
-                value = self._coerceOneValue(value)
-                if check:
-                    self.value[sumindex] = self.checkOneValue(value)
-                else:
-                    self.value[sumindex] = value
-                return
-            except IndexError:
-                # should never happen
-                raise SyntaxError("Illegal index [" + repr(sumindex) +
-                        "] for array parameter " + self.name)
-        if field:
-            self._setField(value,field,check=check)
-        else:
-            if check:
-                self.value = self.checkValue(value)
-            else:
-                self.value = self._coerceValue(value)
-            self.setChanged()
-
-    def checkValue(self,value,strict=0):
-        """Check and convert a parameter value.
-
-        Raises an exception if the value is not permitted for this
-        parameter.  Otherwise returns the value (converted to the
-        right type.)
-        """
-        v = self._coerceValue(value,strict)
-        for i in range(len(v)):
-            self.checkOneValue(v[i],strict=strict)
-        return v
-
-    #--------------------------------------------
-    # special methods
-    #--------------------------------------------
-
-    # array parameters can be subscripted
-    # note subscripts start at zero, unlike CL subscripts
-    # that start at one
-
-    def __getitem__(self, index):
-        return self.get(index=index,native=1)
-
-    def __setitem__(self, index, value):
-        self.set(value, index=index)
-
-    def __str__(self):
-        """Return readable description of parameter"""
-        # This differs from non-arrays in that it returns a
-        # print string with just the values.  That's because
-        # the object itself is returned as the native value.
-        sv = list(map(str, self.value))
-        for i in range(len(sv)):
-            if self.value[i] is None:
-                sv[i] = "INDEF"
-        return ' '.join(sv)
-
-    def __len__(self):
-        return len(self.value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    def _sumindex(self, index=None):
-        """Convert tuple index to 1-D index into value"""
-        try:
-            ndim = len(index)
-        except TypeError:
-            # turn index into a 1-tuple
-            index = (index,)
-            ndim = 1
-        if len(self.shape) != ndim:
-            raise ValueError("Index to %d-dimensional array %s has too %s dimensions" %
-                (len(self.shape), self.name, ["many","few"][len(self.shape) > ndim]))
-        sumindex = 0
-        for i in range(ndim-1,-1,-1):
-            index1 = index[i]
-            if index1 < 0 or index1 >= self.shape[i]:
-                raise ValueError("Dimension %d index for array %s is out of bounds (value=%d)" %
-                    (i+1, self.name, index1))
-            sumindex = index1 + sumindex*self.shape[i]
-        return sumindex
-
-    def _getPType(self):
-        """Get underlying datatype for this parameter (strip off 'a' array params)"""
-        return self.type[1:]
-
-    def _coerceValue(self,value,strict=0):
-        """Coerce parameter to appropriate type
-
-        Should accept None or null string.  Must be an array.
-        """
-        try:
-            if isinstance(value,str):
-                # allow single blank-separated string as input
-                value = value.split()
-            if len(value) != len(self.value):
-                raise IndexError
-            v = len(self.value)*[0]
-            for i in range(len(v)):
-                v[i] = self._coerceOneValue(value[i],strict)
-            return v
-        except (IndexError, TypeError):
-            raise ValueError("Value must be a " + repr(len(self.value)) +
-                    "-element array for " + self.name)
-
-    def isLegal(self):
-        """Dont call checkValue for arrays"""
-        try:
-            return self.value is not None
-        except ValueError:
-            return 0
-
-
-# -----------------------------------------------------
-# IRAF string parameter mixin class
-# -----------------------------------------------------
-
-class _StringMixin:
-
-    """IRAF string parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        """Convert a single (non-array) value of the appropriate type for
-        this parameter to a string"""
-        if value is None:
-            return ""
-        elif quoted:
-            return repr(value)
-        else:
-            return value
-
-    # slightly modified checkOneValue allows minimum match for
-    # choice strings and permits null string as value
-    def checkOneValue(self,v,strict=0):
-        if v is None or v[:1] == ")":
-            return v
-        elif self.choice is not None:
-            try:
-                v = self.choiceDict[v]
-            except minmatch.AmbiguousKeyError:
-                clist = self.choiceDict.getall(v)
-                raise ValueError("Parameter %s: "
-                        "ambiguous value `%s', could be %s" %
-                        (self.name, str(v), "|".join(clist)))
-            except KeyError:
-                raise ValueError("Parameter %s: "
-                        "value `%s' is not in choice list (%s)" %
-                        (self.name, str(v), "|".join(self.choice)))
-        elif (self.min is not None and v<self.min):
-            raise ValueError("Parameter %s: "
-                    "value `%s' is less than minimum `%s'" %
-                    (self.name, str(v), str(self.min)))
-        elif (self.max is not None and v>self.max):
-            raise ValueError("Parameter %s: "
-                    "value `%s' is greater than maximum `%s'" %
-                    (self.name, str(v), str(self.max)))
-        return v
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    def _checkAttribs(self, strict):
-        """Check initial attributes to make sure they are legal"""
-        if self.min:
-            warning("Minimum value not allowed for string-type parameter " +
-                    self.name, strict)
-        self.min = None
-        if self.max:
-            if not self.prompt:
-                warning("Maximum value not allowed for string-type parameter " +
-                                self.name + " (probably missing comma)",
-                                strict)
-                # try to recover by assuming max string is prompt
-                self.prompt = self.max
-            else:
-                warning("Maximum value not allowed for string-type parameter " +
-                        self.name, strict)
-        self.max = None
-        # If not in strict mode, allow file (f) to act just like string (s).
-        # Otherwise choice is also forbidden for file type
-        if strict and self.type == "f" and self.choice:
-            warning("Illegal choice value for type '" +
-                    self.type + "' for parameter " + self.name,
-                    strict)
-            self.choice = None
-
-    def _setChoiceDict(self):
-        """Create min-match dictionary for choice list"""
-        # value is full name of choice parameter
-        self.choiceDict = minmatch.MinMatchDict()
-        for c in self.choice: self.choiceDict.add(c, c)
-
-    def _nullPrompt(self):
-        """Returns value to use when answer to prompt is null string"""
-        # for string, null string is a legal value
-        # keep current default unless it is None
-        if self.value is None:
-            return ""
-        else:
-            return self.value
-
-    def _coerceOneValue(self,value,strict=0):
-        if value is None:
-            return value
-        elif isinstance(value,str):
-            # strip double quotes and remove escapes before quotes
-            return irafutils.removeEscapes(irafutils.stripQuotes(value))
-        else:
-            return str(value)
-
-# -----------------------------------------------------
-# IRAF string parameter class
-# -----------------------------------------------------
-
-class IrafParS(_StringMixin, IrafPar):
-
-    """IRAF string parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF string array parameter class
-# -----------------------------------------------------
-
-class IrafParAS(_StringMixin,IrafArrayPar):
-
-    """IRAF string array parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF boolean parameter mixin class
-# -----------------------------------------------------
-
-class _BooleanMixin:
-
-    """IRAF boolean parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        if value in [None, INDEF]:
-            return ""
-        elif isinstance(value,str):
-            # presumably an indirection value ')task.name'
-            if quoted:
-                return repr(value)
-            else:
-                return value
-        else:
-            # must be internal yes, no value
-            return str(value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    def _checkAttribs(self, strict):
-        """Check initial attributes to make sure they are legal"""
-        if self.min:
-            warning("Minimum value not allowed for boolean-type parameter " +
-                    self.name, strict)
-            self.min = None
-        if self.max:
-            if not self.prompt:
-                warning("Maximum value not allowed for boolean-type parameter " +
-                                self.name + " (probably missing comma)",
-                                strict)
-                # try to recover by assuming max string is prompt
-                self.prompt = self.max
-            else:
-                warning("Maximum value not allowed for boolean-type parameter " +
-                        self.name, strict)
-            self.max = None
-        if self.choice:
-            warning("Choice values not allowed for boolean-type parameter " +
-                    self.name, strict)
-            self.choice = None
-
-    # accepts special yes, no objects, integer values 0,1 or
-    # string 'yes','no' and variants
-    # internal value is yes, no, None/INDEF, or indirection string
-    def _coerceOneValue(self,value,strict=0):
-        if value == INDEF:
-            return INDEF
-        elif value is None or value == "":
-            return None
-        elif value in (1, 1.0, yes, "yes", "YES", "y", "Y", True):
-            return yes
-        elif value in (0, 0.0, no,  "no",  "NO",  "n", "N", False):
-            return no
-        elif isinstance(value,str):
-            v2 = irafutils.stripQuotes(value.strip())
-            if v2 == "" or v2 == "INDEF" or \
-                    ((not strict) and (v2.upper() == "INDEF")):
-                return INDEF
-            elif v2[0:1] == ")":
-                # assume this is indirection -- just save it as a string
-                return v2
-        raise ValueError("Parameter %s: illegal boolean value %s or type %s" %
-                (self.name, repr(value), str(type(value))))
-
-# -----------------------------------------------------
-# IRAF boolean parameter class
-# -----------------------------------------------------
-
-class IrafParB(_BooleanMixin,IrafPar):
-
-    """IRAF boolean parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF boolean array parameter class
-# -----------------------------------------------------
-
-class IrafParAB(_BooleanMixin,IrafArrayPar):
-
-    """IRAF boolean array parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF integer parameter mixin class
-# -----------------------------------------------------
-
-class _IntMixin:
-
-    """IRAF integer parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        if value is None:
-            return ""
-        else:
-            return str(value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    # coerce value to integer
-    def _coerceOneValue(self,value,strict=0):
-        if value == INDEF:
-            return INDEF
-        elif value is None or isinstance(value,int):
-            return value
-        elif value in ("", "None", "NONE"):
-            return None
-        elif isinstance(value,float):
-            # try converting to integer
-            try:
-                return int(value)
-            except (ValueError, OverflowError):
-                pass
-        elif isinstance(value,str):
-            s2 = irafutils.stripQuotes(value.strip())
-            if s2 == "INDEF" or \
-              ((not strict) and (s2.upper() == "INDEF")):
-                return INDEF
-            elif s2[0:1] == ")":
-                # assume this is indirection -- just save it as a string
-                return s2
-            elif s2[-1:] == "x":
-                # hexadecimal
-                return int(s2[:-1],16)
-            elif "." in s2:
-                # try interpreting as a float and converting to integer
-                try:
-                    return int(float(s2))
-                except (ValueError, OverflowError):
-                    pass
-            else:
-                try:
-                    return int(s2)
-                except ValueError:
-                    pass
-        else:
-            # maybe it has an int method
-            try:
-                return int(value)
-            except ValueError:
-                pass
-        raise ValueError("Parameter %s: illegal integer value %s" %
-                (self.name, repr(value)))
-
-# -----------------------------------------------------
-# IRAF integer parameter class
-# -----------------------------------------------------
-
-class IrafParI(_IntMixin,IrafPar):
-
-    """IRAF integer parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF integer array parameter class
-# -----------------------------------------------------
-
-class IrafParAI(_IntMixin,IrafArrayPar):
-
-    """IRAF integer array parameter class"""
-    pass
-
-# -----------------------------------------------------
-# Strict integer parameter mixin class
-# -----------------------------------------------------
-
-class _StrictIntMixin(_IntMixin):
-
-    """Strict integer parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        return str(value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    # coerce value to integer
-    def _coerceOneValue(self,value,strict=0):
-        if value is None or isinstance(value,int):
-            return value
-        elif isinstance(value,str):
-            s2 = irafutils.stripQuotes(value.strip())
-            if s2[-1:] == "x":
-                # hexadecimal
-                return int(s2[:-1],16)
-            elif s2 == '':
-                raise ValueError('Parameter '+self.name+ \
-                      ': illegal empty integer value')
-            else:
-                # see if it is a stringified int
-                try:
-                    return int(s2)
-                except ValueError:
-                    pass
-        # otherwise it is not a strict integer
-        raise ValueError("Parameter %s: illegal integer value %s" %
-                (self.name, repr(value)))
-
-# -----------------------------------------------------
-# Strict integer parameter class
-# -----------------------------------------------------
-
-class StrictParI(_StrictIntMixin,IrafPar):
-
-    """Strict integer parameter class"""
-    pass
-
-
-# -----------------------------------------------------
-# IRAF real parameter mixin class
-# -----------------------------------------------------
-
-_re_d = re.compile(r'[Dd]')
-_re_colon = re.compile(r':')
-
-class _RealMixin:
-
-    """IRAF real parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        if value is None:
-            return ""
-        else:
-            return str(value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    def _checkAttribs(self, strict):
-        """Check initial attributes to make sure they are legal"""
-        if self.choice:
-            warning("Choice values not allowed for real-type parameter " +
-                    self.name, strict)
-            self.choice = None
-
-    # coerce value to real
-    def _coerceOneValue(self,value,strict=0):
-        if value == INDEF:
-            return INDEF
-        elif value is None or isinstance(value,float):
-            return value
-        elif value in ("", "None", "NONE"):
-            return None
-        elif isinstance(value, int_types):
-            return float(value)
-        elif isinstance(value,str):
-            s2 = irafutils.stripQuotes(value.strip())
-            if s2 == "INDEF" or \
-              ((not strict) and (s2.upper() == "INDEF")):
-                return INDEF
-            elif s2[0:1] == ")":
-                # assume this is indirection -- just save it as a string
-                return s2
-            # allow +dd:mm:ss.s sexagesimal format for floats
-            fvalue = 0.0
-            vscale = 1.0
-            vsign = 1
-            i1 = 0
-            mm = _re_colon.search(s2)
-            if mm is not None:
-                if s2[0:1] == "-":
-                    i1 = 1
-                    vsign = -1
-                elif s2[0:1] == "+":
-                    i1 = 1
-                while mm is not None:
-                    i2 = mm.start()
-                    fvalue = fvalue + int(s2[i1:i2])/vscale
-                    i1 = i2+1
-                    vscale = vscale*60.0
-                    mm = _re_colon.search(s2,i1)
-            # special handling for d exponential notation
-            mm = _re_d.search(s2,i1)
-            try:
-                if mm is None:
-                    return vsign*(fvalue + float(s2[i1:])/vscale)
-                else:
-                    return vsign*(fvalue + \
-                            float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale)
-            except ValueError:
-                pass
-        else:
-            # maybe it has a float method
-            try:
-                return float(value)
-            except ValueError:
-                pass
-        raise ValueError("Parameter %s: illegal float value %s" %
-                (self.name, repr(value)))
-
-
-# -----------------------------------------------------
-# IRAF real parameter class
-# -----------------------------------------------------
-
-class IrafParR(_RealMixin,IrafPar):
-
-    """IRAF real parameter class"""
-    pass
-
-# -----------------------------------------------------
-# IRAF real array parameter class
-# -----------------------------------------------------
-
-class IrafParAR(_RealMixin,IrafArrayPar):
-
-    """IRAF real array parameter class"""
-    pass
-
-# -----------------------------------------------------
-# Strict real parameter mixin class
-# -----------------------------------------------------
-
-class _StrictRealMixin(_RealMixin):
-
-    """Strict real parameter mixin class"""
-
-    #--------------------------------------------
-    # public methods
-    #--------------------------------------------
-
-    def toString(self, value, quoted=0):
-        return str(value)
-
-    #--------------------------------------------
-    # private methods
-    #--------------------------------------------
-
-    # coerce value to real
-    def _coerceOneValue(self,value,strict=0):
-        if value is None or isinstance(value,float):
-            return value
-        elif isinstance(value, int_types):
-            return float(value)
-        elif isinstance(value,str):
-            s2 = irafutils.stripQuotes(value.strip())
-            if s2 == '':
-                raise ValueError('Parameter '+self.name+ \
-                      ': illegal empty float value')
-            # allow +dd:mm:ss.s sexagesimal format for floats
-            fvalue = 0.0
-            vscale = 1.0
-            vsign = 1
-            i1 = 0
-            mm = _re_colon.search(s2)
-            if mm is not None:
-                if s2[0:1] == "-":
-                    i1 = 1
-                    vsign = -1
-                elif s2[0:1] == "+":
-                    i1 = 1
-                while mm is not None:
-                    i2 = mm.start()
-                    fvalue = fvalue + int(s2[i1:i2])/vscale
-                    i1 = i2+1
-                    vscale = vscale*60.0
-                    mm = _re_colon.search(s2,i1)
-            # special handling for d exponential notation
-            mm = _re_d.search(s2,i1)
-            try:
-                if mm is None:
-                    return vsign*(fvalue + float(s2[i1:])/vscale)
-                else:
-                    return vsign*(fvalue + \
-                            float(s2[i1:mm.start()]+"E"+s2[mm.end():])/vscale)
-            except ValueError:
-                pass
-            # see if it's a stringified float
-            try:
-                return float(s2)
-            except ValueError:
-                raise ValueError("Parameter %s: illegal float value %s" %
-                                 (self.name, repr(value)))
-        # Otherwise it is not a strict float
-        raise ValueError("Parameter %s: illegal float value %s" %
-                         (self.name, repr(value)))
-
-
-# -----------------------------------------------------
-# Strict real parameter class
-# -----------------------------------------------------
-
-class StrictParR(_StrictRealMixin,IrafPar):
-
-    """Strict real parameter class"""
-    pass
-
-
-# -----------------------------------------------------
-# Utility routine for parsing choice string
-# -----------------------------------------------------
-
-_re_choice = re.compile(r'\|')
-
-def _getChoice(s, strict):
-    clist = s.split("|")
-    # string is allowed to start and end with "|", so ignore initial
-    # and final empty strings
-    if not clist[0]: del clist[0]
-    if len(clist)>1 and not clist[-1]: del clist[-1]
-    return clist
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/bitmask.py b/required_pkgs/stsci.tools/lib/stsci/tools/bitmask.py
deleted file mode 100644
index 68c40a6..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/bitmask.py
+++ /dev/null
@@ -1,223 +0,0 @@
-"""
-A module that provides functions for manipulating bitmasks and data quality (DQ) arrays.
-
-:Authors: Mihai Cara (contact: help at stsci.edu)
-
-:License: `<http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE>`_
-
-"""
-
-import numpy as np
-
-__version__ = '0.1.0'
-__vdate__ = '29-March-2015'
-__author__ = 'Mihai Cara'
-
-
-def interpret_bits_value(val):
-    """
-    Converts input bits value from string to a single integer value or None.
-    If a comma- or '+'-separated set of values are provided, they are summed.
-
-    .. note::
-        In order to flip the bits of the final result (after summation),
-        for input of `str` type, prepend '~' to the input string. '~' must
-        be prepended to the *entire string* and not to each bit flag!
-
-    Parameters
-    ----------
-    val : int, str, None
-        An integer bit mask or flag, `None`, or a comma- or '+'-separated
-        string list of integer bit values. If `val` is a `str` and if
-        it is prepended with '~', then the output bit mask will have its
-        bits flipped (compared to simple sum of input val).
-
-    Returns
-    -------
-    bitmask : int or None
-        Returns an integer bit mask formed from the input bit value
-        or `None` if input `val` parameter is `None` or an empty string.
-        If input string value was prepended with '~', then returned
-        value will have its bits flipped (inverse mask).
-
-    Examples
-    --------
-        >>> "{0:016b}".format(0xFFFF & interpret_bits_value(28) )
-        '0000000000011100'
-        >>> "{0:016b}".format(0xFFFF & interpret_bits_value('4,8,16') )
-        '0000000000011100'
-        >>> "{0:016b}".format(0xFFFF & interpret_bits_value('~4,8,16') )
-        '1111111111100011'
-        >>> "{0:016b}".format(0xFFFF & interpret_bits_value('~(4+8+16)') )
-        '1111111111100011'
-
-    """
-    if isinstance(val, int) or val is None:
-        return val
-
-    else:
-        val = str(val).strip()
-
-        if val.startswith('~'):
-            flip_bits = True
-            val = val[1:].lstrip()
-        else:
-            flip_bits = False
-
-        if val.startswith('('):
-            if val.endswith(')'):
-                val = val[1:-1].strip()
-            else:
-                raise ValueError('Unbalanced parantheses or incorrect syntax.')
-
-        if ',' in val:
-            valspl = val.split(',')
-            bitmask = 0
-            for v in valspl:
-                bitmask += int(v)
-
-        elif '+' in val:
-            valspl = val.split('+')
-            bitmask = 0
-            for v in valspl:
-                bitmask += int(v)
-
-        elif val.upper() in ['', 'NONE', 'INDEF']:
-            return None
-
-        else:
-            bitmask = int(val)
-
-        if flip_bits:
-            bitmask = ~bitmask
-
-    return bitmask
-
-
-def bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=np.uint8):
-    """
-    bitmask2mask(bitmask, ignore_bits, good_mask_value=1, dtype=numpy.uint8)
-    Interprets an array of bit flags and converts it to a "binary" mask array.
-    This function is particularly useful to convert data quality arrays to
-    binary masks.
-
-    Parameters
-    ----------
-    bitmask : numpy.ndarray
-        An array of bit flags. Values different from zero are interpreted as
-        "bad" values and values equal to zero are considered as "good" values.
-        However, see `ignore_bits` parameter on how to ignore some bits
-        in the `bitmask` array.
-
-    ignore_bits : int, str, None
-        An integer bit mask, `None`, or a comma- or '+'-separated
-        string list of integer bit values that indicate what bits in the
-        input `bitmask` should be *ignored* (i.e., zeroed). If `ignore_bits`
-        is a `str` and if it is prepended with '~', then the meaning
-        of `ignore_bits` parameters will be reversed: now it will be
-        interpreted as a list of bits to be *used* (or *not ignored*) when
-        deciding what elements of the input `bitmask` array are "bad".
-
-        The `ignore_bits` parameter is the integer sum of all of the bit
-        values from the input `bitmask` array that should be considered
-        "good" when creating the output binary mask. For example, if
-        values in the `bitmask` array can be combinations
-        of 1, 2, 4, and 8 flags and one wants to consider that
-        values having *only* bit flags 2 and/or 4 as being "good",
-        then `ignore_bits` should be set to 2+4=6. Then a `bitmask` element
-        having values 2,4, or 6 will be considered "good", while an
-        element with a value, e.g., 1+2=3, 4+8=12, etc. will be interpreted
-        as "bad".
-
-        Alternatively, one can enter a comma- or '+'-separated list
-        of integer bit flags that should be added to obtain the
-        final "good" bits. For example, both ``4,8`` and ``4+8``
-        are equivalent to setting `ignore_bits` to 12.
-
-        See :py:func:`interpret_bits_value` for examples.
-
-        | Setting `ignore_bits` to `None` effectively will interpret
-          all `bitmask` elements as "good" regardless of their value.
-
-        | Setting `ignore_bits` to 0 effectively will assume that all
-          non-zero elements in the input `bitmask` array are to be
-          interpreted as "bad".
-
-        | In order to reverse the meaning of the `ignore_bits`
-          parameter from indicating bits in the values of `bitmask`
-          elements that should be ignored when deciding which elements
-          are "good" (these are the elements that are zero after ignoring
-          `ignore_bits`), to indicating the bits should be used
-          exclusively in deciding whether a `bitmask` element is "good",
-          prepend '~' to the string value. For example, in order to use
-          **only** (or **exclusively**) flags 4 and 8 (2nd and 3rd bits)
-          in the values of the input `bitmask` array when deciding whether
-          or not that element is "good", set `ignore_bits` to ``~4+8``,
-          or ``~4,8``. To obtain the same effect with an `int` input value
-          (except for 0), enter -(4+8+1)=-13. Following this convention,
-          a `ignore_bits` string value of ``'~0'`` would be equivalent to
-          setting ``ignore_bits=None``.
-
-    good_mask_value : int, bool (Default = 1)
-        This parameter is used to derive the values that will be assigned to
-        the elements in the output `mask` array that correspond to the "good"
-        flags (that are 0 after zeroing bits specified by `ignore_bits`)
-        in the input `bitmask` array. When `good_mask_value` is non-zero or
-        `True` then values in the output mask array corresponding to "good"
-        bit flags in `bitmask` will be 1 (or `True` if `dtype` is `bool`) and
-        values of corresponding to "bad" flags will be 0. When
-        `good_mask_value` is zero or `False` then values in the output mask
-        array corresponding to "good" bit flags in `bitmask` will be 0
-        (or `False` if `dtype` is `bool`) and values of corresponding
-        to "bad" flags will be 1.
-
-    dtype : data-type (Default = numpy.uint8)
-        The desired data-type for the output binary mask array.
-
-    Returns
-    -------
-    mask : numpy.ndarray
-        Returns an array whose elements can have two possible values,
-        e.g., 1 or 0 (or `True` or `False` if `dtype` is `bool`) according to
-        values of to the input `bitmask` elements, `ignore_bits` parameter,
-        and the `good_mask_value` parameter.
-
-    Examples
-    --------
-        >>> from stsci.tools import bitmask
-        >>> dqbits = np.asarray([[0,0,1,2,0,8,12,0],[10,4,0,0,0,16,6,0]])
-        >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=int)
-        array([[1, 1, 0, 0, 1, 0, 0, 1],
-               [0, 0, 1, 1, 1, 0, 0, 1]])
-        >>> bitmask.bitmask2mask(dqbits, ignore_bits=0, dtype=bool)
-        array([[ True,  True, False, False,  True, False, False,  True],
-               [False, False,  True,  True,  True, False, False,  True]], dtype=bool)
-        >>> bitmask.bitmask2mask(dqbits, ignore_bits=6, good_mask_value=0, dtype=int)
-        array([[0, 0, 1, 0, 0, 1, 1, 0],
-               [1, 0, 0, 0, 0, 1, 0, 0]])
-        >>> bitmask.bitmask2mask(dqbits, ignore_bits=~6, good_mask_value=0, dtype=int)
-        array([[0, 0, 0, 1, 0, 0, 1, 0],
-               [1, 1, 0, 0, 0, 0, 1, 0]])
-        >>> bitmask.bitmask2mask(dqbits, ignore_bits='~(2+4)', good_mask_value=0, dtype=int)
-        array([[0, 0, 0, 1, 0, 0, 1, 0],
-               [1, 1, 0, 0, 0, 0, 1, 0]])
-
-    """
-
-    ignore_bits = interpret_bits_value(ignore_bits)
-
-    if good_mask_value:
-        mask = np.ones_like(bitmask, dtype=dtype)
-        if ignore_bits is None:
-            return mask
-        bad_mask_value = 0
-
-    else:
-        mask = np.zeros_like(bitmask, dtype=dtype)
-        if ignore_bits is None:
-            return mask
-        bad_mask_value = 1
-
-    mask[np.bitwise_and(bitmask, ~ignore_bits) > 0] = bad_mask_value
-
-    return mask
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/capable.py b/required_pkgs/stsci.tools/lib/stsci/tools/capable.py
deleted file mode 100644
index 258cc3e..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/capable.py
+++ /dev/null
@@ -1,167 +0,0 @@
-""" Learn basic capabilities here (e.g. can we display graphics?).
-This is meant to be fast and light, having no complicated dependencies, so
-that any module can fearlessly import this without adverse effects or
-performance concerns.
-
-$Id: capable.py 46455 2015-11-24 20:46:40Z sontag $
-"""
-
-from __future__ import division # confidence high
-
-import os, sys
-PY3K = sys.version_info[0] > 2
-
-def is_darwin_and_x():
-    """ Convenience function.  Returns True if is an X11-linked Python/Tkinter
-    build on OSX.  This is intended to be quick and easy without further
-    imports.  As a result, this relies on the assumption that on OSX, PyObjC
-    is installed (only) in the Framework builds of Python. """
-    if not sys.platform == 'darwin':
-        return False
-
-    return which_darwin_linkage() == "x11"
-
-
-def which_darwin_linkage(force_otool_check=False):
-    """ Convenience function.  Returns one of ('x11', 'aqua') in answer to the
-    question of whether this is an X11-linked Python/Tkinter, or a natively
-    built (framework, Aqua) one.  This is only for OSX.
-    This relies on the assumption that on OSX, PyObjC is installed
-    in the Framework builds of Python.  If it doesn't find PyObjC,
-    this inspects the actual tkinter library binary via otool.
-
-    One driving requirement here is to try to make the determination quickly
-    and quietly without actually importing/loading any GUI libraries.  We
-    even want to avoid importing Tkinter if we can.
-    """
-
-    # sanity check
-    assert sys.platform=='darwin', 'Incorrect usage, not on OSX'
-
-    # If not forced to run otool, then make some quick and dirty
-    # simple checks/assumptions, which do not add to startup time and do not
-    # attempt to initialize any graphics.
-    if not force_otool_check:
-
-        # There will (for now) only ever be an aqua-linked Python/Tkinter
-        # when using Ureka on darwin, so this is an easy short-circuit check.
-        if 'UR_DIR' in os.environ:
-            return "aqua"
-
-        # There will *usually* be PyObjC modules on sys.path on the natively-
-        # linked Python. This is assumed to be always correct on Python 2.x, as
-        # of 2012.  This is kludgy but quick and effective.
-        sp = ",".join(sys.path)
-        sp = sp.lower().strip(',')
-        if '/pyobjc' in sp or 'pyobjc,' in sp or 'pyobjc/' in sp or sp.endswith('pyobjc'):
-            return "aqua"
-
-        # Try one more thing - look for the physical PyObjC install dir under site-packages
-        # The assumption above using sys.path does not seem to be correct as of the
-        # combination of Python2.7.9/PyObjC3.0.4/2015.
-        sitepacksloc = os.path.split(os.__file__)[0]+'/site-packages/objc'
-        if os.path.exists(sitepacksloc):
-            return "aqua"
-
-        # OK, no trace of PyObjC found - need to fall through to the forced otool check.
-
-    # Use otool shell command
-    if PY3K:
-        import tkinter as Tkinter
-    else:
-        import Tkinter
-    import subprocess
-    try:
-        tk_dyn_lib = Tkinter._tkinter.__file__
-    except AttributeError: # happens on Ureka
-        if 'UR_DIR' in os.environ:
-            return 'aqua'
-        else:
-            return 'unknown'
-    libs = subprocess.check_output(('/usr/bin/otool', '-L', tk_dyn_lib)).decode('ascii')
-    if libs.find('/libX11.') >= 0:
-        return "x11"
-    else:
-        return "aqua"
-
-
-def get_dc_owner(raises, mask_if_self):
-    """ Convenience function to return owner of /dev/console.
-    If raises is True, this raises an exception on any error.
-    If not, it returns any error string as the owner name.
-    If owner is self, and if mask_if_self, returns "<self>"."""
-    try:
-        from pwd import getpwuid
-        owner_uid = os.stat('/dev/console').st_uid
-        self_uid  = os.getuid()
-        if mask_if_self and owner_uid == self_uid:
-            return "<self>"
-        owner_name = getpwuid(owner_uid).pw_name
-        return owner_name
-    except Exception as e:
-        if raises:
-            raise e
-        else:
-            return str(e)
-
-
-OF_GRAPHICS = True
-
-if 'PYRAF_NO_DISPLAY' in os.environ or 'PYTOOLS_NO_DISPLAY' in os.environ:
-    OF_GRAPHICS = False
-
-if OF_GRAPHICS and sys.platform == 'darwin':
-    #
-    # On OSX, there is an AppKit error where Python itself will abort if
-    # Tkinter operations (e.g. Tkinter._test() ...) are attempted when running
-    # from a remote terminal.  In these situations, it is not even safe to put
-    # the code in a try/except block, since the AppKit error seems to happen
-    # *asynchronously* within ObjectiveC code.  See PyRAF ticket #149.
-    #
-    # SO, let's try a quick simple test here (only on OSX) to find out if we
-    # are the "console user".  If we are not, then we don't even want to attempt
-    # any windows/graphics calls.  See "console user" here:
-    #     http://developer.apple.com/library/mac/#technotes/tn2083/_index.html
-    # If we are the console user, we own /dev/console and can read from it.
-    # When no one is logged in, /dev/console is owned by "root". When user "bob"
-    # is logged in locally/physically, /dev/console is owned by "bob".
-    # However, if "bob" restarts the X server while logged in, /dev/console
-    # may be owned by "sysadmin" - so we check for that.
-    #
-    if 'PYRAF_YES_DISPLAY' not in os.environ:
-        # the use of PYRAF_YES_DISPLAY is a temporary override while we
-        # debug why a user might have no read-acces to /dev/console
-        dc_owner = get_dc_owner(False, False)
-        OF_GRAPHICS = dc_owner == 'sysadmin' or os.access("/dev/console", os.R_OK)
-
-    # Add a double-check for remote X11 users.  We *think* this is a smaller
-    # set of cases, so we do it last minute here:
-    if not OF_GRAPHICS:
-        # On OSX, but logged in remotely. Normally (with native build) this
-        # means there are no graphics.  But, what if they're calling an
-        # X11-linked Python?  Then we should allow graphics to be attempted.
-        OF_GRAPHICS = is_darwin_and_x()
-
-        # OF_GRAPHICS will be True here in only two cases (2nd should be rare):
-        #    An OSX Python build linked with X11, or
-        #    An OSX Python build linked natively where PyObjC was left out
-
-# After all that, we may have decided that we want graphics.  Now
-# that we know it is ok to try to import Tkinter, we can test if it
-# is there.  If it is not, we are not capable of graphics.
-if OF_GRAPHICS :
-    try :
-        if PY3K:
-            import tkinter as Tkinter
-        else:
-            import Tkinter
-    except ImportError :
-        TKINTER_IMPORT_FAILED = 1
-        OF_GRAPHICS = False
-
-# Using tkFileDialog from PyRAF (and maybe in straight TEAL) is crashing python
-# itself on OSX only.  Allow on Linux.  Mac: use this until PyRAF #171 fixed.
-OF_TKFD_IN_EPAR = True
-if sys.platform == 'darwin' and OF_GRAPHICS and \
-   not is_darwin_and_x(): # if framework ver
-    OF_TKFD_IN_EPAR = 'TEAL_TRY_TKFD' in list(os.environ.keys())
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/cfgpars.py b/required_pkgs/stsci.tools/lib/stsci/tools/cfgpars.py
deleted file mode 100644
index db91875..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/cfgpars.py
+++ /dev/null
@@ -1,1336 +0,0 @@
-""" Contains the ConfigObjPars class and any related functionality.
-
-$Id: cfgpars.py 40432 2015-05-26 12:49:51Z bsimon $
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-import copy, glob, os, stat, sys
-
-if sys.version_info[0] > 2:
-    string_types = str
-else:
-    string_types = basestring
-
-# ConfigObj modules
-from . import configobj, validate
-
-# Local modules
-from . import basicpar, eparoption, irafutils, taskpars, vtor_checks
-
-# Globals and useful functions
-
-APP_NAME = 'TEAL'
-TASK_NAME_KEY = '_task_name_'
-
-class DuplicateKeyError(Exception):
-    pass
-
-class NoCfgFileError(Exception):
-    pass
-
-
-def getAppDir():
-    """ Return our application dir.  Create it if it doesn't exist. """
-    # Be sure the resource dir exists
-    theDir = os.path.expanduser('~/.')+APP_NAME.lower()
-    if not os.path.exists(theDir):
-        try:
-            os.mkdir(theDir)
-        except OSError:
-            print('Could not create "'+theDir+'" to save GUI settings.')
-            theDir = "./"+APP_NAME.lower()
-    return theDir
-
-
-def getObjectFromTaskArg(theTask, strict, setAllToDefaults):
-    """ Take the arg (usually called theTask), which can be either a subclass
-    of ConfigObjPars, or a string package name, or a .cfg filename - no matter
-    what it is - take it and return a ConfigObjPars object.
-    strict - bool - warning severity, passed to the ConfigObjPars() ctor
-    setAllToDefaults - bool - if theTask is a pkg name, force all to defaults
-    """
-
-    # Already in the form we need (instance of us or of subclass)
-    if isinstance(theTask, ConfigObjPars):
-        if setAllToDefaults:
-            raise RuntimeError('Called getObjectFromTaskArg with existing'+\
-                  ' object AND setAllToDefaults - is unexpected use case.')
-        # If it is an existing object, make sure it's internal param list is
-        # up to date with it's ConfigObj dict, since the user may have manually
-        # edited the dict before calling us.
-        theTask.syncParamList(False) # use strict somehow?
-        # Note - some validation is done here in IrafPar creation, but it is
-        # not the same validation done by the ConfigObj s/w (no check funcs).
-        # Do we want to do that too here?
-        return theTask
-
-    # For example, a .cfg file
-    if os.path.isfile(str(theTask)):
-        try:
-            return ConfigObjPars(theTask, strict=strict,
-                                 setAllToDefaults=setAllToDefaults)
-        except KeyError:
-            # this might just be caused by a file sitting in the local cwd with
-            # the same exact name as the package we want to import, let's see
-            if theTask.find('.') > 0: # it has an extension, like '.cfg'
-                raise # this really was an error
-            # else we drop down to the next step - try it as a pkg name
-
-    # Else it must be a Python package name to load
-    if isinstance(theTask, str) and setAllToDefaults:
-        # NOTE how we pass the task name string in setAllToDefaults
-        return ConfigObjPars('', setAllToDefaults=theTask, strict=strict)
-    else:
-        return getParsObjForPyPkg(theTask, strict)
-
-
-def getEmbeddedKeyVal(cfgFileName, kwdName, dflt=None):
-    """ Read a config file and pull out the value of a given keyword. """
-    # Assume this is a ConfigObj file.  Use that s/w to quickly read it and
-    # put it in dict format.  Assume kwd is at top level (not in a section).
-    # The input may also be a .cfgspc file.
-    #
-    # Only use ConfigObj here as a tool to generate a dict from a file - do
-    # not use the returned object as a ConfigObj per se.  As such, we can call
-    # with "simple" format, ie. no cfgspc, no val'n, and "list_values"=False.
-    try:
-        junkObj = configobj.ConfigObj(cfgFileName, list_values=False)
-    except:
-        if kwdName == TASK_NAME_KEY:
-            raise KeyError('Can not parse as a parameter config file: '+ \
-                           '\n\t'+os.path.realpath(cfgFileName))
-        else:
-            raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \
-                           '\n\t'+os.path.realpath(cfgFileName))
-
-    if kwdName in junkObj:
-        retval = junkObj[kwdName]
-        del junkObj
-        return retval
-    # Not found
-    if dflt != None:
-        del junkObj
-        return dflt
-    else:
-        if kwdName == TASK_NAME_KEY:
-            raise KeyError('Can not parse as a parameter config file: '+ \
-                           '\n\t'+os.path.realpath(cfgFileName))
-        else:
-            raise KeyError('Unfound key "'+kwdName+'" while parsing: '+ \
-                           '\n\t'+os.path.realpath(cfgFileName))
-
-
-def findCfgFileForPkg(pkgName, theExt, pkgObj=None, taskName=None):
-    """ Locate the configuration files for/from/within a given python package.
-    pkgName is a string python package name.  This is used unless pkgObj
-    is given, in which case pkgName is taken from pkgObj.__name__.
-    theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is
-    given as taskName, otherwise one is determined using the pkgName.
-    Returns a tuple of (package-object, cfg-file-name). """
-    # arg check
-    ext = theExt
-    if ext[0] != '.': ext = '.'+theExt
-
-    # Do the import, if needed
-    pkgsToTry = {}
-    if pkgObj:
-        pkgsToTry[pkgObj.__name__] = pkgObj
-    else:
-        # First try something simple like a regular or dotted import
-        try:
-            fl = []
-            if pkgName.find('.') > 0:
-                fl = [ pkgName[:pkgName.rfind('.')], ]
-            pkgsToTry[str(pkgName)] = __import__(str(pkgName), fromlist=fl)
-        except:
-            throwIt = True
-            # One last case to try is something like "csc_kill" from
-            # "acstools.csc_kill", but this convenience capability will only be
-            # allowed if the parent pkg (acstools) has already been imported.
-            if isinstance(pkgName, string_types) and pkgName.find('.') < 0:
-                matches = [x for x in sys.modules.keys() \
-                           if x.endswith("."+pkgName)]
-                if len(matches)>0:
-                    throwIt = False
-                    for mmm in matches:
-                        pkgsToTry[mmm] = sys.modules[mmm]
-            if throwIt:
-                raise NoCfgFileError("Unfound package or "+ext+" file via: "+\
-                                     "import "+str(pkgName))
-
-    # Now that we have the package object (or a few of them to try), for each
-    # one find the .cfg or .cfgspc file, and return
-    # Return as soon as ANY match is found.
-    for aPkgName in pkgsToTry:
-        aPkg = pkgsToTry[aPkgName]
-        path = os.path.dirname(aPkg.__file__)
-        if len(path) < 1: path = '.'
-        flist = irafutils.rglob(path, "*"+ext)
-        if len(flist) < 1:
-            continue
-
-        # Go through these and find the first one for the assumed or given task
-        # name.  The task name for 'BigBlackBox.drizzle' would be 'drizzle'.
-        if taskName == None:
-            taskName = aPkgName.split(".")[-1]
-        flist.sort()
-        for f in flist:
-            # A .cfg file gets checked for _task_name_=val, but a .cfgspc file
-            # will have a string check function signature as the val.
-            if ext == '.cfg':
-               itsTask = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
-            else: # .cfgspc
-               sigStr  = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
-               # .cfgspc file MUST have an entry for TASK_NAME_KEY w/ a default
-               itsTask = vtor_checks.sigStrToKwArgsDict(sigStr)['default']
-            if itsTask == taskName:
-                # We've found the correct file in an installation area.  Return
-                # the package object and the found file.
-                return aPkg, f
-
-    # What, are you still here?
-    raise NoCfgFileError('No valid '+ext+' files found in package: "'+ \
-                         str(pkgName)+'" for task: "'+str(taskName)+'"')
-
-
-def findAllCfgTasksUnderDir(aDir):
-    """ Finds all installed tasks by examining any .cfg files found on disk
-        at and under the given directory, as an installation might be.
-        This returns a dict of { file name : task name }
-    """
-    retval = {}
-    for f in irafutils.rglob(aDir, '*.cfg'):
-        retval[f] = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
-    return retval
-
-
-def getCfgFilesInDirForTask(aDir, aTask, recurse=False):
-    """ This is a specialized function which is meant only to keep the
-        same code from needlessly being much repeated throughout this
-        application.  This must be kept as fast and as light as possible.
-        This checks a given directory for .cfg files matching a given
-        task.  If recurse is True, it will check subdirectories.
-        If aTask is None, it returns all files and ignores aTask.
-    """
-    if recurse:
-        flist = irafutils.rglob(aDir, '*.cfg')
-    else:
-        flist = glob.glob(aDir+os.sep+'*.cfg')
-    if aTask:
-        retval = []
-        for f in flist:
-            try:
-                if aTask == getEmbeddedKeyVal(f, TASK_NAME_KEY, ''):
-                    retval.append(f)
-            except Exception as e:
-                print('Warning: '+str(e))
-        return retval
-    else:
-        return flist
-
-
-def getParsObjForPyPkg(pkgName, strict):
-    """ Locate the appropriate ConfigObjPars (or subclass) within the given
-        package. NOTE this begins the same way as getUsrCfgFilesForPyPkg().
-        Look for .cfg file matches in these places, in this order:
-          1 - any named .cfg file in current directory matching given task
-          2 - if there exists a ~/.teal/<taskname>.cfg file
-          3 - any named .cfg file in SOME*ENV*VAR directory matching given task
-          4 - the installed default .cfg file (with the given package)
-    """
-    # Get the python package and it's .cfg file - need this no matter what
-    installedPkg, installedFile = findCfgFileForPkg(pkgName, '.cfg')
-    theFile = None
-    tname = getEmbeddedKeyVal(installedFile, TASK_NAME_KEY)
-
-    # See if the user has any of their own .cfg files in the cwd for this task
-    if theFile == None:
-        flist = getCfgFilesInDirForTask(os.getcwd(), tname)
-        if len(flist) > 0:
-            if len(flist) == 1: # can skip file times sort
-                theFile = flist[0]
-            else:
-                # There are a few different choices.  In the absence of
-                # requirements to the contrary, just take the latest.  Set up a
-                # list of tuples of (mtime, fname) so we can sort by mtime.
-                ftups = [ (os.stat(f)[stat.ST_MTIME], f) for f in flist]
-                ftups.sort()
-                theFile = ftups[-1][1]
-
-    # See if the user has any of their own app-dir .cfg files for this task
-    if theFile == None:
-        flist = getCfgFilesInDirForTask(getAppDir(), tname) # verifies tname
-        flist = [f for f in flist if os.path.basename(f) == tname+'.cfg']
-        if len(flist) > 0:
-            theFile = flist[0]
-            assert len(flist) == 1, str(flist) # should never happen
-
-    # Add code to check an env. var defined area?  (speak to users first)
-
-    # Did we find one yet?  If not, use the installed version
-    useInstVer = False
-    if theFile == None:
-        theFile = installedFile
-        useInstVer = True
-
-    # Create a stand-in instance from this file.  Force a read-only situation
-    # if we are dealing with the installed, (expected to be) unwritable file.
-    return ConfigObjPars(theFile, associatedPkg=installedPkg,
-                         forceReadOnly=useInstVer, strict=strict)
-
-
-def getUsrCfgFilesForPyPkg(pkgName):
-    """ See if the user has one of their own local .cfg files for this task,
-        such as might be created automatically during the save of a read-only
-        package, and return their names. """
-    # Get the python package and it's .cfg file
-    thePkg, theFile = findCfgFileForPkg(pkgName, '.cfg')
-    # See if the user has any of their own local .cfg files for this task
-    tname = getEmbeddedKeyVal(theFile, TASK_NAME_KEY)
-    flist = getCfgFilesInDirForTask(getAppDir(), tname)
-    return flist
-
-
-def checkSetReadOnly(fname, raiseOnErr = False):
-    """ See if we have write-privileges to this file.  If we do, and we
-    are not supposed to, then fix that case. """
-    if os.access(fname, os.W_OK):
-        # We can write to this but it is supposed to be read-only. Fix it.
-        # Take away usr-write, leave group and other alone, though it
-        # may be simpler to just force/set it to: r--r--r-- or r--------
-        irafutils.setWritePrivs(fname, False, ignoreErrors= not raiseOnErr)
-
-
-def flattenDictTree(aDict):
-    """ Takes a dict of vals and dicts (so, a tree) as input, and returns
-    a flat dict (only one level) as output.  All key-vals are moved to
-    the top level.  Sub-section dict names (keys) are ignored/dropped.
-    If there are name collisions, an error is raised. """
-    retval = {}
-    for k in aDict:
-        val = aDict[k]
-        if isinstance(val, dict):
-            # This val is a dict, get its data (recursively) into a flat dict
-            subDict = flattenDictTree(val)
-            # Merge its dict of data into ours, watching for NO collisions
-            rvKeySet  = set(retval.keys())
-            sdKeySet = set(subDict.keys())
-            intr = rvKeySet.intersection(sdKeySet)
-            if len(intr) > 0:
-                raise DuplicateKeyError("Flattened dict already has "+ \
-                    "key(s): "+str(list(intr))+" - cannot flatten this.")
-
-            else:
-                retval.update(subDict)
-        else:
-            if k in retval:
-                raise DuplicateKeyError("Flattened dict already has key: "+\
-                                        k+" - cannot flatten this.")
-            else:
-                retval[k] = val
-    return retval
-
-
-def countKey(theDict, name):
-    """ Return the number of times the given par exists in this dict-tree,
-    since the same key name may be used in different sections/sub-sections. """
-
-    retval = 0
-    for key in theDict:
-        val = theDict[key]
-        if isinstance(val, dict):
-            retval += countKey(val, name) # recurse
-        else:
-            if key == name:
-                retval += 1
-                # can't break, even tho we found a hit, other items on
-                # this level will not be named "name", but child dicts
-                # may have further counts
-    return retval
-
-
-def findFirstPar(theDict, name, _depth=0):
-    """ Find the given par.  Return tuple: (its own (sub-)dict, its value).
-    Returns the first match found, without checking whether the given key name
-    is unique or whether it is used in multiple sections. """
-
-    for key in theDict:
-        val = theDict[key]
-#       print _depth*'   ', key, str(val)[:40]
-        if isinstance(val, dict):
-            retval = findFirstPar(val, name, _depth=_depth+1) # recurse
-            if retval != None:
-                return retval
-            # else keep looking
-        else:
-            if key == name:
-                return theDict, theDict[name]
-            # else keep looking
-    # if we get here then we have searched this whole (sub)-section and its
-    # descendants, and found no matches.  only raise if we are at the top.
-    if _depth == 0:
-        raise KeyError(name)
-    else:
-        return None
-
-
-def findScopedPar(theDict, scope, name):
-    """ Find the given par.  Return tuple: (its own (sub-)dict, its value). """
-    # Do not search (like findFirstPar), but go right to the correct
-    # sub-section, and pick it up.  Assume it is there as stated.
-    if len(scope):
-        theDict = theDict[scope] # ! only goes one level deep - enhance !
-    return theDict, theDict[name] # KeyError if unfound
-
-
-def setPar(theDict, name, value):
-    """ Sets a par's value without having to give its scope/section. """
-    section, previousVal = findFirstPar(theDict, name)
-    # "section" is the actual object, not a copy
-    section[name] = value
-
-
-def mergeConfigObj(configObj, inputDict):
-    """ Merge the inputDict values into an existing given configObj instance.
-    The inputDict is a "flat" dict - it has no sections/sub-sections.  The
-    configObj may have sub-sections nested to any depth.  This will raise a
-    DuplicateKeyError if one of the inputDict keys is used more than once in
-    configObj (e.g. within two different sub-sections). """
-    # Expanded upon Warren's version in astrodrizzle
-
-    # Verify that all inputDict keys in configObj are unique within configObj
-    for key in inputDict:
-        if countKey(configObj, key) > 1:
-            raise DuplicateKeyError(key)
-    # Now update configObj with each inputDict item
-    for key in inputDict:
-        setPar(configObj, key, inputDict[key])
-
-
-def integrityTestAllPkgCfgFiles(pkgObj, output=True):
-    """ Given a package OBJECT, inspect it and find all installed .cfg file-
-    using tasks under it.  Then them one at a time via
-    integrityTestTaskCfgFile, and report any/all errors. """
-    assert type(pkgObj) == type(os), \
-           "Expected module arg, got: "+str(type(pkgObj))
-    taskDict = findAllCfgTasksUnderDir(os.path.dirname(pkgObj.__file__))
-    # taskDict is { cfgFileName : taskName }
-    errors = []
-    for fname in taskDict:
-        taskName = taskDict[fname]
-        try:
-            if taskName:
-                if output:
-                    print('In '+pkgObj.__name__+', checking task: '+ 
-                           taskName+', file: '+fname)
-                integrityTestTaskCfgFile(taskName, fname)
-        except Exception as e:
-            errors.append(str(e))
-
-    assert len(errors) == 0, 'Errors found while integrity testing .cfg '+ \
-              'file(s) found under "'+pkgObj.__name__+'":\n'+ \
-              ('\n'.join(errors))
-
-
-def integrityTestTaskCfgFile(taskName, cfgFileName=None):
-    """ For a given task, inspect the given .cfg file (or simply find/use its
-    installed .cfg file), and check those values against the defaults
-    found in the installed .cfgspc file.  They should be the same.
-    If the file name is not given, the installed one is found and used. """
-
-    from . import teal # don't import above, to avoid circular import (may need to mv)
-    if not cfgFileName:
-        ignored, cfgFileName = findCfgFileForPkg(taskName, '.cfg')
-    diffDict = teal.diffFromDefaults(cfgFileName, report=False)
-    if len(diffDict) < 1:
-        return # no error
-    msg = 'The following par:value pairs from "'+cfgFileName+ \
-          '" are not the correct defaults: '+str(diffDict)
-    raise RuntimeError(msg)
-
-
-class ConfigObjPars(taskpars.TaskPars, configobj.ConfigObj):
-    """ This represents a task's dict of ConfigObj parameters. """
-
-    def __init__(self, cfgFileName, forUseWithEpar=True,
-                 setAllToDefaults=False, strict=True,
-                 associatedPkg=None, forceReadOnly=False):
-        """
-        cfgFileName - string path/name of .cfg file
-        forUseWithEpar - bool - will this be used in EPAR?
-        setAllToDefaults - <True, False, or string> string is pkg name to import
-        strict - bool - level of error/warning severity
-        associatedPkg - loaded package object
-        forceReadOnly - bool - make the .cfg file read-only
-        """
-
-        self._forUseWithEpar = forUseWithEpar
-        self._rcDir = getAppDir()
-        self._allTriggers = None # all known triggers in this object
-        self._allDepdcs = None   # all known dependencies in this object
-        self._allExecutes = None # all known codes-to-execute in this object
-        self._neverWrite = []    # all keys which are NOT written out to .cfg
-        self._debugLogger = None
-        self._debugYetToPost = []
-        self.__assocPkg = associatedPkg
-
-        # The __paramList pointer remains the same for the life of this object
-        self.__paramList = []
-
-        # Set up ConfigObj stuff
-        assert setAllToDefaults or os.path.isfile(cfgFileName), \
-               "Config file not found: "+cfgFileName
-        self.__taskName = ''
-        if setAllToDefaults:
-            # they may not have given us a real file name here since they
-            # just want defaults (in .cfgspc) so don't be too picky about
-            # finding and reading the file.
-            if isinstance(setAllToDefaults, str):
-                # here they have very clearly said to load only the defaults
-                # using the given name as the package name - below we will
-                # have it imported in _findAssociatedConfigSpecFile()
-                self.__taskName = setAllToDefaults
-                setAllToDefaults = True
-                cfgFileName = '' # ignore any given .cfg file, don't need one
-            else:
-                possible = os.path.splitext(os.path.basename(cfgFileName))[0]
-                if os.path.isfile(cfgFileName):
-                    self.__taskName = getEmbeddedKeyVal(cfgFileName,
-                                      TASK_NAME_KEY, possible)
-                else:
-                    self.__taskName = possible
-        else:
-            # this is the real deal, expect a real file name
-            self.__taskName = getEmbeddedKeyVal(cfgFileName, TASK_NAME_KEY)
-            if forceReadOnly:
-                checkSetReadOnly(cfgFileName)
-
-        # Find the associated .cfgspc file (first make sure we weren't
-        # given one by mistake)
-        if not cfgFileName.endswith('.cfg') and \
-           self.__taskName.find('(default=') >= 0:
-            # Handle case where they gave us a .cfgspc by mistake (no .cfg)
-            # (basically reset a few things)
-            cfgSpecPath = os.path.realpath(cfgFileName)
-            setAllToDefaults = True
-            cfgFileName = ''
-            sigStr  = getEmbeddedKeyVal(cfgSpecPath, TASK_NAME_KEY, '')
-            self.__taskName = vtor_checks.sigStrToKwArgsDict(sigStr)['default']
-        else:
-            cfgSpecPath = self._findAssociatedConfigSpecFile(cfgFileName)
-        assert os.path.exists(cfgSpecPath), \
-               "Matching configspec not found!  Expected: "+cfgSpecPath
-
-        self.debug('ConfigObjPars: .cfg='+str(cfgFileName)+ \
-                   ', .cfgspc='+str(cfgSpecPath)+ \
-                   ', defaults='+str(setAllToDefaults)+', strict='+str(strict))
-
-        # Run the ConfigObj ctor.  The result of this (if !setAllToDefaults)
-        # is the exact copy of the input file as a dict (ConfigObj).  If the
-        # infile had extra pars or missing pars, they are still that way here.
-        if setAllToDefaults:
-            configobj.ConfigObj.__init__(self, configspec=cfgSpecPath)
-        else:
-            configobj.ConfigObj.__init__(self, os.path.abspath(cfgFileName),
-                                         configspec=cfgSpecPath)
-
-        # Before we validate (and fill in missing pars), find any lost pars
-        # via this (somewhat kludgy) method suggested by ConfigObj folks.
-        missing = '' # assume no .cfg file
-        if strict and (not setAllToDefaults):
-            # don't even populate this if not strict
-            missing = findTheLost(os.path.abspath(cfgFileName), cfgSpecPath)
-
-        # Validate it here.  We can't skip this step even if we are just
-        # setting all to defaults, since this sets the values.
-        # NOTE - this fills in values for any missing pars !  AND, if our
-        # .cfgspc sets defaults vals, then missing pars are not an error...
-        self._vtor = validate.Validator(vtor_checks.FUNC_DICT)
-        # 'ans' will be True, False, or a dict (anything but True is bad)
-        ans = self.validate(self._vtor, preserve_errors=True,
-                            copy=setAllToDefaults)
-        # Note: before the call to validate(), the list returned from
-        # self.keys() is in the order found in self.filename.  If that file
-        # was missing items that are in the .cfgspc, they will now show up
-        # in self.keys(), but not necessarily in the same order as the .cfgspc
-        hasTypeErr = ans != True
-        extra = self.listTheExtras(True)
-
-        # DEAL WITH ERRORS (in this way)
-        #
-        # wrong par type:
-        #     strict -> severe error*
-        #     not -> severe error
-        # extra par(s) found:
-        #     strict -> severe error
-        #     not -> warn*
-        # missing par(s):
-        #     strict -> warn
-        #     not - be silent
-        #
-        # *severe - if in GUI, pop up error & stop (e.g. file load), else raise
-        # *warn - if in GUI, pop up warning, else print it to screen
-
-        if extra or missing or hasTypeErr:
-            flatStr = ''
-            if ans == False:
-                flatStr = "All values are invalid!"
-            if ans != True and ans != False:
-                flatStr = flattened2str(configobj.flatten_errors(self, ans))
-            if missing:
-                flatStr += "\n\n"+missing
-            if extra:
-                flatStr += "\n\n"+extra
-            msg = "Validation warnings for: "
-            if hasTypeErr or (strict and extra):
-                msg = "Validation errors for: "
-            msg = msg+os.path.realpath(cfgFileName)+\
-                  "\n\n"+flatStr.strip('\n')
-            if hasTypeErr or (strict and extra):
-                raise RuntimeError(msg)
-            else:
-                # just inform them, but don't throw anything
-                print(msg.replace('\n\n','\n'))
-
-        # get the initial param list out of the ConfigObj dict
-        self.syncParamList(True)
-
-        # take note of all trigger logic
-        self.debug(self.triggerLogicToStr())
-
-        # see if we are using a package with it's own run() function
-        self._runFunc = None
-        self._helpFunc = None
-        if self.__assocPkg != None:
-            if hasattr(self.__assocPkg, 'run'):
-                self._runFunc = self.__assocPkg.run
-            if hasattr(self.__assocPkg, 'getHelpAsString'):
-                self._helpFunc = self.__assocPkg.getHelpAsString
-
-
-    def setDebugLogger(self, obj):
-        # set the object we can use to post debugging info
-        self._debugLogger = obj
-        # now that we have one, post anything we have saved up (and clear list)
-        if obj and len(self._debugYetToPost) > 0:
-            for msg in self._debugYetToPost:
-                self._debugLogger.debug(msg)
-        self._debugYetToPost = []
-
-    def debug(self, msg):
-        if self._debugLogger:
-            self._debugLogger.debug(msg)
-        else:
-            # else just hold onto it until we do have a logger -during the
-            # init phase we may not yet have a logger, yet have stuff to log
-            self._debugYetToPost.append(msg) # add to our little cache
-
-    def getDefaultSaveFilename(self, stub=False):
-        """ Return name of file where we are expected to be saved if no files
-        for this task have ever been saved, and the user wishes to save.  If
-        stub is True, the result will be <dir>/<taskname>_stub.cfg instead of
-        <dir>/<taskname>.cfg. """
-        if stub:
-            return self._rcDir+os.sep+self.__taskName+'_stub.cfg'
-        else:
-            return self._rcDir+os.sep+self.__taskName+'.cfg'
-
-    def syncParamList(self, firstTime, preserve_order=True):
-        """ Set or reset the internal param list from the dict's contents. """
-        # See the note in setParam about this design.
-
-        # Get latest par values from dict.  Make sure we do not
-        # change the id of the __paramList pointer here.
-        new_list = self._getParamsFromConfigDict(self, initialPass=firstTime)
-                                               # dumpCfgspcTo=sys.stdout)
-        # Have to add this odd last one for the sake of the GUI (still?)
-        if self._forUseWithEpar:
-            new_list.append(basicpar.IrafParS(['$nargs','s','h','N']))
-
-        if len(self.__paramList) > 0 and preserve_order:
-            # Here we have the most up-to-date data from the actual data
-            # model, the ConfigObj dict, and we need to use it to fill in
-            # our param list.  BUT, we need to preserve the order our list
-            # has had up until now (by unique parameter name).
-            namesInOrder = [p.fullName() for p in self.__paramList]
-            assert len(namesInOrder) == len(new_list), \
-                   'Mismatch in num pars, had: '+str(len(namesInOrder))+ \
-                   ', now we have: '+str(len(new_list))+', '+ \
-                   str([p.fullName() for p in new_list])
-            self.__paramList[:] = [] # clear list, keep same pointer
-            # create a flat dict view of new_list, for ease of use in next step
-            new_list_dict = {} # can do in one step in v2.7
-            for par in new_list: new_list_dict[par.fullName()] = par
-            # populate
-            for fn in namesInOrder:
-                self.__paramList.append(new_list_dict[fn])
-        else:
-            # Here we just take the data in whatever order it came.
-            self.__paramList[:] = new_list # keep same list pointer
-
-    def getName(self): return self.__taskName
-
-    def getPkgname(self):  return '' # subclasses override w/ a sensible value
-
-    def getParList(self, docopy=False):
-        """ Return a list of parameter objects.  docopy is ignored as the
-        returned value is not a copy. """
-        return self.__paramList
-
-    def getDefaultParList(self):
-        """ Return a par list just like ours, but with all default values. """
-        # The code below (create a new set-to-dflts obj) is correct, but it
-        # adds a tenth of a second to startup.  Clicking "Defaults" in the
-        # GUI does not call this.  But this can be used to set the order seen.
-
-        # But first check for rare case of no cfg file name
-        if self.filename == None:
-            # this is a .cfgspc-only kind of object so far
-            self.filename = self.getDefaultSaveFilename(stub=True)
-            return copy.deepcopy(self.__paramList)
-
-        tmpObj = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg,
-                               setAllToDefaults=True, strict=False)
-        return tmpObj.getParList()
-
-    def getFilename(self):
-        if self.filename in (None, ''):
-            return self.getDefaultSaveFilename()
-        else:
-            return self.filename
-
-    def getAssocPkg(self): return self.__assocPkg
-
-    def canExecute(self): return self._runFunc != None
-
-    def isSameTaskAs(self, aCfgObjPrs):
-        """ Return True if the passed in object is for the same task as
-        we are. """
-        return aCfgObjPrs.getName() == self.getName()
-
-#   def strictUpdate(self, aDict):
-#       """ Override the current values with those in the given dict.  This
-#           is like dict's update, except it doesn't allow new keys and it
-#           verifies the values (it does?!) """
-#       if aDict == None:
-#           return
-#       for k in aDict:
-#           v = aDict[k]
-#           print("Skipping ovverride key = "+k+", val = "+str(v))
-
-    def setParam(self, name, val, scope='', check=1, idxHint=None):
-        """ Find the ConfigObj entry.  Update the __paramList. """
-        theDict, oldVal = findScopedPar(self, scope, name)
-
-        # Set the value, even if invalid.  It needs to be set before
-        # the validation step (next).
-        theDict[name] = val
-
-        # If need be, check the proposed value.  Ideally, we'd like to
-        # (somehow elegantly) only check this one item. For now, the best
-        # shortcut is to only validate this section.
-        if check:
-            ans=self.validate(self._vtor, preserve_errors=True, section=theDict)
-            if ans != True:
-                flatStr = "All values are invalid!"
-                if ans != False:
-                    flatStr = flattened2str(configobj.flatten_errors(self, ans))
-                raise RuntimeError("Validation error: "+flatStr)
-
-        # Note - this design needs work.  Right now there are two copies
-        # of the data:  the ConfigObj dict, and the __paramList ...
-        # We rely on the idxHint arg so we don't have to search the __paramList
-        # every time this is called, which could really slows things down.
-        assert idxHint != None, "ConfigObjPars relies on a valid idxHint"
-        assert name == self.__paramList[idxHint].name, \
-               'Error in setParam, name: "'+name+'" != name at idxHint: "'+\
-               self.__paramList[idxHint].name+'", idxHint: '+str(idxHint)
-        self.__paramList[idxHint].set(val)
-
-    def saveParList(self, *args, **kw):
-        """Write parameter data to filename (string or filehandle)"""
-        if 'filename' in kw:
-            filename = kw['filename']
-        if not filename:
-            filename = self.getFilename()
-        if not filename:
-            raise ValueError("No filename specified to save parameters")
-
-        if hasattr(filename,'write'):
-            fh = filename
-            absFileName = os.path.abspath(fh.name)
-        else:
-            absFileName = os.path.expanduser(filename)
-            absDir = os.path.dirname(absFileName)
-            if len(absDir) and not os.path.isdir(absDir): os.makedirs(absDir)
-            fh = open(absFileName,'w')
-        numpars = len(self.__paramList)
-        if self._forUseWithEpar: numpars -= 1
-        if not self.final_comment: self.final_comment = [''] # force \n at EOF
-        # Empty the ConfigObj version of section.defaults since that is based
-        # on an assumption incorrect for us, and override with our own list.
-        # THIS IS A BIT OF MONKEY-PATCHING!  WATCH FUTURE VERSION CHANGES!
-        # See Trac ticket #762.
-        while len(self.defaults):
-            self.defaults.pop(-1) # empty it, keeping ref
-        for key in self._neverWrite:
-            self.defaults.append(key)
-        # Note also that we are only overwriting the top/main section's
-        # "defaults" list, but EVERY [sub-]section has such an attribute...
-
-        # Now write to file, delegating work to ConfigObj (note that ConfigObj
-        # write() skips any items listed by name in the self.defaults list)
-        self.write(fh)
-        fh.close()
-        retval = str(numpars) + " parameters written to " + absFileName
-        self.filename = absFileName # reset our own ConfigObj filename attr
-        self.debug('Keys not written: '+str(self.defaults))
-        return retval
-
-    def run(self, *args, **kw):
-        """ This may be overridden by a subclass. """
-        if self._runFunc != None:
-            # remove the two args sent by EditParDialog which we do not use
-            if 'mode' in kw: kw.pop('mode')
-            if '_save' in kw: kw.pop('_save')
-            return self._runFunc(self, *args, **kw)
-        else:
-            raise taskpars.NoExecError('No way to run task "'+self.__taskName+\
-                '". You must either override the "run" method in your '+ \
-                'ConfigObjPars subclass, or you must supply a "run" '+ \
-                'function in your package.')
-
-    def triggerLogicToStr(self):
-        """ Print all the trigger logic to a string and return it. """
-        try:
-            import json
-        except ImportError:
-            return "Cannot dump triggers/dependencies/executes (need json)"
-        retval = "TRIGGERS:\n"+json.dumps(self._allTriggers, indent=3)
-        retval += "\nDEPENDENCIES:\n"+json.dumps(self._allDepdcs, indent=3)
-        retval += "\nTO EXECUTE:\n"+json.dumps(self._allExecutes, indent=3)
-        retval += "\n"
-        return retval
-
-
-    def getHelpAsString(self):
-        """ This may be overridden by a subclass. """
-        if self._helpFunc != None:
-            return self._helpFunc()
-        else:
-            return 'No help string found for task "'+self.__taskName+ \
-            '".  \n\nThe developer must either override the '+\
-            'getHelpAsString() method in their ConfigObjPars \n'+ \
-            'subclass, or they must supply such a function in their package.'
-
-    def _findAssociatedConfigSpecFile(self, cfgFileName):
-        """ Given a config file, find its associated config-spec file, and
-        return the full pathname of the file. """
-
-        # Handle simplest 2 cases first: co-located or local .cfgspc file
-        retval = "."+os.sep+self.__taskName+".cfgspc"
-        if os.path.isfile(retval): return retval
-
-        retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc"
-        if os.path.isfile(retval): return retval
-
-        # Also try the resource dir
-        retval = self.getDefaultSaveFilename()+'spc' # .cfgspc
-        if os.path.isfile(retval): return retval
-
-        # Now try and see if there is a matching .cfgspc file in/under an
-        # associated package, if one is defined.
-        if self.__assocPkg != None:
-            x, theFile = findCfgFileForPkg(None, '.cfgspc',
-                                           pkgObj = self.__assocPkg,
-                                           taskName = self.__taskName)
-            return theFile
-
-        # Finally try to import the task name and see if there is a .cfgspc
-        # file in that directory
-        x, theFile = findCfgFileForPkg(self.__taskName, '.cfgspc',
-                                       taskName = self.__taskName)
-        if os.path.exists(theFile):
-            return theFile
-
-        # unfound
-        raise NoCfgFileError('Unfound config-spec file for task: "'+ \
-                             self.__taskName+'"')
-
-
-    def _getParamsFromConfigDict(self, cfgObj, scopePrefix='',
-                                 initialPass=False, dumpCfgspcTo=None):
-        """ Walk the given ConfigObj dict pulling out IRAF-like parameters into
-        a list. Since this operates on a dict this can be called recursively.
-        This is also our chance to find and pull out triggers and such
-        dependencies. """
-        # init
-        retval = []
-        if initialPass and len(scopePrefix) < 1:
-            self._posArgs = [] # positional args [2-tuples]: (index,scopedName)
-            # FOR SECURITY: the following 3 chunks of data,
-            #     _allTriggers, _allDepdcs, _allExecutes,
-            # are collected ONLY from the .cfgspc file
-            self._allTriggers = {}
-            self._allDepdcs = {}
-            self._allExecutes = {}
-
-        # start walking ("tell yer story walkin, buddy")
-        # NOTE: this relies on the "in" operator returning keys in the
-        # order that they exist in the dict (which depends on ConfigObj keeping
-        # the order they were found in the original file)
-        for key in cfgObj:
-            val = cfgObj[key]
-
-            # Do we need to skip this - if not a par, like a rule or something
-            toBeHidden = isHiddenName(key)
-            if toBeHidden:
-                if key not in self._neverWrite and key != TASK_NAME_KEY:
-                    self._neverWrite.append(key)
-                    # yes TASK_NAME_KEY is hidden, but it IS output to the .cfg
-
-            # a section
-            if isinstance(val, dict):
-                if not toBeHidden:
-                    if len(list(val.keys()))>0 and len(retval)>0:
-                        # Here is where we sneak in the section comment
-                        # This is so incredibly kludgy (as the code was), it
-                        # MUST be revamped eventually! This is for the epar GUI.
-                        prevPar = retval[-1]
-                        # Use the key (or its comment?) as the section header
-                        prevPar.set(prevPar.get('p_prompt')+'\n\n'+key,
-                                    field='p_prompt', check=0)
-                    if dumpCfgspcTo:
-                        dumpCfgspcTo.write('\n['+key+']\n')
-                    # a logical grouping (append its params)
-                    pfx = scopePrefix+'.'+key
-                    pfx = pfx.strip('.')
-                    retval = retval + self._getParamsFromConfigDict(val, pfx,
-                                      initialPass, dumpCfgspcTo) # recurse
-            else:
-                # a param
-                fields = []
-                choicesOrMin = None
-                fields.append(key) # name
-                dtype = 's'
-                cspc = None
-                if cfgObj.configspec:
-                    cspc = cfgObj.configspec.get(key) # None if not found
-                chk_func_name = ''
-                chk_args_dict = {}
-                if cspc:
-                    chk_func_name = cspc[:cspc.find('(')]
-                    chk_args_dict = vtor_checks.sigStrToKwArgsDict(cspc)
-                if chk_func_name.find('option') >= 0:
-                    dtype = 's'
-                    # convert the choices string to a list (to weed out kwds)
-                    x = cspc[cspc.find('(')+1:-1] # just the options() args
-# cspc e.g.: option_kw("poly5","nearest","linear", default="poly5", comment="Interpolant (poly5,nearest,linear)")
-                    x = x.split(',') # tokenize
-                    # but! comment value may have commas in it, find it
-                    # using it's equal sign, rm all after it
-                    has_eq = [i for i in x if i.find('=')>=0]
-                    if len(has_eq) > 0:
-                        x = x[: x.index(has_eq[0]) ]
-                    # rm spaces, extra quotes; rm kywd arg pairs
-                    x = [i.strip("' ") for i in x if i.find('=')<0]
-                    choicesOrMin = '|'+'|'.join(x)+'|' # IRAF format for enums
-                elif chk_func_name.find('boolean') >= 0:     dtype = 'b'
-                elif chk_func_name.find('float_or_') >= 0:   dtype = 'r'
-                elif chk_func_name.find('float') >= 0:       dtype = 'R'
-                elif chk_func_name.find('integer_or_') >= 0: dtype = 'i'
-                elif chk_func_name.find('integer') >= 0:     dtype = 'I'
-                elif chk_func_name.find('action') >= 0:      dtype = 'z'
-                fields.append(dtype)
-                fields.append('a')
-                if type(val)==bool:
-                    if val: fields.append('yes')
-                    else:   fields.append('no')
-                else:
-                    fields.append(val)
-                fields.append(choicesOrMin)
-                fields.append(None)
-                # Primarily use description from .cfgspc file (0). But, allow
-                # overrides from .cfg file (1) if different.
-                dscrp0 = chk_args_dict.get('comment','').strip() # ok if missing
-                dscrp1 = cfgObj.inline_comments[key]
-                if dscrp1==None: dscrp1 = ''
-                while len(dscrp1)>0 and dscrp1[0] in (' ','#'):
-                    dscrp1 = dscrp1[1:] # .cfg file comments start with '#'
-                dscrp1 = dscrp1.strip()
-                # Now, decide what to do/say about the descriptions
-                if len(dscrp1) > 0:
-                    dscrp = dscrp0
-                    if dscrp0 != dscrp1: # allow override if different
-                        dscrp = dscrp1+eparoption.DSCRPTN_FLAG # flag it
-                        if initialPass:
-                            if dscrp0 == '' and cspc == None:
-                                # this is a case where this par isn't in the
-                                # .cfgspc; ignore, it is caught/error later
-                                pass
-                            else:
-                                self.debug('Description of "'+key+ \
-                                    '" overridden, from:  '+repr(dscrp0)+\
-                                    '  to:  '+repr(dscrp1))
-                    fields.append(dscrp)
-                else:
-                    # set the field for the GUI
-                    fields.append(dscrp0)
-                    # ALSO set it in the dict so it is written to file later
-                    cfgObj.inline_comments[key] = '# '+dscrp0
-                # This little section, while never intended to be used during
-                # normal operation, could save a lot of manual work.
-                if dumpCfgspcTo:
-                    junk = cspc
-                    junk = key+' = '+junk.strip()
-                    if junk.find(' comment=')<0:
-                        junk = junk[:-1]+", comment="+ \
-                               repr(irafutils.stripQuotes(dscrp1.strip()))+")"
-                    dumpCfgspcTo.write(junk+'\n')
-                # Create the par
-                if not toBeHidden or chk_func_name.find('action')==0:
-                    par = basicpar.parFactory(fields, True)
-                    par.setScope(scopePrefix)
-                    retval.append(par)
-                # else this is a hidden key
-
-                # The next few items require a fully scoped name
-                absKeyName = scopePrefix+'.'+key # assumed to be unique
-                # Check for pars marked to be positional args
-                if initialPass:
-                    pos = chk_args_dict.get('pos')
-                    if pos:
-                        # we'll sort them later, on demand
-                        self._posArgs.append( (int(pos), scopePrefix, key) )
-                # Check for triggers and/or dependencies
-                if initialPass:
-                    # What triggers what? (thats why theres an 's' in the kwd)
-                    # try "trigger" (old)
-                    if chk_args_dict.get('trigger'):
-                        print("WARNING: outdated version of .cfgspc!! for "+
-                              self.__taskName+", 'trigger' unused for "+
-                              absKeyName)
-                    # try "triggers"
-                    trgs = chk_args_dict.get('triggers')
-                    if trgs and len(trgs)>0:
-                        # eg. _allTriggers['STEP2.xy'] == ('_rule1_','_rule3_')
-                        assert absKeyName not in self._allTriggers, \
-                            'More than 1 of these in .cfgspc?: '+absKeyName
-                        # we force this to always be a sequence
-                        if isinstance(trgs, (list,tuple)):
-                            self._allTriggers[absKeyName] = trgs
-                        else:
-                            self._allTriggers[absKeyName] = (trgs,)
-                    # try "executes"
-                    excs = chk_args_dict.get('executes')
-                    if excs and len(excs)>0:
-                        # eg. _allExecutes['STEP2.xy'] == ('_rule1_','_rule3_')
-                        assert absKeyName not in self._allExecutes, \
-                            'More than 1 of these in .cfgspc?: '+absKeyName
-                        # we force this to always be a sequence
-                        if isinstance(excs, (list,tuple)):
-                            self._allExecutes[absKeyName] = excs
-                        else:
-                            self._allExecutes[absKeyName] = (excs,)
-
-                    # Dependencies? (besides these used here, may someday
-                    # add: 'range_from', 'warn_if', etc.)
-                    depName = None
-                    if not depName:
-                        depType = 'active_if'
-                        depName = chk_args_dict.get(depType) # e.g. =='_rule1_'
-                    if not depName:
-                        depType = 'inactive_if'
-                        depName = chk_args_dict.get(depType)
-                    if not depName:
-                        depType = 'is_set_by'
-                        depName = chk_args_dict.get(depType)
-                    if not depName:
-                        depType = 'set_yes_if'
-                        depName = chk_args_dict.get(depType)
-                    if not depName:
-                        depType = 'set_no_if'
-                        depName = chk_args_dict.get(depType)
-                    if not depName:
-                        depType = 'is_disabled_by'
-                        depName = chk_args_dict.get(depType)
-                    # NOTE - the above few lines stops at the first dependency
-                    # found (depName) for a given par.  If, in the future a
-                    # given par can have >1 dependency than we need to revamp!!
-                    if depName:
-                        # Add to _allDepdcs dict: (val is dict of pars:types)
-                        #
-                        # e.g. _allDepdcs['_rule1_'] == \
-                        #        {'STEP3.ra':      'active_if',
-                        #         'STEP3.dec':     'active_if',
-                        #         'STEP3.azimuth': 'inactive_if'}
-                        if depName in self._allDepdcs:
-                            thisRulesDict = self._allDepdcs[depName]
-                            assert not absKeyName in thisRulesDict, \
-                                'Cant yet handle multiple actions for the '+ \
-                                'same par and the same rule.  For "'+depName+ \
-                                '" dict was: '+str(thisRulesDict)+ \
-                                ' while trying to add to it: '+\
-                                str({absKeyName:depType})
-                            thisRulesDict[absKeyName] = depType
-                        else:
-                            self._allDepdcs[depName] = {absKeyName:depType}
-                    # else no dependencies found for this chk_args_dict
-        return retval
-
-
-    def getTriggerStrings(self, parScope, parName):
-        """ For a given item (scope + name), return all strings (in a tuple)
-        that it is meant to trigger, if any exist.  Returns None is none. """
-        # The data structure of _allTriggers was chosen for how easily/quickly
-        # this particular access can be made here.
-        fullName = parScope+'.'+parName
-        return self._allTriggers.get(fullName) # returns None if unfound
-
-
-    def getParsWhoDependOn(self, ruleName):
-        """ Find any parameters which depend on the given trigger name. Returns
-        None or a dict of {scopedName: dependencyName} from _allDepdcs. """
-        # The data structure of _allDepdcs was chosen for how easily/quickly
-        # this particular access can be made here.
-        return self._allDepdcs.get(ruleName)
-
-
-    def getExecuteStrings(self, parScope, parName):
-        """ For a given item (scope + name), return all strings (in a tuple)
-        that it is meant to execute, if any exist.  Returns None is none. """
-        # The data structure of _allExecutes was chosen for how easily/quickly
-        # this particular access can be made here.
-        fullName = parScope+'.'+parName
-        return self._allExecutes.get(fullName) # returns None if unfound
-
-
-    def getPosArgs(self):
-        """ Return a list, in order, of any parameters marked with "pos=N" in
-            the .cfgspc file. """
-        if len(self._posArgs) < 1: return []
-        # The first item in the tuple is the index, so we now sort by it
-        self._posArgs.sort()
-        # Build a return list
-        retval = []
-        for idx, scope, name in self._posArgs:
-            theDict, val = findScopedPar(self, scope, name)
-            retval.append(val)
-        return retval
-
-
-    def getKwdArgs(self, flatten = False):
-        """ Return a dict of all normal dict parameters - that is, all
-            parameters NOT marked with "pos=N" in the .cfgspc file.  This will
-            also exclude all hidden parameters (metadata, rules, etc). """
-
-        # Start with a full deep-copy.  What complicates this method is the
-        # idea of sub-sections.  This dict can have dicts as values, and so on.
-        dcopy = self.dict() # ConfigObj docs say this is a deep-copy
-
-        # First go through the dict removing all positional args
-        for idx,scope,name in self._posArgs:
-            theDict, val = findScopedPar(dcopy, scope, name)
-            # 'theDict' may be dcopy, or it may be a dict under it
-            theDict.pop(name)
-
-        # Then go through the dict removing all hidden items ('_item_name_')
-        for k in list(dcopy.keys()):
-            if isHiddenName(k):
-                dcopy.pop(k)
-
-        # Done with the nominal operation
-        if not flatten:
-            return dcopy
-
-        # They have asked us to flatten the structure - to bring all parameters
-        # up to the top level, even if they are in sub-sections.  So we look
-        # for values that are dicts.  We will throw something if we end up
-        # with name collisions at the top level as a result of this.
-        return flattenDictTree(dcopy)
-
-
-    def canPerformValidation(self):
-        """ Override this so we can do our own validation. tryValue() will
-            be called as a result. """
-        return True
-
-
-    def knowAsNative(self):
-        """ Override so we can keep native types in the internal dict. """
-        return True
-
-
-    def tryValue(self, name, val, scope=''):
-        """ For the given item name (and scope), we are being asked to try
-            the given value to see if it would pass validation.  We are not
-            to set it, but just try it.  We return a tuple:
-            If it fails, we return: (False,  the last known valid value).
-            On success, we return: (True, None). """
-
-        # SIMILARITY BETWEEN THIS AND setParam() SHOULD BE CONSOLIDATED!
-
-        # Set the value, even if invalid.  It needs to be set before
-        # the validation step (next).
-        theDict, oldVal = findScopedPar(self, scope, name)
-        if oldVal == val: return (True, None) # assume oldVal is valid
-        theDict[name] = val
-
-        # Check the proposed value.  Ideally, we'd like to
-        # (somehow elegantly) only check this one item. For now, the best
-        # shortcut is to only validate this section.
-        ans=self.validate(self._vtor, preserve_errors=True, section=theDict)
-
-        # No matter what ans is, immediately return the item to its original
-        # value since we are only checking the value here - not setting.
-        theDict[name] = oldVal
-
-        # Now see what the validation check said
-        errStr = ''
-        if ans != True:
-            flatStr = "All values are invalid!"
-            if ans != False:
-                flatStr = flattened2str(configobj.flatten_errors(self, ans))
-            errStr = "Validation error: "+flatStr # for now this info is unused
-
-        # Done
-        if len(errStr): return (False, oldVal) # was an error
-        else:           return (True, None)    # val is OK
-
-
-    def listTheExtras(self, deleteAlso):
-        """ Use ConfigObj's get_extra_values() call to find any extra/unknown
-        parameters we may have loaded.  Return a string similar to findTheLost.
-        If deleteAlso is True, this will also delete any extra/unknown items.
-        """
-        # get list of extras
-        extras = configobj.get_extra_values(self)
-        # extras is in format: [(sections, key), (sections, key), ]
-        # but we need: [(sections, key, result), ...] - set all results to
-        # a bool just to make it the right shape.  BUT, since we are in
-        # here anyway, make that bool mean something - hide info in it about
-        # whether that extra item is a section (1) or just a single par (0)
-        #
-        # simplified, this is:  expanded = [ (x+(abool,)) for x in extras]
-        expanded = [ (x+ \
-                       ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
-                     ) for x in extras]
-        retval = ''
-        if expanded:
-            retval = flattened2str(expanded, extra=1)
-        # but before we return, delete them (from ourself!) if requested to
-        if deleteAlso:
-            for tup_to_del in extras:
-                target = self
-                # descend the tree to the dict where this items is located.
-                # (this works because target is not a copy (because the dict
-                #  type is mutable))
-                location = tup_to_del[0]
-                for subdict in location: target = target[subdict]
-                # delete it
-                target.pop(tup_to_del[1])
-
-        return retval
-
-
-# ---------------------------- helper functions --------------------------------
-
-
-def findTheLost(config_file, configspec_file, skipHidden=True):
-    """ Find any lost/missing parameters in this cfg file, compared to what
-    the .cfgspc says should be there. This method is recommended by the
-    ConfigObj docs. Return a stringified list of item errors. """
-    # do some sanity checking, but don't (yet) make this a serious error
-    if not os.path.exists(config_file):
-        print("ERROR: Config file not found: "+config_file)
-        return []
-    if not os.path.exists(configspec_file):
-        print("ERROR: Configspec file not found: "+configspec_file)
-        return []
-    tmpObj = configobj.ConfigObj(config_file, configspec=configspec_file)
-    simval = configobj.SimpleVal()
-    test = tmpObj.validate(simval)
-    if test == True:
-        return []
-    # If we get here, there is a dict returned of {key1: bool, key2: bool}
-    # which matches the shape of the config obj.  We need to walk it to
-    # find the Falses, since they are the missing pars.
-    missing = []
-    flattened = configobj.flatten_errors(tmpObj, test)
-    # But, before we move on, skip/eliminate any 'hidden' items from our list,
-    # since hidden items are really supposed to be missing from the .cfg file.
-    if len(flattened) > 0 and skipHidden:
-        keepers = []
-        for tup in flattened:
-            keep = True
-            # hidden section
-            if len(tup[0])>0 and isHiddenName(tup[0][-1]):
-                keep = False
-            # hidden par (in a section, or at the top level)
-            elif tup[1] != None and isHiddenName(tup[1]):
-                keep = False
-            if keep:
-                keepers.append(tup)
-        flattened = keepers
-    flatStr = flattened2str(flattened, missing=True)
-    return flatStr
-
-
-def isHiddenName(astr):
-    """ Return True if this string name denotes a hidden par or section """
-    if astr != None and len(astr) > 2 and astr.startswith('_') and \
-       astr.endswith('_'):
-        return True
-    else:
-        return False
-
-
-def flattened2str(flattened, missing=False, extra=False):
-    """ Return a pretty-printed multi-line string version of the output of
-    flatten_errors. Know that flattened comes in the form of a list
-    of keys that failed. Each member of the list is a tuple::
-
-        ([list of sections...], key, result)
-
-    so we turn that into a string. Set missing to True if all the input
-    problems are from missing items.  Set extra to True if all the input
-    problems are from extra items. """
-
-    if flattened == None or len(flattened) < 1:
-        return ''
-    retval = ''
-    for sections, key, result in flattened:
-        # Name the section and item, to start the message line
-        if sections == None or len(sections) == 0:
-            retval += '\t"'+key+'"'
-        elif len(sections) == 1:
-            if key == None:
-                # a whole section is missing at the top-level; see if hidden
-                junk = sections[0]
-                if isHiddenName(junk):
-                    continue # this missing or extra section is not an error
-                else:
-                    retval += '\tSection "'+sections[0]+'"'
-            else:
-                retval += '\t"'+sections[0]+'.'+key+'"'
-        else: # len > 1
-            joined = '.'.join(sections)
-            joined = '"'+joined+'"'
-            if key == None:
-                retval +=  '\tSection '+joined
-            else:
-                retval +=  '\t"'+key+'" from '+joined
-        # End the msg line with "what seems to be the trouble" with this one
-        if missing and result==False:
-            retval += ' is missing.'
-        elif extra:
-            if result:
-                retval += ' is an unexpected section. Is your file out of date?'
-            else:
-                retval += ' is an unexpected parameter. Is your file out of date?'
-        elif isinstance(result, bool):
-            retval += ' has an invalid value'
-        else:
-            retval += ' is invalid, '+result.message
-        retval += '\n\n'
-    return retval.rstrip()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/check_files.py b/required_pkgs/stsci.tools/lib/stsci/tools/check_files.py
deleted file mode 100644
index 41b95bd..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/check_files.py
+++ /dev/null
@@ -1,456 +0,0 @@
-from __future__ import division, print_function # confidence high
-
-from stsci.tools import parseinput, fileutil
-from astropy.io import fits
-import os
-
-def checkFiles(filelist,ivmlist = None):
-    """
-    - Converts waiver fits sciece and data quality files to MEF format
-    - Converts GEIS science and data quality files to MEF format
-    - Checks for stis association tables and splits them into single imsets
-    - Removes files with EXPTIME=0 and the corresponding ivm files
-    - Removes files with NGOODPIX == 0 (to exclude saturated images)
-    - Removes files with missing PA_V3 keyword
-
-    The list of science files should match the list of ivm files at the end.
-    """
-
-    newfilelist, ivmlist = checkFITSFormat(filelist, ivmlist)
-
-    # check for STIS association files. This must be done before
-    # the other checks in order to handle correctly stis
-    # assoc files
-    #if fits.getval(newfilelist[0], 'INSTRUME') == 'STIS':
-    newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)
-    if newfilelist == []:
-        return [], []
-    removed_expt_files = check_exptime(newfilelist)
-
-    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)
-    if newfilelist == []:
-        return [], []
-    removed_ngood_files = checkNGOODPIX(newfilelist)
-    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)
-    if newfilelist == []:
-        return [], []
-
-    removed_pav3_files = checkPA_V3(newfilelist)
-    newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_pav3_files)
-
-    newfilelist, ivmlist = update_input(newfilelist, ivmlist,[])
-
-    if newfilelist == []:
-        return [], []
-
-    return newfilelist, ivmlist
-
-def checkFITSFormat(filelist, ivmlist=None):
-    """
-    This code will check whether or not files are GEIS or WAIVER FITS and
-    convert them to MEF if found. It also keeps the IVMLIST consistent with
-    the input filelist, in the case that some inputs get dropped during
-    the check/conversion.
-    """
-    if ivmlist == None:
-        ivmlist = [None for l in filelist]
-
-    sci_ivm = list(zip(filelist, ivmlist))
-
-    removed_files, translated_names, newivmlist = convert2fits(sci_ivm)
-    newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)
-
-    if newfilelist == [] and translated_names == []:
-        return [], []
-
-    elif translated_names != []:
-        newfilelist.extend(translated_names)
-        ivmlist.extend(newivmlist)
-
-    return newfilelist, ivmlist
-
-
-def checkStisFiles(filelist, ivmlist=None):
-    newflist = []
-    newilist = []
-    removed_files = []
-    assoc_files = []
-    assoc_ilist = []
-
-    if len(filelist) != len(ivmlist):
-        errormsg = "Input file list and ivm list have different lenghts\n"
-        errormsg += "Quitting ...\n"
-        raise ValueError(errormsg)
-
-    for t in zip(filelist, ivmlist):
-
-        if fits.getval(t[0], 'INSTRUME') != 'STIS':
-            newflist.append(t[0])
-            newilist.append(t[1])
-            continue
-        if isSTISSpectroscopic(t[0]):
-            removed_files.append(t[0])
-            continue
-        sci_count = stisObsCount(t[0])
-        if sci_count >1:
-            newfilenames = splitStis(t[0], sci_count)
-            assoc_files.extend(newfilenames)
-            removed_files.append(t[0])
-            if (isinstance(t[1], tuple) and t[1][0] is not None) or \
-               (not isinstance(t[1], tuple) and t[1] is not None):
-                print('Does not handle STIS IVM files and STIS association files\n')
-            else:
-                assoc_ilist.extend([None]*len(assoc_files))
-        elif sci_count == 1:
-            newflist.append(t[0])
-            newilist.append(t[1])
-        else:
-            errormsg = "No valid 'SCI extension in STIS file\n"
-            raise ValueError(errormsg)
-
-        stisExt2PrimKw([t[0]])
-
-    newflist.extend(assoc_files)
-    newilist.extend(assoc_ilist)
-    return newflist, newilist
-
-def check_exptime(filelist):
-    """
-    Removes files with EXPTIME==0 from filelist.
-    """
-    removed_files = []
-    for f in filelist:
-        try:
-            exptime = fileutil.getHeader(f+'[sci,1]')['EXPTIME']
-        except KeyError:
-            removed_files.append(f)
-            print("Warning:  There are files without keyword EXPTIME")
-            continue
-        if exptime <= 0:
-            removed_files.append(f)
-            print("Warning:  There are files with zero exposure time: keyword EXPTIME = 0.0")
-
-    if removed_files != []:
-        print("Warning:  Removing the following files from input list")
-        for f in removed_files:
-            print('\t',f)
-    return removed_files
-
-def checkNGOODPIX(filelist):
-    """
-    Only for ACS, WFC3 and STIS, check NGOODPIX
-    If all pixels are 'bad' on all chips, exclude this image
-    from further processing.
-    Similar checks requiring comparing 'driz_sep_bits' against
-    WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be
-    done separately (and later).
-    """
-    removed_files = []
-    supported_instruments = ['ACS','STIS','WFC3']
-    for inputfile in filelist:
-        if fileutil.getKeyword(inputfile,'instrume') in supported_instruments:
-            file = fits.open(inputfile)
-            ngood = 0
-            for extn in file:
-                if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI':
-                    ngood += extn.header['NGOODPIX']
-            file.close()
-
-            if (ngood == 0):
-                removed_files.append(inputfile)
-
-    if removed_files != []:
-        print("Warning:  Files without valid pixels detected: keyword NGOODPIX = 0.0")
-        print("Warning:  Removing the following files from input list")
-        for f in removed_files:
-            print('\t',f)
-
-    return removed_files
-
-def update_input(filelist, ivmlist=None, removed_files=None):
-    """
-    Removes files flagged to be removed from the input filelist.
-    Removes the corresponding ivm files if present.
-    """
-    newfilelist = []
-
-    if removed_files == []:
-        return filelist, ivmlist
-    else:
-        sci_ivm = list(zip(filelist, ivmlist))
-        for f in removed_files:
-            result = [sci_ivm.remove(t) for t in sci_ivm if t[0] == f ]
-        ivmlist = [el[1] for el in sci_ivm]
-        newfilelist = [el[0] for el in sci_ivm]
-        return newfilelist, ivmlist
-
-
-def stisObsCount(input):
-    """
-    Input: A stis multiextension file
-    Output: Number of stis science extensions in input
-    """
-    count = 0
-    f = fits.open(input)
-    for ext in f:
-        if 'extname' in ext.header:
-            if (ext.header['extname'].upper() == 'SCI'):
-                count += 1
-    f.close()
-    return count
-
-def splitStis(stisfile, sci_count):
-    """
-    :Purpose: Split a STIS association file into multiple imset MEF files.
-
-    Split the corresponding spt file if present into single spt files.
-    If an spt file can't be split or is missing a Warning is printed.
-
-    Returns
-    -------
-    names: list
-        a list with the names of the new flt files.
-
-    """
-    newfiles = []
-
-    f = fits.open(stisfile)
-    hdu0 = f[0].copy()
-
-
-    for count in range(1,sci_count+1):
-        fitsobj = fits.HDUList()
-        fitsobj.append(hdu0)
-        hdu = f[('sci',count)].copy()
-        fitsobj.append(hdu)
-        rootname = hdu.header['EXPNAME']
-        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
-        try:
-            # Verify error array exists
-            if f[('err',count)].data == None:
-                raise ValueError
-            # Verify dq array exists
-            if f[('dq',count)].data == None:
-                raise ValueError
-            # Copy the err extension
-            hdu = f[('err',count)].copy()
-            fitsobj.append(hdu)
-            # Copy the dq extension
-            hdu = f[('dq',count)].copy()
-            fitsobj.append(hdu)
-            fitsobj[1].header['EXTVER'] = 1
-            fitsobj[2].header['EXTVER'] = 1
-            fitsobj[3].header['EXTVER'] = 1
-        except ValueError:
-            print('\nWarning:')
-            print('Extension version %d of the input file %s does not' %(count, stisfile))
-            print('contain all required image extensions. Each must contain')
-            print('populates SCI, ERR and DQ arrays.')
-
-            continue
-
-
-        # Determine if the file you wish to create already exists on the disk.
-        # If the file does exist, replace it.
-        if (os.path.exists(newfilename)):
-            os.remove(newfilename)
-            print("       Replacing "+newfilename+"...")
-
-            # Write out the new file
-        fitsobj.writeto(newfilename)
-        newfiles.append(newfilename)
-    f.close()
-
-    sptfilename = fileutil.buildNewRootname(stisfile, extn='_spt.fits')
-    try:
-        sptfile = fits.open(sptfilename)
-    except IOError:
-        print('SPT file not found %s \n' % sptfilename)
-        return newfiles
-
-    if sptfile:
-        hdu0 = sptfile[0].copy()
-        try:
-            for count in range(1,sci_count+1):
-                fitsobj = fits.HDUList()
-                fitsobj.append(hdu0)
-                hdu = sptfile[count].copy()
-                fitsobj.append(hdu)
-                rootname = hdu.header['EXPNAME']
-                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
-                fitsobj[1].header['EXTVER'] = 1
-                if (os.path.exists(newfilename)):
-                    os.remove(newfilename)
-                    print("       Replacing "+newfilename+"...")
-
-                # Write out the new file
-                fitsobj.writeto(newfilename)
-        except:
-            print("Warning: Unable to split spt file %s " % sptfilename)
-        sptfile.close()
-
-    return newfiles
-
-def stisExt2PrimKw(stisfiles):
-    """
-        Several kw which are usuall yin the primary header
-        are in the extension header for STIS. They are copied to
-        the primary header for convenience.
-        List if kw:
-        'DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME'
-    """
-
-    kw_list = ['DATE-OBS', 'EXPEND', 'EXPSTART', 'EXPTIME']
-
-    for sfile in stisfiles:
-        d = {}
-        for k in kw_list:
-            d[k] = fits.getval(sfile, k, ext=1)
-
-        for item in d.items():
-            fits.setval(sfile, item[0], value=item[1], comment='Copied from extension header')
-
-
-def isSTISSpectroscopic(fname):
-
-    if fits.getval(fname, 'OBSTYPE') == 'SPECTROSCOPIC':
-        print("Warning:  STIS spectroscopic files detected")
-        print("Warning:  Removing %s from input list" % fname)
-        return True
-    else:
-        return False
-
-def checkPA_V3(fnames):
-    removed_files = []
-    for f in fnames:
-        try:
-            pav3 = fits.getval(f, 'PA_V3')
-        except KeyError:
-            rootname = fits.getval(f, 'ROOTNAME')
-            sptfile = rootname+'_spt.fits'
-            if fileutil.findFile(sptfile):
-                try:
-                    pav3 = fits.getval(sptfile, 'PA_V3')
-                except KeyError:
-                    print("Warning:  Files without keyword PA_V3 detected")
-                    removed_files.append(f)
-                fits.setval(f, 'PA_V3', value=pav3)
-            else:
-                print("Warning:  Files without keyword PA_V3 detected")
-                removed_files.append(f)
-    if removed_files != []:
-        print("Warning:  Removing the following files from input list")
-        for f in removed_files:
-            print('\t',f)
-    return removed_files
-
-def convert2fits(sci_ivm):
-    """
-    Checks if a file is in WAIVER of GEIS format and converts it to MEF
-    """
-    removed_files = []
-    translated_names = []
-    newivmlist = []
-
-    for file in sci_ivm:
-        #find out what the input is
-        # if science file is not found on disk, add it to removed_files for removal
-        try:
-            imgfits,imgtype = fileutil.isFits(file[0])
-        except IOError:
-            print("Warning:  File %s could not be found" %file[0])
-            print("Warning:  Removing file %s from input list" %file[0])
-            removed_files.append(file[0])
-            continue
-
-        # Check for existence of waiver FITS input, and quit if found.
-        # Or should we print a warning and continue but not use that file
-        if imgfits and imgtype == 'waiver':
-            newfilename = waiver2mef(file[0], convert_dq=True)
-            if newfilename == None:
-                print("Removing file %s from input list - could not convert WAIVER format to MEF\n" %file[0])
-                removed_files.append(file[0])
-            else:
-                removed_files.append(file[0])
-                translated_names.append(newfilename)
-                newivmlist.append(file[1])
-
-        # If a GEIS image is provided as input, create a new MEF file with
-        # a name generated using 'buildFITSName()'
-        # Convert the corresponding data quality file if present
-        if not imgfits:
-            newfilename = geis2mef(file[0], convert_dq=True)
-            if newfilename == None:
-                print("Removing file %s from input list - could not convert GEIS format to MEF\n" %file[0])
-                removed_files.append(file[0])
-            else:
-                removed_files.append(file[0])
-                translated_names.append(newfilename)
-                newivmlist.append(file[1])
-
-    return removed_files, translated_names, newivmlist
-
-def waiver2mef(sciname, newname=None, convert_dq=True):
-    """
-    Converts a GEIS science file and its corresponding
-    data quality file (if present) to MEF format
-    Writes out both files to disk.
-    Returns the new name of the science image.
-    """
-
-    def convert(file):
-        newfilename = fileutil.buildNewRootname(file, extn='_c0h.fits')
-        try:
-            newimage = fileutil.openImage(file,writefits=True,
-                                          fitsname=newfilename,clobber=True)
-            del newimage
-            return newfilename
-        except IOError:
-            print('Warning: File %s could not be found' % file)
-            return None
-
-    newsciname = convert(sciname)
-    if convert_dq:
-        dq_name = convert(fileutil.buildNewRootname(sciname, extn='_c1h.fits'))
-
-    return newsciname
-
-
-
-def geis2mef(sciname, convert_dq=True):
-    """
-    Converts a GEIS science file and its corresponding
-    data quality file (if present) to MEF format
-    Writes out both files to disk.
-    Returns the new name of the science image.
-    """
-
-    def convert(file):
-        newfilename = fileutil.buildFITSName(file)
-        try:
-            newimage = fileutil.openImage(file,writefits=True,
-                fitsname=newfilename, clobber=True)
-            del newimage
-            return newfilename
-        except IOError:
-            print('Warning: File %s could not be found' % file)
-            return None
-
-    newsciname = convert(sciname)
-    if convert_dq:
-        dq_name = convert(sciname.split('.')[0] + '.c1h')
-
-    return newsciname
-
-def countInput(input):
-    files = parseinput.parseinput(input)
-    count = len(files[0])
-    for f in files[0]:
-        if fileutil.isFits(f)[0]:
-            try:
-                ins = fits.getval(f, 'INSTRUME')
-            except: # allow odd fits files; do not stop the count
-                ins = None
-            if ins == 'STIS':
-                count += (stisObsCount(f)-1)
-    return count
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/clipboard_helper.py b/required_pkgs/stsci.tools/lib/stsci/tools/clipboard_helper.py
deleted file mode 100644
index c14e092..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/clipboard_helper.py
+++ /dev/null
@@ -1,95 +0,0 @@
-""" Usually copying to and from the clipboard in an app is handled automatically
-and correctly on a given platform, when the user applies the right keystrokes
-or mouse events for that platform.  In some corner cases this might not be
-true, so this module exists to help facilitate any needed copying or pasting.
-For now, this is Tkinter based, but it is imported lazily.
-
-$Id: clipboard_helper.py 38142 2015-03-06 13:42:21Z bsimon $
-"""
-
-from __future__ import division, print_function # confidence high
-
-import sys
-if sys.version_info[0] > 2:
-    import tkinter as Tkinter
-else:
-    import Tkinter
-
-_theRoot = None
-_lastSel = '' # our own copy of the last selected text (for PRIMARY)
-
-# Install our own PRIMARY request handler.
def ch_handler(offset=0, length=-1, **kw):
    """ Handle standard PRIMARY clipboard access.  Note that offset and length
    are passed as strings.  This differs from CLIPBOARD. """
    # Tk hands the arguments over as strings, so convert before slicing.
    start = int(offset)
    count = int(length)
    if count < 0:
        # Negative length means "everything we have".
        count = len(_lastSel)
    return _lastSel[start:start + count]
-
-
-# X11 apps (e.g. xterm) seem to use PRIMARY for select=copy and midmouse=paste
-# Other X11 apps        seem to use CLIPBOARD for ctl-c=copy and ?ctl-v?=paste
-# OS X seems to use CLIPBOARD for everything, which is Cmd-C and Cmd-V
-# Described here:  http://wiki.tcl.tk/1217 "Primary Transfer vs. the Clipboard"
-# See also:  http://www.tcl.tk/man/tcl8.5/TkCmd/selection.htm
-#      and:  http://www.tcl.tk/man/tcl8.5/TkCmd/clipboard.htm
-
-
def put(text, cbname):
    """ Place *text* onto the named clipboard ('CLIPBOARD' or 'PRIMARY'). """
    global _lastSel
    _checkTkInit()
    if cbname == 'CLIPBOARD':
        _theRoot.clipboard_clear()
        if text:
            # for clipboard_append, kwds can be -displayof, -format, or -type
            _theRoot.clipboard_append(text)
    elif cbname == 'PRIMARY':
        # Remember the text ourselves, install ch_handler, and claim
        # ownership so ch_handler serves subsequent PRIMARY requests.
        _lastSel = text
        _theRoot.selection_handle(ch_handler, selection='PRIMARY')
        _theRoot.selection_own(selection='PRIMARY')
        # could add command arg for a func to be called when we lose ownership
    else:
        raise RuntimeError("Unexpected clipboard name: "+str(cbname))
-
-
def get(cbname):
    """ Get the contents of the given clipboard ('PRIMARY' or 'CLIPBOARD').

    Returns None when the selection is empty or cannot be retrieved.
    Raises RuntimeError for any other clipboard name.
    """
    _checkTkInit()
    if cbname in ('PRIMARY', 'CLIPBOARD'):
        try:
            return _theRoot.selection_get(selection=cbname)
        except Exception:
            # selection_get raises a TclError when the selection is empty or
            # unavailable; narrowed from a bare ``except`` so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            return None
    raise RuntimeError("Unexpected clipboard name: "+str(cbname))
-
-
def dump():
    """ Debug aid: print the contents and owner of both clipboards. """
    _checkTkInit()
    print('primary   = ' + str(get('PRIMARY')))
    print('clipboard = ' + str(get('CLIPBOARD')))
    print('owner     = ' + str(_theRoot.selection_own_get()))
-
-
def _checkTkInit():
    """ Make sure the Tkinter root is defined.

    Reuses the application's default root if one exists; otherwise creates
    a root window and immediately withdraws it so nothing is shown.
    """
    global _theRoot
    if _theRoot is None:
        if Tkinter._default_root:
            # use it
            _theRoot = Tkinter._default_root
        else:
            # create it but withdraw it immediately
            _theRoot = Tkinter.Tk()
            _theRoot.withdraw()
        # BUGFIX: the original code ended with ``del Tkinter`` here to drop
        # the lazily-imported module reference.  But a ``del`` of a name in
        # a function body makes that name function-local, so the
        # ``Tkinter._default_root`` lookup above raised UnboundLocalError
        # the very first time this ran.  The ``del`` is removed.
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/compmixin.py b/required_pkgs/stsci.tools/lib/stsci/tools/compmixin.py
deleted file mode 100644
index 79cdeb4..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/compmixin.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env python
-#
-"""
-   This module is from Lennart Regebro's ComparableMixin class, available at:
-
-       http://regebro.wordpress.com/2010/12/13/
-              python-implementing-rich-comparison-the-correct-way/
-
-   The idea is to prevent you from having to define lt,le,eq,ne,etc...
-   This may no longer be necessary after the functools total_ordering
-   decorator (Python v2.7) is available on all Python versions
-   supported by our software.
-
-   For simple comparisons, all that is necessary is to derive your class
-   from ComparableMixin and override the _cmpkey() method.
-
-   For more complex comparisons (where type-checking needs to occur and
-   comparisons to other types are allowed), simply override _compare() instead
-   of _cmpkey().
-
-   BEWARE that comparing different types has different results in Python 2.x
-   versus Python 3.x:
-
-        Python 2.7
-        >>> 'a' < 2
-        False
-
-        Python 3.2.1
-        >>> 'a' < 2
-        Traceback (most recent call last):
-          File "<stdin>", line 1, in <module>
-        TypeError: unorderable types: str() < int()
-"""
-from __future__ import print_function
-
-import sys
-if sys.version_info[0] < 3:
-    string_types = basestring
-else:
-    string_types = str
-
class ComparableMixin(object):
    """Mixin that derives all six rich comparisons from a single hook.

    Subclasses override ``_cmpkey()`` for the simple case, or override
    ``_compare()`` directly when cross-type comparisons need custom logic.
    """

    def _compare(self, other, method):
        try:
            return method(self._cmpkey(), other._cmpkey())
        except (AttributeError, TypeError):
            # ``other`` has no _cmpkey, or the keys are of incomparable
            # types; let Python try the reflected operation instead.
            return NotImplemented

    def __eq__(self, other):
        return self._compare(other, lambda mine, theirs: mine == theirs)

    def __ne__(self, other):
        return self._compare(other, lambda mine, theirs: mine != theirs)

    def __lt__(self, other):
        return self._compare(other, lambda mine, theirs: mine < theirs)

    def __le__(self, other):
        return self._compare(other, lambda mine, theirs: mine <= theirs)

    def __gt__(self, other):
        return self._compare(other, lambda mine, theirs: mine > theirs)

    def __ge__(self, other):
        return self._compare(other, lambda mine, theirs: mine >= theirs)
-
-
class ComparableIntBaseMixin(ComparableMixin):
    """ For those classes which, at heart, are comparable to integers. """
    def _compare(self, other, method):
        # Same class: compare keys directly; otherwise coerce both to int.
        if not isinstance(other, self.__class__):
            return method(int(self._cmpkey()), int(other))
        return method(self._cmpkey(), other._cmpkey())
-
-
class ComparableFloatBaseMixin(ComparableMixin):
    """ For those classes which, at heart, are comparable to floats. """
    def _compare(self, other, method):
        # Same class: compare keys directly; otherwise coerce both to float.
        if not isinstance(other, self.__class__):
            return method(float(self._cmpkey()), float(other))
        return method(self._cmpkey(), other._cmpkey())
-
-
-# -----------------------------------------------------------------------------
-
-
-# this class is only used for testing this module!
class SimpleStrUnitTest(ComparableMixin):
    """Test-only comparable that stringifies whatever it is given."""

    def __init__(self, v):
        # all input turned to string
        self.val = str(v)

    def __str__(self):
        return str(self.val)

    def _cmpkey(self):
        return self.val
-
-
-# this class is only used for testing this module!
class AnyTypeUnitTest(ComparableMixin):
    """Test-only comparable that keeps its value's original type.

    Overrides ``_compare()`` (not ``_cmpkey``) so it can handle strings,
    None and ints.  It intentionally does NOT support comparing a
    multi-char string value against an int, so the module self-test has a
    failing case to exercise.
    """

    def __init__(self, v):
        self.val = v  # leave all input typed as is

    def __str__(self):
        return str(self.val)

    def _compare(self, other, method):
        if isinstance(other, self.__class__):
            return self._compare(other.val, method)  # recurse, get logic below
        if isinstance(other, string_types):
            return method(str(self.val), other)
        # fixed: test against None with ``is`` rather than ``==`` (PEP 8;
        # also avoids invoking arbitrary __eq__ implementations)
        elif other is None and self.val is None:
            return method(0, 0)
        elif other is None:
            return method(str(self.val), '')  # coerce to str compare
        elif isinstance(other, int):
            # handle ONLY case where self.val is a single char or an int
            if isinstance(self.val, string_types) and len(self.val) == 1:
                return method(ord(self.val), other)
            else:
                return method(int(self.val), other)  # assume we are int-like
        try:
            return method(self.val, other)
        except (AttributeError, TypeError):
            return NotImplemented
-
-
-# -----------------------------------------------------------------------------
-
-
def test():
    """Self-test of the comparison mixins; raises AssertionError on failure.

    Prints progress markers per assertion group.  The final two checks
    deliberately provoke ValueError from ``int('yyy')`` inside
    ``AnyTypeUnitTest._compare`` (multi-char string vs int is unsupported).
    """
    a = SimpleStrUnitTest('a')
    b = SimpleStrUnitTest('b')
    c = SimpleStrUnitTest('c')
    two = SimpleStrUnitTest(2)

    # compare two SimpleStrUnitTest objects
    assert str(a>b) == "False"
    assert str(a<b) == "True"
    assert str(a<=b) == "True"
    assert str(a==b) == "False"
    assert str(b==b) == "True"
    assert str(a<c) == "True"
    assert str(a<=c) == "True"
    assert str(a!=c) == "True"
    assert str(c!=c) == "False"
    assert str(c==c) == "True"
    assert str(b<two) == "False"
    assert str(b>=two) == "True"
    assert str(b==two) == "False"
    assert str([str(jj) for jj in sorted([b,a,two,c])])=="['2', 'a', 'b', 'c']"
    print('Success in first set')

    x = AnyTypeUnitTest('x')
    y = AnyTypeUnitTest('yyy')
    z = AnyTypeUnitTest(0)
    nn = AnyTypeUnitTest(None)

    # compare two AnyTypeUnitTest objects
    assert str(x>y) == "False"
    assert str(x<y) == "True"
    assert str(x<=y) == "True"
    assert str(x==y) == "False"
    assert str(y==y) == "True"
    assert str(x<z) == "False"
    assert str(x<=z) == "False"
    assert str(x>z) == "True"
    assert str(x!=z) == "True"
    assert str(z!=z) == "False"
    assert str(z==z) == "True"
    assert str(y<nn) == "False"
    assert str(y>=nn) == "True"
    assert str(y==nn) == "False"
    assert str(nn==nn) == "True"
    assert str([str(jj) for jj in sorted([y,x,nn,z])]) == "['None', '0', 'x', 'yyy']"
    print('Success in second set')

    # compare AnyTypeUnitTest objects to built-in types
    assert str(x<0) == "False"
    assert str(x<=0) == "False"
    assert str(x>0) == "True"
    assert str(x!=0) == "True"
    assert str(x==0) == "False"
    assert str(x<None) == "False"
    assert str(x<=None) == "False"
    assert str(x>None) == "True"
    assert str(x!=None) == "True"
    assert str(x==None) == "False"
    assert str(x<"abc") == "False"
    assert str(x<="abc") == "False"
    assert str(x>"abc") == "True"
    assert str(x!="abc") == "True"
    assert str(x=="abc") == "False"
    assert str(y<None) == "False"
    assert str(y<=None) == "False"
    assert str(y>None) == "True"
    assert str(y!=None) == "True"
    assert str(y==None) == "False"
    assert str(y<"abc") == "False"
    assert str(y<="abc") == "False"
    assert str(y>"abc") == "True"
    assert str(y!="abc") == "True"
    assert str(y=="abc") == "False"
    print('Success in third set')

    # all of the above should work without errors; now raise some
    print('yyy == 0 ?')
    try:
        y == z # AnyTypeUnitTest intentionally doesn't compare strlen>1 to ints
        assert 0, 'Exception expected but not found'
    except ValueError:
        print('   ... exception handled')

    print('sorted([0, yyy]) ?')
    try:
        sorted([z,y])
        assert 0, 'Exception expected but not found'
    except ValueError:
        print('   ... exception handled')
    print('Test successful')
-
-# -----------------------------------------------------------------------------
-
# Run the self-test only when executed directly (not when imported).
if __name__ == '__main__':
    test()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/configobj.py b/required_pkgs/stsci.tools/lib/stsci/tools/configobj.py
deleted file mode 100644
index e7f5418..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/configobj.py
+++ /dev/null
@@ -1,2485 +0,0 @@
-# configobj.py
-# A config file reader/writer that supports nested sections in config files.
-# Copyright (C) 2005-2010 Michael Foord, Nicola Larosa
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-#         nico AT tekNico DOT net
-
-# ConfigObj 4
-# http://www.voidspace.org.uk/python/configobj.html
-
-# Released subject to the BSD License
-# Please see http://www.voidspace.org.uk/python/license.shtml
-
-# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
-# For information about bugfixes, updates and support, please join the
-# ConfigObj mailing list:
-# http://lists.sourceforge.net/lists/listinfo/configobj-develop
-# Comments, suggestions and bug reports welcome.
-
-from __future__ import absolute_import, division, generators
-
-import os
-import re
-import sys
-
-from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
-
-# To conditionally use version dependent code
-PY3K = sys.version_info[0] > 2
-
-if PY3K:
-    string_types = str
-else:
-    string_types = basestring
-
-# imported lazily to avoid startup performance hit if it isn't used
-compiler = None
-
-# A dictionary mapping BOM to
-# the encoding to decode with, and what to set the
-# encoding attribute to.
-BOMS = {
-    BOM_UTF8: ('utf_8', None),
-    BOM_UTF16_BE: ('utf16_be', 'utf_16'),
-    BOM_UTF16_LE: ('utf16_le', 'utf_16'),
-    BOM_UTF16: ('utf_16', 'utf_16'),
-    }
-# All legal variants of the BOM codecs.
-# TODO: the list of aliases is not meant to be exhaustive, is there a
-#   better way ?
-BOM_LIST = {
-    'utf_16': 'utf_16',
-    'u16': 'utf_16',
-    'utf16': 'utf_16',
-    'utf-16': 'utf_16',
-    'utf16_be': 'utf16_be',
-    'utf_16_be': 'utf16_be',
-    'utf-16be': 'utf16_be',
-    'utf16_le': 'utf16_le',
-    'utf_16_le': 'utf16_le',
-    'utf-16le': 'utf16_le',
-    'utf_8': 'utf_8',
-    'u8': 'utf_8',
-    'utf': 'utf_8',
-    'utf8': 'utf_8',
-    'utf-8': 'utf_8',
-    }
-
-# Map of encodings to the BOM to write.
-BOM_SET = {
-    'utf_8': BOM_UTF8,
-    'utf_16': BOM_UTF16,
-    'utf16_be': BOM_UTF16_BE,
-    'utf16_le': BOM_UTF16_LE,
-    None: BOM_UTF8
-    }
-
-
def match_utf8(encoding):
    """Return True when *encoding* is any recognized alias of UTF-8."""
    normalized = encoding.lower()
    return BOM_LIST.get(normalized) == 'utf_8'
-
-
-# Quote strings used for writing values
-squot = "'%s'"
-dquot = '"%s"'
-noquot = "%s"
-wspace_plus = ' \r\n\v\t\'"'
-tsquot = '"""%s"""'
-tdquot = "'''%s'''"
-
-# Sentinel for use in getattr calls to replace hasattr
-MISSING = object()
-
-__version__ = '4.7.2'
-
-try:
-    any
-except NameError:
-    def any(iterable):
-        for entry in iterable:
-            if entry:
-                return True
-        return False
-
-
-__all__ = (
-    '__version__',
-    'DEFAULT_INDENT_TYPE',
-    'DEFAULT_INTERPOLATION',
-    'ConfigObjError',
-    'NestingError',
-    'ParseError',
-    'DuplicateError',
-    'ConfigspecError',
-    'ConfigObj',
-    'SimpleVal',
-    'InterpolationError',
-    'InterpolationLoopError',
-    'MissingInterpolationOption',
-    'RepeatSectionError',
-    'ReloadError',
-    'UnreprError',
-    'UnknownType',
-    'flatten_errors',
-    'get_extra_values'
-)
-
-DEFAULT_INTERPOLATION = 'configparser'
-DEFAULT_INDENT_TYPE = '    '
-MAX_INTERPOL_DEPTH = 10
-
-OPTION_DEFAULTS = {
-    'interpolation': True,
-    'raise_errors': False,
-    'list_values': True,
-    'create_empty': False,
-    'file_error': False,
-    'configspec': None,
-    'stringify': True,
-    # option may be set to one of ('', ' ', '\t')
-    'indent_type': None,
-    'encoding': None,
-    'default_encoding': None,
-    'unrepr': False,
-    'write_empty_values': False,
-}
-
-
-
def getObj(s):
    """Parse ``s`` as an expression and return its AST value node.

    NOTE(review): relies on the Python 2-only ``compiler`` package
    (removed in Python 3), imported lazily to avoid startup cost; on
    Python 3 this import fails, so unrepr-mode parsing cannot run there.
    """
    global compiler
    if compiler is None:
        import compiler
    # Wrap in an assignment so the input parses as a full statement, then
    # walk down the module AST to the assigned expression node.
    s = "a=" + s
    p = compiler.parse(s)
    return p.getChildren()[1].getChildren()[0].getChildren()[1]
-
-
class UnknownType(Exception):
    """Raised by ``Builder`` when it meets an AST node it cannot convert."""
    pass
-
-
class Builder(object):
    """Rebuilds Python values from ``compiler`` AST nodes (unrepr mode).

    NOTE(review): works on nodes produced by the Python 2-only
    ``compiler`` package that ``getObj()`` imports; these code paths
    cannot execute under Python 3.
    """

    def build(self, o):
        # Dispatch on the node's class name: build_List, build_Const, ...
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)

    def build_List(self, o):
        return list(map(self.build, o.getChildren()))

    def build_Const(self, o):
        return o.value

    def build_Dict(self, o):
        # Children alternate key, value - consume the iterator pairwise.
        d = {}
        if PY3K:
            i = map(self.build, o.getChildren())
            for el in i:
                d[el] = next(i)
        else:
            i = iter(map(self.build, o.getChildren()))
            for el in i:
                d[el] = i.next()
        return d

    def build_Tuple(self, o):
        return tuple(self.build_List(o))

    def build_Name(self, o):
        # Only the three literal constants are accepted as bare names.
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False

        # An undefined Name
        raise UnknownType('Undefined Name')

    def build_Add(self, o):
        # Supports complex literals written as "real+imagj".
        real, imag = list(map(self.build_Const, o.getChildren()))
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag

    def build_Getattr(self, o):
        # Dotted access, e.g. "module.attr" in an unrepr value.
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)

    def build_UnarySub(self, o):
        return -self.build_Const(o.getChildren()[0])

    def build_UnaryAdd(self, o):
        return self.build_Const(o.getChildren()[0])
-
-
-_builder = Builder()
-
-
def unrepr(s):
    """Convert a repr-style string back into a Python value.

    Falsy input (empty string, None, ...) is returned unchanged.
    """
    return _builder.build(getObj(s)) if s else s
-
-
-
class ConfigObjError(SyntaxError):
    """
    Base class of every error ConfigObj raises.
    Derives from SyntaxError and records the offending line and its number.
    """
    def __init__(self, message='', line_number=None, line=''):
        self.line_number = line_number
        self.line = line
        SyntaxError.__init__(self, message)
-
-
class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting (of section markers)
    that doesn't match.
    """
-
-
class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written:
    it is neither a valid ``key = value`` line
    nor a valid section marker line.
    """
-
-
class ReloadError(IOError):
    """
    Raised when a 'reload' operation fails because no filename is set.
    This exception is a subclass of ``IOError``.
    """
    def __init__(self):
        super(ReloadError, self).__init__(
            'reload failed, filename is not set.')
-
-
class DuplicateError(ConfigObjError):
    """
    Raised when the keyword or section specified already exists.
    """
-
-
class ConfigspecError(ConfigObjError):
    """
    An error occurred whilst parsing a configspec.
    """
-
-
class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors (loop / missing option)."""
-
-
class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""

    def __init__(self, option):
        msg = 'interpolation loop detected in value "%s".' % option
        InterpolationError.__init__(self, msg)
-
-
class RepeatSectionError(ConfigObjError):
    """
    This error indicates extra, unexpected sections appearing in a
    section that declares a ``__many__`` (repeated) section.
    """
-
-
class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""
    def __init__(self, option):
        InterpolationError.__init__(
            self, 'missing option "%s" in interpolation.' % option)
-
-
class UnreprError(ConfigObjError):
    """Raised for errors encountered while parsing in unrepr mode."""
-
-
-
class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.

    This class is an abstract base class; its descendants perform
    the actual work.  Subclasses must provide ``_cookie``, ``_KEYCRE``
    and ``_parse_match()``.
    """

    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    _cookie = '%'

    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section


    def interpolate(self, key, value):
        """Return *value* with all interpolation references expanded.

        Raises InterpolationLoopError when a reference cycle is detected.
        """
        # short-cut
        if not self._cookie in value:
            return value

        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.

            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops

            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
            if (key, section.name) in backtrail:
                # Yes - infinite loop detected
                raise InterpolationLoopError(key)
            # Place a marker on our backtrail so we won't come back here again
            backtrail[(key, section.name)] = 1

            # Now start the actual work
            match = self._KEYCRE.search(value)
            while match:
                # The actual parsing of the match is implementation-dependent,
                # so delegate to our helper function
                k, v, s = self._parse_match(match)
                if k is None:
                    # That's the signal that no further interpolation is needed
                    replacement = v
                else:
                    # Further interpolation may be needed to obtain final value
                    replacement = recursive_interpolate(k, v, s, backtrail)
                # Replace the matched string with its final value
                start, end = match.span()
                value = ''.join((value[:start], replacement, value[end:]))
                new_search_start = start + len(replacement)
                # Pick up the next interpolation key, if any, for next time
                # through the while loop
                match = self._KEYCRE.search(value, new_search_start)

            # Now safe to come back here again; remove marker from backtrail
            del backtrail[(key, section.name)]

            return value

        # Back in interpolate(), all we have to do is kick off the recursive
        # function with appropriate starting values
        value = recursive_interpolate(key, value, self.section, {})
        return value


    def _fetch(self, key):
        """Helper function to fetch values from owning section.

        Returns a 2-tuple: the value, and the section where it was found.
        Raises MissingInterpolationOption when *key* is found nowhere.
        """
        # switch off interpolation before we try and fetch anything !
        # NOTE: not exception-safe - if a .get() below raises, interpolation
        # stays disabled on the main ConfigObj.
        save_interp = self.section.main.interpolation
        self.section.main.interpolation = False

        # Start at section that "owns" this InterpolationEngine
        current_section = self.section
        while True:
            # try the current section first
            val = current_section.get(key)
            if val is not None and not isinstance(val, Section):
                break
            # try "DEFAULT" next
            val = current_section.get('DEFAULT', {}).get(key)
            if val is not None and not isinstance(val, Section):
                break
            # move up to parent and try again
            # top-level's parent is itself
            if current_section.parent is current_section:
                # reached top level, time to give up
                break
            current_section = current_section.parent

        # restore interpolation to previous value before returning
        self.section.main.interpolation = save_interp
        if val is None:
            raise MissingInterpolationOption(key)
        return val, current_section


    def _parse_match(self, match):
        """Implementation-dependent helper function.

        Will be passed a match object corresponding to the interpolation
        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
        key in the appropriate config file section (using the ``_fetch()``
        helper function) and return a 3-tuple: (key, value, section)

        ``key`` is the name of the key we're looking for
        ``value`` is the value found for that key
        ``section`` is a reference to the section where it was found

        ``key`` and ``section`` should be None if no further
        interpolation should be performed on the resulting value
        (e.g., if we interpolated "$$" and returned "$").
        """
        raise NotImplementedError()
-    
-
-
class ConfigParserInterpolation(InterpolationEngine):
    """Interpolation engine that mimics ConfigParser's %(name)s syntax."""
    _cookie = '%'
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")

    def _parse_match(self, match):
        lookup_key = match.group(1)
        found_value, owning_section = self._fetch(lookup_key)
        return lookup_key, found_value, owning_section
-
-
-
class TemplateInterpolation(InterpolationEngine):
    """Interpolation engine that mimics string.Template ($name) syntax."""
    _cookie = '$'
    _delimiter = '$'
    _KEYCRE = re.compile(r"""
        \$(?:
          (?P<escaped>\$)              |   # Two $ signs
          (?P<named>[_a-z][_a-z0-9]*)  |   # $name format
          {(?P<braced>[^}]*)}              # ${name} format
        )
        """, re.IGNORECASE | re.VERBOSE)

    def _parse_match(self, match):
        # $name / ${name}: fetch the referenced value from the section tree.
        name = match.group('named') or match.group('braced')
        if name is not None:
            found_value, owning_section = self._fetch(name)
            return name, found_value, owning_section
        # $$ escapes to a single literal delimiter; None key stops recursion.
        if match.group('escaped') is not None:
            return None, self._delimiter, None
        # Anything else is passed through untouched.
        return None, match.group(), None
-
-
# Registry mapping the ``interpolation`` option's value to an engine class.
interpolation_engines = {
    'configparser': ConfigParserInterpolation,
    'template': TemplateInterpolation,
}


def __newobj__(cls, *args):
    # Pickle helper: recreate an instance without running __init__
    # (used by Section.__reduce__).
    return cls.__new__(cls, *args)
-
-class Section(dict):
-    """
-    A dictionary-like object that represents a section in a config file.
-    
-    It does string interpolation if the 'interpolation' attribute
-    of the 'main' object is set to True.
-    
-    Interpolation is tried first from this object, then from the 'DEFAULT'
-    section of this object, next from the parent and its 'DEFAULT' section,
-    and so on until the main object is reached.
-    
-    A Section will behave like an ordered dictionary - following the
-    order of the ``scalars`` and ``sections`` attributes.
-    You can use this to change the order of members.
-    
-    Iteration follows the order: scalars, then sections.
-    """
-
-    
-    def __setstate__(self, state):
-        dict.update(self, state[0])
-        self.__dict__.update(state[1])
-
-    def __reduce__(self):
-        state = (dict(self), self.__dict__)
-        return (__newobj__, (self.__class__,), state)
-    
-    
    def __init__(self, parent, depth, main, indict=None, name=None):
        """
        * parent is the section above
        * depth is the depth level of this section
        * main is the main ConfigObj
        * indict is a dictionary to initialise the section with
        * name is this section's own name (purely informational)
        """
        if indict is None:
            indict = {}
        dict.__init__(self)
        # used for nesting level *and* interpolation
        self.parent = parent
        # used for the interpolation attribute
        self.main = main
        # level of nesting depth of this Section
        self.depth = depth
        # purely for information
        self.name = name
        #
        self._initialise()
        # we do this explicitly so that __setitem__ is used properly
        # (rather than just passing to ``dict.__init__``)
        for entry, value in indict.items():
            self[entry] = value
-            
-            
    def _initialise(self):
        """Reset all bookkeeping attributes to their empty defaults."""
        # the sequence of scalar values in this Section
        self.scalars = []
        # the sequence of sections in this Section
        self.sections = []
        # for comments :-)
        self.comments = {}
        self.inline_comments = {}
        # the configspec
        self.configspec = None
        # for defaults
        self.defaults = []
        self.default_values = {}
        self.extra_values = []
        self._created = False
-
-
    def _interpolate(self, key, value):
        """Run *value* through the configured interpolation engine.

        The engine is chosen lazily from ``main.interpolation`` on first
        use and cached on the instance.
        """
        try:
            # do we already have an interpolation engine?
            engine = self._interpolation_engine
        except AttributeError:
            # not yet: first time running _interpolate(), so pick the engine
            name = self.main.interpolation
            if name == True:  # note that "if name:" would be incorrect here
                # backwards-compatibility: interpolation=True means use default
                name = DEFAULT_INTERPOLATION
            name = name.lower()  # so that "Template", "template", etc. all work
            class_ = interpolation_engines.get(name, None)
            if class_ is None:
                # invalid value for self.main.interpolation
                self.main.interpolation = False
                return value
            else:
                # save reference to engine so we don't have to do this again
                engine = self._interpolation_engine = class_(self)
        # let the engine do the actual work
        return engine.interpolate(key, value)
-
-
-    def __getitem__(self, key):
-        """Fetch the item and do string interpolation."""
-        val = dict.__getitem__(self, key)
-        if self.main.interpolation: 
-            if isinstance(val, str):
-                return self._interpolate(key, val)
-            if isinstance(val, list):
-                def _check(entry):
-                    if isinstance(entry, string_types):
-                        return self._interpolate(key, entry)
-                    return entry
-                new = [_check(entry) for entry in val]
-                if new != val:
-                    return new
-        return val
-
-
    def __setitem__(self, key, value, unrepr=False):
        """
        Correctly set a value.
        
        Making dictionary values Section instances.
        (We have to special case 'Section' instances - which are also dicts)
        
        Keys must be strings.
        Values need only be strings (or lists of strings) if
        ``main.stringify`` is set.
        
        ``unrepr`` must be set when setting a value to a dictionary, without
        creating a new sub-section.

        Raises ValueError when *key* is not a string, and TypeError when
        ``main.stringify`` is False and a non-string value is supplied.
        """
        if not isinstance(key, string_types):
            raise ValueError('The key "%s" is not a string.' % key)
        
        # add the comment
        if key not in self.comments:
            self.comments[key] = []
            self.inline_comments[key] = ''
        # remove the entry from defaults
        if key in self.defaults:
            self.defaults.remove(key)
        #
        if isinstance(value, Section):
            if key not in self:
                self.sections.append(key)
            dict.__setitem__(self, key, value)
        elif isinstance(value, dict) and not unrepr:
            # First create the new depth level,
            # then create the section
            if key not in self:
                self.sections.append(key)
            new_depth = self.depth + 1
            dict.__setitem__(
                self,
                key,
                Section(
                    self,
                    new_depth,
                    self.main,
                    indict=value,
                    name=key))
        else:
            if key not in self:
                self.scalars.append(key)
            if not self.main.stringify:
                # stringify=False means only string (or list-of-string)
                # values are legal
                if isinstance(value, string_types):
                    pass
                elif isinstance(value, (list, tuple)):
                    for entry in value:
                        if not isinstance(entry, string_types):
                            raise TypeError('Value is not a string "%s".' % entry)
                else:
                    raise TypeError('Value is not a string "%s".' % value)
            dict.__setitem__(self, key, value)
-
-
-    def __delitem__(self, key):
-        """Remove items from the sequence when deleting."""
-        dict. __delitem__(self, key)
-        if key in self.scalars:
-            self.scalars.remove(key)
-        else:
-            self.sections.remove(key)
-        del self.comments[key]
-        del self.inline_comments[key]
-
-
-    def get(self, key, default=None):
-        """A version of ``get`` that doesn't bypass string interpolation."""
-        try:
-            return self[key]
-        except KeyError:
-            return default
-
-
-    def update(self, indict):
-        """
-        A version of update that uses our ``__setitem__``.
-        """
-        for entry in indict:
-            self[entry] = indict[entry]
-
-
-    def pop(self, key, default=MISSING):
-        """
-        'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
-        If key is not found, d is returned if given, otherwise KeyError is raised'
-        """
-        try:
-            val = self[key]
-        except KeyError:
-            if default is MISSING:
-                raise
-            val = default
-        else:
-            del self[key]
-        return val
-
-
-    def popitem(self):
-        """Pops the first (key,val)"""
-        sequence = (self.scalars + self.sections)
-        if not sequence:
-            raise KeyError(": 'popitem(): dictionary is empty'")
-        key = sequence[0]
-        val =  self[key]
-        del self[key]
-        return key, val
-
-
-    def clear(self):
-        """
-        A version of clear that also affects scalars/sections
-        Also clears comments and configspec.
-        
-        Leaves other attributes alone :
-            depth/main/parent are not affected
-        """
-        dict.clear(self)
-        self.scalars = []
-        self.sections = []
-        self.comments = {}
-        self.inline_comments = {}
-        self.configspec = None
-        self.defaults = []
-        self.extra_values = []
-
-
-    def setdefault(self, key, default=None):
-        """A version of setdefault that sets sequence if appropriate."""
-        try:
-            return self[key]
-        except KeyError:
-            self[key] = default
-            return self[key]
-
-
-    def items(self):
-        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
-        return list(zip((self.scalars + self.sections), list(self.values())))
-
-
-    def keys(self):
-        """D.keys() -> list of D's keys"""
-        return (self.scalars + self.sections)
-
-
-    def values(self):
-        """D.values() -> list of D's values"""
-        return [self[key] for key in (self.scalars + self.sections)]
-
-
-    def iteritems(self):
-        """D.iteritems() -> an iterator over the (key, value) items of D"""
-        return iter(list(self.items()))
-
-
-    def iterkeys(self):
-        """D.iterkeys() -> an iterator over the keys of D"""
-        return iter((self.scalars + self.sections))
-
-    __iter__ = iterkeys
-
-
-    def itervalues(self):
-        """D.itervalues() -> an iterator over the values of D"""
-        return iter(list(self.values()))
-
-
-    def __repr__(self):
-        """x.__repr__() <==> repr(x)"""
-        def _getval(key):
-            try:
-                return self[key]
-            except MissingInterpolationOption:
-                return dict.__getitem__(self, key)
-        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
-            for key in (self.scalars + self.sections)])
-
-    __str__ = __repr__
-    __str__.__doc__ = "x.__str__() <==> str(x)"
-
-
-    # Extra methods - not in a normal dictionary
-
-    def dict(self):
-        """
-        Return a deepcopy of self as a dictionary.
-        
-        All members that are ``Section`` instances are recursively turned to
-        ordinary dictionaries - by calling their ``dict`` method.
-        
-        >>> n = a.dict()
-        >>> n == a
-        1
-        >>> n is a
-        0
-        """
-        newdict = {}
-        for entry in self:
-            this_entry = self[entry]
-            if isinstance(this_entry, Section):
-                this_entry = this_entry.dict()
-            elif isinstance(this_entry, list):
-                # create a copy rather than a reference
-                this_entry = list(this_entry)
-            elif isinstance(this_entry, tuple):
-                # create a copy rather than a reference
-                this_entry = tuple(this_entry)
-            newdict[entry] = this_entry
-        return newdict
-
-
-    def merge(self, indict):
-        """
-        A recursive update - useful for merging config files.
-        
-        >>> a = '''[section1]
-        ...     option1 = True
-        ...     [[subsection]]
-        ...     more_options = False
-        ...     # end of file'''.splitlines()
-        >>> b = '''# File is user.ini
-        ...     [section1]
-        ...     option1 = False
-        ...     # end of file'''.splitlines()
-        >>> c1 = ConfigObj(b)
-        >>> c2 = ConfigObj(a)
-        >>> c2.merge(c1)
-        >>> c2
-        ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
-        """
-        for key, val in list(indict.items()):
-            if (key in self and isinstance(self[key], dict) and
-                                isinstance(val, dict)):
-                self[key].merge(val)
-            else:   
-                self[key] = val
-
-
-    def rename(self, oldkey, newkey):
-        """
-        Change a keyname to another, without changing position in sequence.
-        
-        Implemented so that transformations can be made on keys,
-        as well as on values. (used by encode and decode)
-        
-        Also renames comments.
-        """
-        if oldkey in self.scalars:
-            the_list = self.scalars
-        elif oldkey in self.sections:
-            the_list = self.sections
-        else:
-            raise KeyError('Key "%s" not found.' % oldkey)
-        pos = the_list.index(oldkey)
-        #
-        val = self[oldkey]
-        dict.__delitem__(self, oldkey)
-        dict.__setitem__(self, newkey, val)
-        the_list.remove(oldkey)
-        the_list.insert(pos, newkey)
-        comm = self.comments[oldkey]
-        inline_comment = self.inline_comments[oldkey]
-        del self.comments[oldkey]
-        del self.inline_comments[oldkey]
-        self.comments[newkey] = comm
-        self.inline_comments[newkey] = inline_comment
-
-
-    def walk(self, function, raise_errors=True,
-            call_on_sections=False, **keywargs):
-        """
-        Walk every member and call a function on the keyword and value.
-        
-        Return a dictionary of the return values
-        
-        If the function raises an exception, raise the errror
-        unless ``raise_errors=False``, in which case set the return value to
-        ``False``.
-        
-        Any unrecognised keyword arguments you pass to walk, will be pased on
-        to the function you pass in.
-        
-        Note: if ``call_on_sections`` is ``True`` then - on encountering a
-        subsection, *first* the function is called for the *whole* subsection,
-        and then recurses into it's members. This means your function must be
-        able to handle strings, dictionaries and lists. This allows you
-        to change the key of subsections as well as for ordinary members. The
-        return value when called on the whole subsection has to be discarded.
-        
-        See  the encode and decode methods for examples, including functions.
-        
-        admonition:: caution
-        
-        You can use ``walk`` to transform the names of members of a section
-        but you mustn't add or delete members.
-        
-        >>> config = '''[XXXXsection]
-        ... XXXXkey = XXXXvalue'''.splitlines()
-        >>> cfg = ConfigObj(config)
-        >>> cfg
-        ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
-        >>> def transform(section, key):
-        ...     val = section[key]
-        ...     newkey = key.replace('XXXX', 'CLIENT1')
-        ...     section.rename(key, newkey)
-        ...     if isinstance(val, (tuple, list, dict)):
-        ...         pass
-        ...     else:
-        ...         val = val.replace('XXXX', 'CLIENT1')
-        ...         section[newkey] = val
-        >>> cfg.walk(transform, call_on_sections=True)
-        {'CLIENT1section': {'CLIENT1key': None}}
-        >>> cfg
-        ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
-        """
-        out = {}
-        # scalars first
-        for i in range(len(self.scalars)):
-            entry = self.scalars[i]
-            try:
-                val = function(self, entry, **keywargs)
-                # bound again in case name has changed
-                entry = self.scalars[i]
-                out[entry] = val
-            except Exception:
-                if raise_errors:
-                    raise
-                else:
-                    entry = self.scalars[i]
-                    out[entry] = False
-        # then sections
-        for i in range(len(self.sections)):
-            entry = self.sections[i]
-            if call_on_sections:
-                try:
-                    function(self, entry, **keywargs)
-                except Exception:
-                    if raise_errors:
-                        raise
-                    else:
-                        entry = self.sections[i]
-                        out[entry] = False
-                # bound again in case name has changed
-                entry = self.sections[i]
-            # previous result is discarded
-            out[entry] = self[entry].walk(
-                function,
-                raise_errors=raise_errors,
-                call_on_sections=call_on_sections,
-                **keywargs)
-        return out
-
-
-    def as_bool(self, key):
-        """
-        Accepts a key as input. The corresponding value must be a string or
-        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
-        retain compatibility with Python 2.2.
-        
-        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns 
-        ``True``.
-        
-        If the string is one of  ``False``, ``Off``, ``No``, or ``0`` it returns 
-        ``False``.
-        
-        ``as_bool`` is not case sensitive.
-        
-        Any other input will raise a ``ValueError``.
-        
-        >>> a = ConfigObj()
-        >>> a['a'] = 'fish'
-        >>> a.as_bool('a')
-        Traceback (most recent call last):
-        ValueError: Value "fish" is neither True nor False
-        >>> a['b'] = 'True'
-        >>> a.as_bool('b')
-        1
-        >>> a['b'] = 'off'
-        >>> a.as_bool('b')
-        0
-        """
-        val = self[key]
-        if val == True:
-            return True
-        elif val == False:
-            return False
-        else:
-            try:
-                if not isinstance(val, string_types):
-                    # TODO: Why do we raise a KeyError here?
-                    raise KeyError()
-                else:
-                    return self.main._bools[val.lower()]
-            except KeyError:
-                raise ValueError('Value "%s" is neither True nor False' % val)
-
-
-    def as_int(self, key):
-        """
-        A convenience method which coerces the specified value to an integer.
-        
-        If the value is an invalid literal for ``int``, a ``ValueError`` will
-        be raised.
-        
-        >>> a = ConfigObj()
-        >>> a['a'] = 'fish'
-        >>> a.as_int('a')
-        Traceback (most recent call last):
-        ValueError: invalid literal for int() with base 10: 'fish'
-        >>> a['b'] = '1'
-        >>> a.as_int('b')
-        1
-        >>> a['b'] = '3.2'
-        >>> a.as_int('b')
-        Traceback (most recent call last):
-        ValueError: invalid literal for int() with base 10: '3.2'
-        """
-        return int(self[key])
-
-
-    def as_float(self, key):
-        """
-        A convenience method which coerces the specified value to a float.
-        
-        If the value is an invalid literal for ``float``, a ``ValueError`` will
-        be raised.
-        
-        >>> a = ConfigObj()
-        >>> a['a'] = 'fish'
-        >>> a.as_float('a')
-        Traceback (most recent call last):
-        ValueError: invalid literal for float(): fish
-        >>> a['b'] = '1'
-        >>> a.as_float('b')
-        1.0
-        >>> a['b'] = '3.2'
-        >>> a.as_float('b')
-        3.2000000000000002
-        """
-        return float(self[key])
-    
-    
-    def as_list(self, key):
-        """
-        A convenience method which fetches the specified value, guaranteeing
-        that it is a list.
-        
-        >>> a = ConfigObj()
-        >>> a['a'] = 1
-        >>> a.as_list('a')
-        [1]
-        >>> a['a'] = (1,)
-        >>> a.as_list('a')
-        [1]
-        >>> a['a'] = [1]
-        >>> a.as_list('a')
-        [1]
-        """
-        result = self[key]
-        if isinstance(result, (tuple, list)):
-            return list(result)
-        return [result]
-        
-
-    def restore_default(self, key):
-        """
-        Restore (and return) default value for the specified key.
-        
-        This method will only work for a ConfigObj that was created
-        with a configspec and has been validated.
-        
-        If there is no default value for this key, ``KeyError`` is raised.
-        """
-        default = self.default_values[key]
-        dict.__setitem__(self, key, default)
-        if key not in self.defaults:
-            self.defaults.append(key)
-        return default
-
-    
-    def restore_defaults(self):
-        """
-        Recursively restore default values to all members
-        that have them.
-        
-        This method will only work for a ConfigObj that was created
-        with a configspec and has been validated.
-        
-        It doesn't delete or modify entries without default values.
-        """
-        for key in self.default_values:
-            self.restore_default(key)
-            
-        for section in self.sections:
-            self[section].restore_defaults()
-
-
-class ConfigObj(Section):
-    """An object to read, create, and write config files."""
-
-    _keyword = re.compile(r'''^ # line start
-        (\s*)                   # indentation
-        (                       # keyword
-            (?:".*?")|          # double quotes
-            (?:'.*?')|          # single quotes
-            (?:[^'"=].*?)       # no quotes
-        )
-        \s*=\s*                 # divider
-        (.*)                    # value (including list values and comments)
-        $   # line end
-        ''',
-        re.VERBOSE)
-
-    _sectionmarker = re.compile(r'''^
-        (\s*)                     # 1: indentation
-        ((?:\[\s*)+)              # 2: section marker open
-        (                         # 3: section name open
-            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
-            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
-            (?:[^'"\s].*?)        # at least one non-space unquoted
-        )                         # section name close
-        ((?:\s*\])+)              # 4: section marker close
-        \s*(\#.*)?                # 5: optional comment
-        $''',
-        re.VERBOSE)
-
-    # this regexp pulls list values out as a single string
-    # or single values and comments
-    # FIXME: this regex adds a '' to the end of comma terminated lists
-    #   workaround in ``_handle_value``
-    _valueexp = re.compile(r'''^
-        (?:
-            (?:
-                (
-                    (?:
-                        (?:
-                            (?:".*?")|              # double quotes
-                            (?:'.*?')|              # single quotes
-                            (?:[^'",\#][^,\#]*?)    # unquoted
-                        )
-                        \s*,\s*                     # comma
-                    )*      # match all list items ending in a comma (if any)
-                )
-                (
-                    (?:".*?")|                      # double quotes
-                    (?:'.*?')|                      # single quotes
-                    (?:[^'",\#\s][^,]*?)|           # unquoted
-                    (?:(?<!,))                      # Empty value
-                )?          # last item in a list - or string value
-            )|
-            (,)             # alternatively a single comma - empty list
-        )
-        \s*(\#.*)?          # optional comment
-        $''',
-        re.VERBOSE)
-
-    # use findall to get the members of a list value
-    _listvalueexp = re.compile(r'''
-        (
-            (?:".*?")|          # double quotes
-            (?:'.*?')|          # single quotes
-            (?:[^'",\#]?.*?)       # unquoted
-        )
-        \s*,\s*                 # comma
-        ''',
-        re.VERBOSE)
-
-    # this regexp is used for the value
-    # when lists are switched off
-    _nolistvalue = re.compile(r'''^
-        (
-            (?:".*?")|          # double quotes
-            (?:'.*?')|          # single quotes
-            (?:[^'"\#].*?)|     # unquoted
-            (?:)                # Empty value
-        )
-        \s*(\#.*)?              # optional comment
-        $''',
-        re.VERBOSE)
-
-    # regexes for finding triple quoted values on one line
-    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
-    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
-    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
-    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
-
-    _triple_quote = {
-        "'''": (_single_line_single, _multi_line_single),
-        '"""': (_single_line_double, _multi_line_double),
-    }
-
-    # Used by the ``istrue`` Section method
-    _bools = {
-        'yes': True, 'no': False,
-        'on': True, 'off': False,
-        '1': True, '0': False,
-        'true': True, 'false': False,
-        }
-
-
-    def __init__(self, infile=None, options=None, configspec=None, encoding=None,
-                 interpolation=True, raise_errors=False, list_values=True,
-                 create_empty=False, file_error=False, stringify=True,
-                 indent_type=None, default_encoding=None, unrepr=False,
-                 write_empty_values=False, _inspec=False):
-        """
-        Parse a config file or create a config file object.
-        
-        ``ConfigObj(infile=None, configspec=None, encoding=None,
-                    interpolation=True, raise_errors=False, list_values=True,
-                    create_empty=False, file_error=False, stringify=True,
-                    indent_type=None, default_encoding=None, unrepr=False,
-                    write_empty_values=False, _inspec=False)``
-        """
-        self._inspec = _inspec
-        # init the superclass
-        Section.__init__(self, self, 0, self)
-        
-        infile = infile or []
-        
-        _options = {'configspec': configspec,
-                    'encoding': encoding, 'interpolation': interpolation,
-                    'raise_errors': raise_errors, 'list_values': list_values,
-                    'create_empty': create_empty, 'file_error': file_error,
-                    'stringify': stringify, 'indent_type': indent_type,
-                    'default_encoding': default_encoding, 'unrepr': unrepr,
-                    'write_empty_values': write_empty_values}
-
-        if options is None:
-            options = _options
-        else:
-            import warnings
-            warnings.warn('Passing in an options dictionary to ConfigObj() is '
-                          'deprecated. Use **options instead.',
-                          DeprecationWarning, stacklevel=2)
-            
-            # TODO: check the values too.
-            for entry in options:
-                if entry not in OPTION_DEFAULTS:
-                    raise TypeError('Unrecognised option "%s".' % entry)
-            for entry, value in list(OPTION_DEFAULTS.items()):
-                if entry not in options:
-                    options[entry] = value
-                keyword_value = _options[entry]
-                if value != keyword_value:
-                    options[entry] = keyword_value
-        
-        # XXXX this ignores an explicit list_values = True in combination
-        # with _inspec. The user should *never* do that anyway, but still...
-        if _inspec:
-            options['list_values'] = False
-        
-        self._initialise(options)
-        configspec = options['configspec']
-        self._original_configspec = configspec
-        self._load(infile, configspec)
-        
-        
-    def _load(self, infile, configspec):
-        if isinstance(infile, string_types):
-            self.filename = infile
-            if os.path.isfile(infile):
-                h = open(infile) # !!! was 'rb' but fails PY3K and we dont need
-                infile = h.read() or []
-                h.close()
-            elif self.file_error:
-                # raise an error if the file doesn't exist
-                raise IOError('Config file not found: "%s".' % self.filename)
-            else:
-                # file doesn't already exist
-                if self.create_empty:
-                    # this is a good test that the filename specified
-                    # isn't impossible - like on a non-existent device
-                    h = open(infile, 'w')
-                    h.write('')
-                    h.close()
-                infile = []
-                
-        elif isinstance(infile, (list, tuple)):
-            infile = list(infile)
-            
-        elif isinstance(infile, dict):
-            # initialise self
-            # the Section class handles creating subsections
-            if isinstance(infile, ConfigObj):
-                # get a copy of our ConfigObj
-                def set_section(in_section, this_section):
-                    for entry in in_section.scalars:
-                        this_section[entry] = in_section[entry]
-                    for section in in_section.sections:
-                        this_section[section] = {}
-                        set_section(in_section[section], this_section[section])
-                set_section(infile, self)
-                
-            else:
-                for entry in infile:
-                    self[entry] = infile[entry]
-            del self._errors
-            
-            if configspec is not None:
-                self._handle_configspec(configspec)
-            else:
-                self.configspec = None
-            return
-        
-        elif getattr(infile, 'read', MISSING) is not MISSING:
-            # This supports file like objects
-            infile = infile.read() or []
-            # needs splitting into lines - but needs doing *after* decoding
-            # in case it's not an 8 bit encoding
-        else:
-            raise TypeError('infile must be a filename, file like object, or list of lines.')
-        
-        if infile:
-            # don't do it for the empty ConfigObj
-            infile = self._handle_bom(infile)
-            # infile is now *always* a list
-            #
-            # Set the newlines attribute (first line ending it finds)
-            # and strip trailing '\n' or '\r' from lines
-            for line in infile:
-                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
-                    continue
-                for end in ('\r\n', '\n', '\r'):
-                    if line.endswith(end):
-                        self.newlines = end
-                        break
-                break
-
-            infile = [line.rstrip('\r\n') for line in infile]
-            
-        self._parse(infile)
-        # if we had any errors, now is the time to raise them
-        if self._errors:
-            info = "at line %s." % self._errors[0].line_number
-            if len(self._errors) > 1:
-                msg = "Parsing failed with several errors.\nFirst error %s" % info
-                error = ConfigObjError(msg)
-            else:
-                error = self._errors[0]
-            # set the errors attribute; it's a list of tuples:
-            # (error_type, message, line_number)
-            error.errors = self._errors
-            # set the config attribute
-            error.config = self
-            raise error
-        # delete private attributes
-        del self._errors
-        
-        if configspec is None:
-            self.configspec = None
-        else:
-            self._handle_configspec(configspec)
-    
-    
-    def _initialise(self, options=None):
-        if options is None:
-            options = OPTION_DEFAULTS
-            
-        # initialise a few variables
-        self.filename = None
-        self._errors = []
-        self.raise_errors = options['raise_errors']
-        self.interpolation = options['interpolation']
-        self.list_values = options['list_values']
-        self.create_empty = options['create_empty']
-        self.file_error = options['file_error']
-        self.stringify = options['stringify']
-        self.indent_type = options['indent_type']
-        self.encoding = options['encoding']
-        self.default_encoding = options['default_encoding']
-        self.BOM = False
-        self.newlines = None
-        self.write_empty_values = options['write_empty_values']
-        self.unrepr = options['unrepr']
-        
-        self.initial_comment = []
-        self.final_comment = []
-        self.configspec = None
-        
-        if self._inspec:
-            self.list_values = False
-        
-        # Clear section attributes as well
-        Section._initialise(self)
-        
-        
-    def __repr__(self):
-        def _getval(key):
-            try:
-                return self[key]
-            except MissingInterpolationOption:
-                return dict.__getitem__(self, key)
-        return ('ConfigObj({%s})' % 
-                ', '.join([('%s: %s' % (repr(key), repr(_getval(key)))) 
-                for key in (self.scalars + self.sections)]))
-    
-    
-    def _handle_bom(self, infile):
-        """
-        Handle any BOM, and decode if necessary.
-        
-        If an encoding is specified, that *must* be used - but the BOM should
-        still be removed (and the BOM attribute set).
-        
-        (If the encoding is wrongly specified, then a BOM for an alternative
-        encoding won't be discovered or removed.)
-        
-        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
-        removed. The BOM attribute will be set. UTF16 will be decoded to
-        unicode.
-        
-        NOTE: This method must not be called with an empty ``infile``.
-        
-        Specifying the *wrong* encoding is likely to cause a
-        ``UnicodeDecodeError``.
-        
-        ``infile`` must always be returned as a list of lines, but may be
-        passed in as a single string.
-        """
-        if ((self.encoding is not None) and
-            (self.encoding.lower() not in BOM_LIST)):
-            # No need to check for a BOM
-            # the encoding specified doesn't have one
-            # just decode
-            return self._decode(infile, self.encoding)
-        
-        if isinstance(infile, (list, tuple)):
-            line = infile[0]
-        else:
-            line = infile
-        if self.encoding is not None:
-            # encoding explicitly supplied
-            # And it could have an associated BOM
-            # TODO: if encoding is just UTF16 - we ought to check for both
-            # TODO: big endian and little endian versions.
-            enc = BOM_LIST[self.encoding.lower()]
-            if enc == 'utf_16':
-                # For UTF16 we try big endian and little endian
-                for BOM, (encoding, final_encoding) in list(BOMS.items()):
-                    if not final_encoding:
-                        # skip UTF8
-                        continue
-                    if infile.startswith(BOM):
-                        ### BOM discovered
-                        ##self.BOM = True
-                        # Don't need to remove BOM
-                        return self._decode(infile, encoding)
-                    
-                # If we get this far, will *probably* raise a DecodeError
-                # As it doesn't appear to start with a BOM
-                return self._decode(infile, self.encoding)
-            
-            # Must be UTF8
-            BOM = BOM_SET[enc]
-            if not line.startswith(BOM):
-                return self._decode(infile, self.encoding)
-            
-            newline = line[len(BOM):]
-            
-            # BOM removed
-            if isinstance(infile, (list, tuple)):
-                infile[0] = newline
-            else:
-                infile = newline
-            self.BOM = True
-            return self._decode(infile, self.encoding)
-        
-        # No encoding specified - so we need to check for UTF8/UTF16
-        for BOM, (encoding, final_encoding) in list(BOMS.items()):
-            if not isinstance(BOM, str) or not line.startswith(BOM):
-                continue
-            else:
-                # BOM discovered
-                self.encoding = final_encoding
-                if not final_encoding:
-                    self.BOM = True
-                    # UTF8
-                    # remove BOM
-                    newline = line[len(BOM):]
-                    if isinstance(infile, (list, tuple)):
-                        infile[0] = newline
-                    else:
-                        infile = newline
-                    # UTF8 - don't decode
-                    if isinstance(infile, string_types):
-                        return infile.splitlines(True)
-                    else:
-                        return infile
-                # UTF16 - have to decode
-                return self._decode(infile, encoding)
-            
-        # No BOM discovered and no encoding specified, just return
-        if isinstance(infile, string_types):
-            # infile read from a file will be a single string
-            return infile.splitlines(True)
-        return infile
-
-
-    def _a_to_u(self, aString):
-        """Decode ASCII strings to unicode if a self.encoding is specified."""
-        if self.encoding:
-            return aString.decode('ascii')
-        else:
-            return aString
-
-
-    def _decode(self, infile, encoding):
-        """
-        Decode infile to unicode. Using the specified encoding.
-        
-        if is a string, it also needs converting to a list.
-        """
-        if isinstance(infile, string_types):
-            # can't be unicode
-            # NOTE: Could raise a ``UnicodeDecodeError``
-            return infile.decode(encoding).splitlines(True)
-        for i, line in enumerate(infile):
-            # NOTE: The isinstance test here handles mixed lists of unicode/string
-            # NOTE: But the decode will break on any non-string values
-            # NOTE: Or could raise a ``UnicodeDecodeError``
-            if PY3K:
-                if not isinstance(line, str):
-                    infile[i] = line.decode(encoding)
-            else:    
-                if not isinstance(line, unicode):
-                    infile[i] = line.decode(encoding)
-        return infile
-
-
-    def _decode_element(self, line):
-        """Decode element to unicode if necessary."""
-        if not self.encoding:
-            return line
-        if isinstance(line, str) and self.default_encoding:
-            return line.decode(self.default_encoding)
-        return line
-
-
-    def _str(self, value):
-        """
-        Used by ``stringify`` within validate, to turn non-string values
-        into strings.
-        """
-        if not isinstance(value, string_types):
-            return str(value)
-        else:
-            return value
-
-
-    def _parse(self, infile):
-        """Actually parse the config file."""
-        temp_list_values = self.list_values
-        if self.unrepr:
-            self.list_values = False
-            
-        comment_list = []
-        done_start = False
-        this_section = self
-        maxline = len(infile) - 1
-        cur_index = -1
-        reset_comment = False
-        
-        while cur_index < maxline:
-            if reset_comment:
-                comment_list = []
-            cur_index += 1
-            line = infile[cur_index]
-            sline = line.strip()
-            # do we have anything on the line ?
-            if not sline or sline.startswith('#'):
-                reset_comment = False
-                comment_list.append(line)
-                continue
-            
-            if not done_start:
-                # preserve initial comment
-                self.initial_comment = comment_list
-                comment_list = []
-                done_start = True
-                
-            reset_comment = True
-            # first we check if it's a section marker
-            mat = self._sectionmarker.match(line)
-            if mat is not None:
-                # is a section line
-                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
-                if indent and (self.indent_type is None):
-                    self.indent_type = indent
-                cur_depth = sect_open.count('[')
-                if cur_depth != sect_close.count(']'):
-                    self._handle_error("Cannot compute the section depth at line %s.",
-                                       NestingError, infile, cur_index)
-                    continue
-                
-                if cur_depth < this_section.depth:
-                    # the new section is dropping back to a previous level
-                    try:
-                        parent = self._match_depth(this_section,
-                                                   cur_depth).parent
-                    except SyntaxError:
-                        self._handle_error("Cannot compute nesting level at line %s.",
-                                           NestingError, infile, cur_index)
-                        continue
-                elif cur_depth == this_section.depth:
-                    # the new section is a sibling of the current section
-                    parent = this_section.parent
-                elif cur_depth == this_section.depth + 1:
-                    # the new section is a child the current section
-                    parent = this_section
-                else:
-                    self._handle_error("Section too nested at line %s.",
-                                       NestingError, infile, cur_index)
-                    
-                sect_name = self._unquote(sect_name)
-                if sect_name in parent:
-                    self._handle_error('Duplicate section name at line %s.',
-                                       DuplicateError, infile, cur_index)
-                    continue
-                
-                # create the new section
-                this_section = Section(
-                    parent,
-                    cur_depth,
-                    self,
-                    name=sect_name)
-                parent[sect_name] = this_section
-                parent.inline_comments[sect_name] = comment
-                parent.comments[sect_name] = comment_list
-                continue
-            #
-            # it's not a section marker,
-            # so it should be a valid ``key = value`` line
-            mat = self._keyword.match(line)
-            if mat is None:
-                # it neither matched as a keyword
-                # or a section marker
-                self._handle_error(
-                    'Invalid line at line "%s".',
-                    ParseError, infile, cur_index)
-            else:
-                # is a keyword value
-                # value will include any inline comment
-                (indent, key, value) = mat.groups()
-                if indent and (self.indent_type is None):
-                    self.indent_type = indent
-                # check for a multiline value
-                if value[:3] in ['"""', "'''"]:
-                    try:
-                        value, comment, cur_index = self._multiline(
-                            value, infile, cur_index, maxline)
-                    except SyntaxError:
-                        self._handle_error(
-                            'Parse error in value at line %s.',
-                            ParseError, infile, cur_index)
-                        continue
-                    else:
-                        if self.unrepr:
-                            comment = ''
-                            try:
-                                value = unrepr(value)
-                            except Exception as e:
-                                if type(e) == UnknownType:
-                                    msg = 'Unknown name or type in value at line %s.'
-                                else:
-                                    msg = 'Parse error in value at line %s.'
-                                self._handle_error(msg, UnreprError, infile,
-                                    cur_index)
-                                continue
-                else:
-                    if self.unrepr:
-                        comment = ''
-                        try:
-                            value = unrepr(value)
-                        except Exception as e:
-                            if isinstance(e, UnknownType):
-                                msg = 'Unknown name or type in value at line %s.'
-                            else:
-                                msg = 'Parse error in value at line %s.'
-                            self._handle_error(msg, UnreprError, infile,
-                                cur_index)
-                            continue
-                    else:
-                        # extract comment and lists
-                        try:
-                            (value, comment) = self._handle_value(value)
-                        except SyntaxError:
-                            self._handle_error(
-                                'Parse error in value at line %s.',
-                                ParseError, infile, cur_index)
-                            continue
-                #
-                key = self._unquote(key)
-                if key in this_section:
-                    self._handle_error(
-                        'Duplicate keyword name at line %s.',
-                        DuplicateError, infile, cur_index)
-                    continue
-                # add the key.
-                # we set unrepr because if we have got this far we will never
-                # be creating a new section
-                this_section.__setitem__(key, value, unrepr=True)
-                this_section.inline_comments[key] = comment
-                this_section.comments[key] = comment_list
-                continue
-        #
-        if self.indent_type is None:
-            # no indentation used, set the type accordingly
-            self.indent_type = ''
-
-        # preserve the final comment
-        if not self and not self.initial_comment:
-            self.initial_comment = comment_list
-        elif not reset_comment:
-            self.final_comment = comment_list
-        self.list_values = temp_list_values
-
-
-    def _match_depth(self, sect, depth):
-        """
-        Given a section and a depth level, walk back through the sections
-        parents to see if the depth level matches a previous section.
-        
-        Return a reference to the right section,
-        or raise a SyntaxError.
-        """
-        while depth < sect.depth:
-            if sect is sect.parent:
-                # we've reached the top level already
-                raise SyntaxError()
-            sect = sect.parent
-        if sect.depth == depth:
-            return sect
-        # shouldn't get here
-        raise SyntaxError()
-
-
-    def _handle_error(self, text, ErrorClass, infile, cur_index):
-        """
-        Handle an error according to the error settings.
-        
-        Either raise the error or store it.
-        The error will have occured at ``cur_index``
-        """
-        line = infile[cur_index]
-        cur_index += 1
-        message = text % cur_index
-        error = ErrorClass(message, cur_index, line)
-        if self.raise_errors:
-            # raise the error - parsing stops here
-            raise error
-        # store the error
-        # reraise when parsing has finished
-        self._errors.append(error)
-
-
-    def _unquote(self, value):
-        """Return an unquoted version of a value"""
-        if not value:
-            # should only happen during parsing of lists
-            raise SyntaxError
-        if (value[0] == value[-1]) and (value[0] in ('"', "'")):
-            value = value[1:-1]
-        return value
-
-
    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.
        
        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.
        
        * Don't quote values that don't need it.
        * Recursively quote members of a list and return a comma joined list.
        * Multiline is ``False`` for lists.
        * Obey list syntax for empty and single member lists.
        
        If ``list_values=False`` then the value is only quoted if it contains
        a ``\\n`` (is multiline) or '#'.
        
        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
        """
        if multiline and self.write_empty_values and value == '':
            # Only if multiline is set, so that it is used for values not
            # keys, and not values that are part of a list
            return ''
        
        if multiline and isinstance(value, (list, tuple)):
            # list syntax: ',' for empty, trailing ',' for one element,
            # comma-joined recursive quoting otherwise
            if not value:
                return ','
            elif len(value) == 1:
                return self._quote(value[0], multiline=False) + ','
            return ', '.join([self._quote(val, multiline=False)
                for val in value])
        if not isinstance(value, string_types):
            # non-strings are only written when stringify is enabled
            if self.stringify:
                value = str(value)
            else:
                raise TypeError('Value "%s" is not a string.' % value)

        if not value:
            return '""'
        
        # decide between no quoting, single-line quoting and triple quoting
        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
        # NOTE(review): this flag appears to always be False - whenever
        # multiline is set and both quote styles occur, need_triple is
        # already True.  Kept as-is; confirm before simplifying.
        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
        
        if check_for_single:
            if not self.list_values:
                # we don't quote if ``list_values=False``
                quot = noquot
            # for normal values either single or double quotes will do
            elif '\n' in value:
                # will only happen if multiline is off - e.g. '\n' in key
                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
            elif ((value[0] not in wspace_plus) and
                    (value[-1] not in wspace_plus) and
                    (',' not in value)):
                # no leading/trailing whitespace-ish chars and no comma:
                # safe to write unquoted
                quot = noquot
            else:
                quot = self._get_single_quote(value)
        else:
            # if value has '\n' or "'" *and* '"', it will need triple quotes
            quot = self._get_triple_quote(value)
        
        if quot == noquot and '#' in value and self.list_values:
            # a bare '#' would be read back as an inline comment
            quot = self._get_single_quote(value)
                
        return quot % value
-    
-    
-    def _get_single_quote(self, value):
-        if ("'" in value) and ('"' in value):
-            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
-        elif '"' in value:
-            quot = squot
-        else:
-            quot = dquot
-        return quot
-    
-    
-    def _get_triple_quote(self, value):
-        if (value.find('"""') != -1) and (value.find("'''") != -1):
-            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
-        if value.find('"""') == -1:
-            quot = tdquot
-        else:
-            quot = tsquot 
-        return quot
-
-
    def _handle_value(self, value):
        """
        Given a value string, unquote, remove comment,
        handle lists. (including empty and single member lists)

        Returns a ``(value, comment)`` pair; the value is a string, or a
        list of strings when list parsing is on and a list is found.
        Raises ``SyntaxError`` on badly quoted or malformed input.
        """
        if self._inspec:
            # Parsing a configspec so don't handle comments
            return (value, '')
        # do we look for lists in values ?
        if not self.list_values:
            mat = self._nolistvalue.match(value)
            if mat is None:
                raise SyntaxError()
            # NOTE: we don't unquote here
            return mat.groups()
        #
        mat = self._valueexp.match(value)
        if mat is None:
            # the value is badly constructed, probably badly quoted,
            # or an invalid list
            raise SyntaxError()
        # regex groups: comma-separated head, trailing single value,
        # the lone-comma empty-list marker, and any inline comment
        (list_values, single, empty_list, comment) = mat.groups()
        if (list_values == '') and (single is None):
            # change this if you want to accept empty values
            raise SyntaxError()
        # NOTE: note there is no error handling from here if the regex
        # is wrong: then incorrect values will slip through
        if empty_list is not None:
            # the single comma - meaning an empty list
            return ([], comment)
        if single is not None:
            # handle empty values
            if list_values and not single:
                # FIXME: the '' is a workaround because our regex now matches
                #   '' at the end of a list if it has a trailing comma
                single = None
            else:
                single = single or '""'
                single = self._unquote(single)
        if list_values == '':
            # not a list value
            return (single, comment)
        the_list = self._listvalueexp.findall(list_values)
        the_list = [self._unquote(val) for val in the_list]
        if single is not None:
            the_list += [single]
        return (the_list, comment)
-
-
    def _multiline(self, value, infile, cur_index, maxline):
        """
        Extract the value, where we are in a multiline situation.

        ``value`` starts with a triple quote; consumes lines from
        ``infile`` until the matching closing triple quote is found.
        Returns ``(value, comment, cur_index)`` with ``cur_index``
        advanced past the consumed lines.  Raises ``SyntaxError`` when
        the value is malformed or the file ends before the close quote.
        """
        quot = value[:3]
        newvalue = value[3:]
        single_line = self._triple_quote[quot][0]
        multi_line = self._triple_quote[quot][1]
        mat = single_line.match(value)
        if mat is not None:
            # opening and closing quote on the same line
            retval = list(mat.groups())
            retval.append(cur_index)
            return retval
        elif newvalue.find(quot) != -1:
            # somehow the triple quote is missing
            raise SyntaxError()
        #
        while cur_index < maxline:
            cur_index += 1
            newvalue += '\n'
            line = infile[cur_index]
            if line.find(quot) == -1:
                newvalue += line
            else:
                # end of multiline, process it
                break
        else:
            # we've got to the end of the config, oops...
            raise SyntaxError()
        mat = multi_line.match(line)
        if mat is None:
            # a badly formed line
            raise SyntaxError()
        (value, comment) = mat.groups()
        return (newvalue + value, comment, cur_index)
-
-
-    def _handle_configspec(self, configspec):
-        """Parse the configspec."""
-        # FIXME: Should we check that the configspec was created with the 
-        #        correct settings ? (i.e. ``list_values=False``)
-        if not isinstance(configspec, ConfigObj):
-            try:
-                configspec = ConfigObj(configspec,
-                                       raise_errors=True,
-                                       file_error=True,
-                                       _inspec=True)
-            except ConfigObjError as e:
-                # FIXME: Should these errors have a reference
-                #        to the already parsed ConfigObj ?
-                raise ConfigspecError('Parsing configspec failed: %s' % e)
-            except IOError as e:
-                raise IOError('Reading configspec failed: %s' % e)
-        
-        self.configspec = configspec
-            
-
-        
-    def _set_configspec(self, section, copy):
-        """
-        Called by validate. Handles setting the configspec on subsections
-        including sections to be validated by __many__
-        """
-        configspec = section.configspec
-        many = configspec.get('__many__')
-        if isinstance(many, dict):
-            for entry in section.sections:
-                if entry not in configspec:
-                    section[entry].configspec = many
-                    
-        for entry in configspec.sections:
-            if entry == '__many__':
-                continue
-            if entry not in section:
-                section[entry] = {}
-                section[entry]._created = True
-                if copy:
-                    # copy comments
-                    section.comments[entry] = configspec.comments.get(entry, [])
-                    section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
-                
-            # Could be a scalar when we expect a section
-            if isinstance(section[entry], Section):
-                section[entry].configspec = configspec[entry]
-                        
-
-    def _write_line(self, indent_string, entry, this_entry, comment):
-        """Write an individual line, for the write method"""
-        # NOTE: the calls to self._quote here handles non-StringType values.
-        if not self.unrepr:
-            val = self._decode_element(self._quote(this_entry))
-        else:
-            val = repr(this_entry)
-        return '%s%s%s%s%s' % (indent_string,
-                               self._decode_element(self._quote(entry, multiline=False)),
-                               self._a_to_u(' = '),
-                               val,
-                               self._decode_element(comment))
-
-
-    def _write_marker(self, indent_string, depth, entry, comment):
-        """Write a section marker line"""
-        return '%s%s%s%s%s' % (indent_string,
-                               self._a_to_u('[' * depth),
-                               self._quote(self._decode_element(entry), multiline=False),
-                               self._a_to_u(']' * depth),
-                               self._decode_element(comment))
-
-
-    def _handle_comment(self, comment):
-        """Deal with a comment."""
-        if not comment:
-            return ''
-        start = self.indent_type
-        if not comment.startswith('#'):
-            start += self._a_to_u(' # ')
-        return (start + comment)
-
-
-    # Public methods
-
-    def write(self, outfile=None, section=None):
-        """
-        Write the current ConfigObj as a file
-        
-        tekNico: FIXME: use StringIO instead of real files
-        
-        >>> filename = a.filename
-        >>> a.filename = 'test.ini'
-        >>> a.write()
-        >>> a.filename = filename
-        >>> a == ConfigObj('test.ini', raise_errors=True)
-        1
-        >>> import os
-        >>> os.remove('test.ini')
-        """
-        if self.indent_type is None:
-            # this can be true if initialised from a dictionary
-            self.indent_type = DEFAULT_INDENT_TYPE
-            
-        out = []
-        cs = self._a_to_u('#')
-        csp = self._a_to_u('# ')
-        if section is None:
-            int_val = self.interpolation
-            self.interpolation = False
-            section = self
-            for line in self.initial_comment:
-                line = self._decode_element(line)
-                stripped_line = line.strip()
-                if stripped_line and not stripped_line.startswith(cs):
-                    line = csp + line
-                out.append(line)
-                
-        indent_string = self.indent_type * section.depth
-        for entry in (section.scalars + section.sections):
-            if entry in section.defaults:
-                # don't write out default values
-                continue
-            for comment_line in section.comments[entry]:
-                comment_line = self._decode_element(comment_line.lstrip())
-                if comment_line and not comment_line.startswith(cs):
-                    comment_line = csp + comment_line
-                out.append(indent_string + comment_line)
-            this_entry = section[entry]
-            comment = self._handle_comment(section.inline_comments[entry])
-            
-            if isinstance(this_entry, dict):
-                # a section
-                out.append(self._write_marker(
-                    indent_string,
-                    this_entry.depth,
-                    entry,
-                    comment))
-                out.extend(self.write(section=this_entry))
-            else:
-                out.append(self._write_line(
-                    indent_string,
-                    entry,
-                    this_entry,
-                    comment))
-                
-        if section is self:
-            for line in self.final_comment:
-                line = self._decode_element(line)
-                stripped_line = line.strip()
-                if stripped_line and not stripped_line.startswith(cs):
-                    line = csp + line
-                out.append(line)
-            self.interpolation = int_val
-            
-        if section is not self:
-            return out
-        
-        if (self.filename is None) and (outfile is None):
-            # output a list of lines
-            # might need to encode
-            # NOTE: This will *screw* UTF16, each line will start with the BOM
-            if self.encoding:
-                out = [l.encode(self.encoding) for l in out]
-            if (self.BOM and ((self.encoding is None) or
-                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
-                # Add the UTF8 BOM
-                if not out:
-                    out.append('')
-                out[0] = BOM_UTF8 + out[0]
-            return out
-        
-        # Turn the list to a string, joined with correct newlines
-        newline = self.newlines or os.linesep
-        if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
-            and sys.platform == 'win32' and newline == '\r\n'):
-            # Windows specific hack to avoid writing '\r\r\n'
-            newline = '\n'
-        output = self._a_to_u(newline).join(out)
-        if self.encoding:
-            output = output.encode(self.encoding)
-        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
-            # Add the UTF8 BOM
-            output = BOM_UTF8 + output
-            
-        if not output.endswith(newline):
-            output += newline
-        if outfile is not None:
-            outfile.write(output)
-        else:
-            # !!! write mode was 'wb' but that fails in PY3K and we dont need
-            h = open(self.filename, 'w')
-            h.write(output)
-            h.close()
-
-
    def validate(self, validator, preserve_errors=False, copy=False,
                 section=None):
        """
        Test the ConfigObj against a configspec.
        
        It uses the ``validator`` object from *validate.py*.
        
        To run ``validate`` on the current ConfigObj, call: ::
        
            test = config.validate(validator)
        
        (Normally having previously passed in the configspec when the ConfigObj
        was created - you can dynamically assign a dictionary of checks to the
        ``configspec`` attribute of a section though).
        
        It returns ``True`` if everything passes, or a dictionary of
        pass/fails (True/False). If every member of a subsection passes, it
        will just have the value ``True``. (It also returns ``False`` if all
        members fail).
        
        In addition, it converts the values from strings to their native
        types if their checks pass (and ``stringify`` is set).
        
        If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of a marking a fail with a ``False``, it will preserve the actual
        exception object. This can contain info about the reason for failure.
        For example the ``VdtValueTooSmallError`` indicates that the value
        supplied was too small. If a value (or section) is missing it will
        still be marked as ``False``.
        
        You must have the validate module to use ``preserve_errors=True``.
        
        You can then use the ``flatten_errors`` function to turn your nested
        results dictionary into a flattened list of failures - useful for
        displaying meaningful error messages.
        """
        if section is None:
            # top-level call: set up and then recurse per subsection
            if self.configspec is None:
                raise ValueError('No configspec supplied.')
            if preserve_errors:
                # We do this once to remove a top level dependency on the validate module
                # Which makes importing configobj faster
                from .validate import VdtMissingValue
                self._vdtMissingValue = VdtMissingValue
                
            section = self

            if copy:
                # copy mode also mirrors the spec's file-level attributes
                section.initial_comment = section.configspec.initial_comment
                section.final_comment = section.configspec.final_comment
                section.encoding = section.configspec.encoding
                section.BOM = section.configspec.BOM
                section.newlines = section.configspec.newlines
                section.indent_type = section.configspec.indent_type
            
        #
        # section.default_values.clear() #??
        configspec = section.configspec
        self._set_configspec(section, copy)

        
        def validate_entry(entry, spec, val, missing, ret_true, ret_false):
            # Validate one scalar; mutates ``out``/``section`` in the
            # enclosing scope and returns the updated overall flags.
            section.default_values.pop(entry, None)
                
            try:
                section.default_values[entry] = validator.get_default_value(configspec[entry])
            except (KeyError, AttributeError, validator.baseErrorClass):
                # No default, bad default or validator has no 'get_default_value'
                # (e.g. SimpleVal)
                pass
            
            try:
                check = validator.check(spec,
                                        val,
                                        missing=missing
                                        )
            except validator.baseErrorClass as e:
                if not preserve_errors or isinstance(e, self._vdtMissingValue):
                    out[entry] = False
                else:
                    # preserve the error
                    out[entry] = e
                    ret_false = False
                ret_true = False
            else:
                ret_false = False
                out[entry] = True
                if self.stringify or missing:
                    # if we are doing type conversion
                    # or the value is a supplied default
                    if not self.stringify:
                        if isinstance(check, (list, tuple)):
                            # preserve lists
                            check = [self._str(item) for item in check]
                        elif missing and check is None:
                            # convert the None from a default to a ''
                            check = ''
                        else:
                            check = self._str(check)
                    if (check != val) or missing:
                        section[entry] = check
                if not copy and missing and entry not in section.defaults:
                    section.defaults.append(entry)
            return ret_true, ret_false
        
        #
        # out maps entry -> True/False/error/sub-result dict
        out = {}
        ret_true = True
        ret_false = True
        
        unvalidated = [k for k in section.scalars if k not in configspec]
        incorrect_sections = [k for k in configspec.sections if k in section.scalars]        
        incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
        
        for entry in configspec.scalars:
            if entry in ('__many__', '___many___'):
                # reserved names
                continue
            if (not entry in section.scalars) or (entry in section.defaults):
                # missing entries
                # or entries from defaults
                missing = True
                val = None
                if copy and entry not in section.scalars:
                    # copy comments
                    section.comments[entry] = (
                        configspec.comments.get(entry, []))
                    section.inline_comments[entry] = (
                        configspec.inline_comments.get(entry, ''))
                #
            else:
                missing = False
                val = section[entry]
            
            ret_true, ret_false = validate_entry(entry, configspec[entry], val, 
                                                 missing, ret_true, ret_false)
        
        many = None
        if '__many__' in configspec.scalars:
            many = configspec['__many__']
        elif '___many___' in configspec.scalars:
            many = configspec['___many___']
        
        if many is not None:
            # a __many__ spec validates every otherwise-unspecced scalar
            for entry in unvalidated:
                val = section[entry]
                ret_true, ret_false = validate_entry(entry, many, val, False,
                                                     ret_true, ret_false)
            unvalidated = []

        for entry in incorrect_scalars:
            ret_true = False
            if not preserve_errors:
                out[entry] = False
            else:
                ret_false = False
                msg = 'Value %r was provided as a section' % entry
                out[entry] = validator.baseErrorClass(msg)
        for entry in incorrect_sections:
            ret_true = False
            if not preserve_errors:
                out[entry] = False
            else:
                ret_false = False
                msg = 'Section %r was provided as a single value' % entry
                out[entry] = validator.baseErrorClass(msg)
                
        # Missing sections will have been created as empty ones when the
        # configspec was read.
        for entry in section.sections:
            # FIXME: this means DEFAULT is not copied in copy mode
            if section is self and entry == 'DEFAULT':
                continue
            if section[entry].configspec is None:
                unvalidated.append(entry)
                continue
            if copy:
                section.comments[entry] = configspec.comments.get(entry, [])
                section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
            check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
            out[entry] = check
            if check == False:
                ret_true = False
            elif check == True:
                ret_false = False
            else:
                ret_true = False
        
        section.extra_values = unvalidated
        if preserve_errors and not section._created:
            # If the section wasn't created (i.e. it wasn't missing)
            # then we can't return False, we need to preserve errors
            ret_false = False
        #
        if ret_false and preserve_errors and out:
            # If we are preserving errors, but all
            # the failures are from missing sections / values
            # then we can return False. Otherwise there is a
            # real failure that we need to preserve.
            ret_false = not any(out.values())
        if ret_true:
            return True
        elif ret_false:
            return False
        return out
-
-
-    def reset(self):
-        """Clear ConfigObj instance and restore to 'freshly created' state."""
-        self.clear()
-        self._initialise()
-        # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
-        #        requires an empty dictionary
-        self.configspec = None
-        # Just to be sure ;-)
-        self._original_configspec = None
-        
-        
-    def reload(self):
-        """
-        Reload a ConfigObj from file.
-        
-        This method raises a ``ReloadError`` if the ConfigObj doesn't have
-        a filename attribute pointing to a file.
-        """
-        if not isinstance(self.filename, string_types):
-            raise ReloadError()
-
-        filename = self.filename
-        current_options = {}
-        for entry in OPTION_DEFAULTS:
-            if entry == 'configspec':
-                continue
-            current_options[entry] = getattr(self, entry)
-            
-        configspec = self._original_configspec
-        current_options['configspec'] = configspec
-            
-        self.clear()
-        self._initialise(current_options)
-        self._load(filename, configspec)
-        
-
-
-class SimpleVal(object):
-    """
-    A simple validator.
-    Can be used to check that all members expected are present.
-    
-    To use it, provide a configspec with all your members in (the value given
-    will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
-    method of your ``ConfigObj``. ``validate`` will return ``True`` if all
-    members are present, or a dictionary with True/False meaning
-    present/missing. (Whole missing sections will be replaced with ``False``)
-    """
-    
-    def __init__(self):
-        self.baseErrorClass = ConfigObjError
-    
-    def check(self, check, member, missing=False):
-        """A dummy check method, always returns the value unchanged."""
-        if missing:
-            raise self.baseErrorClass()
-        return member
-
-
-def flatten_errors(cfg, res, levels=None, results=None):
-    """
-    An example function that will turn a nested dictionary of results
-    (as returned by ``ConfigObj.validate``) into a flat list.
-    
-    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
-    dictionary returned by ``validate``.
-    
-    (This is a recursive function, so you shouldn't use the ``levels`` or
-    ``results`` arguments - they are used by the function.)
-    
-    Returns a list of keys that failed. Each member of the list is a tuple::
-    
-        ([list of sections...], key, result)
-    
-    If ``validate`` was called with ``preserve_errors=False`` (the default)
-    then ``result`` will always be ``False``.
-
-    *list of sections* is a flattened list of sections that the key was found
-    in.
-    
-    If the section was missing (or a section was expected and a scalar provided
-    - or vice-versa) then key will be ``None``.
-    
-    If the value (or section) was missing then ``result`` will be ``False``.
-    
-    If ``validate`` was called with ``preserve_errors=True`` and a value
-    was present, but failed the check, then ``result`` will be the exception
-    object returned. You can use this as a string that describes the failure.
-    
-    For example *The value "3" is of the wrong type*.
-    """
-    if levels is None:
-        # first time called
-        levels = []
-        results = []
-    if res == True:
-        return results
-    if res == False or isinstance(res, Exception):
-        results.append((levels[:], None, res))
-        if levels:
-            levels.pop()
-        return results
-    for (key, val) in res.items():
-        if val == True:
-            continue
-        if isinstance(cfg.get(key), dict):
-            # Go down one level
-            levels.append(key)
-            flatten_errors(cfg[key], val, levels, results)
-            continue
-        results.append((levels[:], key, val))
-    #
-    # Go up one level
-    if levels:
-        levels.pop()
-    #
-    return results
-
-
-def get_extra_values(conf, _prepend=()):
-    """
-    Find all the values and sections not in the configspec from a validated
-    ConfigObj.
-    
-    ``get_extra_values`` returns a list of tuples where each tuple represents
-    either an extra section, or an extra value.
-    
-    The tuples contain two values, a tuple representing the section the value 
-    is in and the name of the extra values. For extra values in the top level
-    section the first member will be an empty tuple. For values in the 'foo'
-    section the first member will be ``('foo',)``. For members in the 'bar'
-    subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
-    
-    NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
-    been validated it will return an empty list.
-    """
-    out = []
-    
-    out.extend([(_prepend, name) for name in conf.extra_values])
-    for name in conf.sections:
-        if name not in conf.extra_values:
-            out.extend(get_extra_values(conf[name], _prepend + (name,)))
-    return out
-
-
-"""*A programming language is a medium of expression.* - Paul Graham"""
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/convertgeis.py b/required_pkgs/stsci.tools/lib/stsci/tools/convertgeis.py
deleted file mode 100644
index a74c6d7..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/convertgeis.py
+++ /dev/null
@@ -1,483 +0,0 @@
-#!/usr/bin/env python
-
-# $Id: readgeis.py 10520 2010-10-11 16:39:49Z hack $
-
-"""
-        convertgeis: Read GEIS file and convert it to a waivered-FITS file.
-
-        License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-        Usage:
-
-                convertgeis.py [options] GEISname FITSname
-
-                GEISname is the input GEIS file in GEIS format, and FITSname
-                is the output file in FITS format. GEISname can be a
-                directory name.  In this case, it will try to use all `*.??h`
-                files as input file names.
-
-                If FITSname is omitted or is a directory name, this task will
-                try to construct the output names from the input names, i.e.:
-
-                abc.xyh will have an output name of abc_xyf.fits
-
-        :Options:
-
-        -h     print the help (this text)
-
-        -n     do NOT over-write any pre-existing output file
-
-
-        :Example:
-
-        If used in Pythons script, a user can, e. g.::
-
-            >>> import convertgeis
-            >>> hdulist = convertgeis.convert(GEISFileName)
-            (do whatever with hdulist)
-            >>> hdulist.writeto(FITSFileName)
-
-        The most basic usage from the command line::
-
-            convertgeis.py test1.hhh test1_c0f.fits
-
-        This command will convert the input GEIS file test1.hhh to
-        a waivered-FITS file test1_c0f.fits.
-
-        From the command line::
-
-            convertgeis.py .
-
-        this will convert all `*.??h` files in the current directory
-        to waivered-FITS files (of corresponding names) and write them in the
-        current directory.
-
-
-        Another example of usage from the command line::
-
-            convertgeis.py "u*" "*"
-
-        this will convert all `u*.??h` files in the current directory
-        to waivered-FITS files (of corresponding names) and write them in the
-        current directory.  Note that when using wild cards, it is
-        necessary to put them in quotes.
-
-"""
-
-# Developed by Science Software Branch, STScI, USA.
-# This version needs pyfits 0.9.6.3 or later
-# and numpy version 1.0.4 or later
-
-from __future__ import division, print_function # confidence high
-
-__version__ = "1.0 (25 Feb, 2011), \xa9 AURA"
-
-import os, sys
-from astropy.io import fits
-import numpy
-import array
-
-if sys.version_info[0] > 2:
-    from functools import reduce
-
-# definitions used to convert GEIS record into numpy objects
-geis_fmt = {'REAL':'f', 'DOUBLE': 'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'}
-# definitions used to convert data into numpy array for use in fits.Column
-cols_fmt = {'REAL':'float', 'DOUBLE':'float', 'INTEGER':'int', 'LOGICAL':'S', 'CHARACTER': 'S'}
-# definitions used to define print format for fits.Column
-cols_pfmt = {'REAL':'E', 'DOUBLE': 'D', 'INTEGER': 'J', 'LOGICAL':'A', 'CHARACTER': 'A'}
-
-# Keywords which require special unit conversion
-# keywords which are output as long-floats without using exponential formatting
-kw_DOUBLE = ['CRVAL1','CRVAL2','FPKTTIME','LPKTTIME']
-
-def stsci2(hdulist, filename):
-    """For STScI GEIS files, need to do extra steps."""
-
-    # Write output file name to the primary header
-    instrument = hdulist[0].header.get('INSTRUME', '')
-    if instrument in ("WFPC2", "FOC"):
-        hdulist[0].header['FILENAME'] = filename
-
-def convert(input):
-
-    """Input GEIS files "input" will be read and a HDUList object will
-       be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
-
-       The user can use the writeto method to write the HDUList object to
-       a FITS file.
-    """
-
-    global dat
-    cardLen = fits.Card.length
-
-    # input file(s) must be of the form *.??h and *.??d
-    if input[-1] != 'h' or input[-4] != '.':
-        raise "Illegal input GEIS file name %s" % input
-
-    data_file = input[:-1]+'d'
-
-    _os = sys.platform
-    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
-        bytes_per_line = cardLen+1
-    else:
-        raise "Platform %s is not supported (yet)." % _os
-
-    end_card = 'END'+' '* (cardLen-3)
-
-    # open input file
-    im = open(input)
-
-    # Generate the primary HDU
-    cards = []
-    while 1:
-        line = im.read(bytes_per_line)[:cardLen]
-        line = line[:8].upper() + line[8:]
-        if line == end_card:
-            break
-        cards.append(fits.Card.fromstring(line))
-
-    phdr = fits.Header(cards)
-    im.close()
-
-    # Determine starting point for adding Group Parameter Block keywords to Primary header
-    phdr_indx = phdr.index('PSIZE')
-
-    _naxis0 = phdr.get('NAXIS', 0)
-    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
-    _naxis.insert(0, _naxis0)
-    _bitpix = phdr['BITPIX']
-    _psize = phdr['PSIZE']
-    if phdr['DATATYPE'][:4] == 'REAL':
-        _bitpix = -_bitpix
-    if _naxis0 > 0:
-        size = reduce(lambda x,y:x*y, _naxis[1:])
-        data_size = abs(_bitpix) * size // 8
-    else:
-        data_size = 0
-    group_size = data_size + _psize // 8
-
-    # decode the group parameter definitions,
-    # group parameters will become extension table
-    groups = phdr['GROUPS']
-    gcount = phdr['GCOUNT']
-    pcount = phdr['PCOUNT']
-
-    formats = []
-    bools = []
-    floats = []
-    cols = [] # column definitions used for extension table
-    cols_dict = {} # provides name access to Column defs
-    _range = range(1, pcount+1)
-    key = [phdr['PTYPE'+str(j)] for j in _range]
-    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
-
-    # delete group parameter definition header keywords
-    _list = ['PTYPE'+str(j) for j in _range] + \
-            ['PDTYPE'+str(j) for j in _range] + \
-            ['PSIZE'+str(j) for j in _range] + \
-            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
-
-    # Construct record array formats for the group parameters
-    # as interpreted from the Primary header file
-    for i in range(1, pcount+1):
-        ptype = key[i-1]
-        pdtype = phdr['PDTYPE'+str(i)]
-        star = pdtype.find('*')
-        _type = pdtype[:star]
-        _bytes = pdtype[star+1:]
-
-        # collect boolean keywords since they need special attention later
-
-        if _type == 'LOGICAL':
-            bools.append(i)
-        if pdtype == 'REAL*4':
-            floats.append(i)
-
-        # identify keywords which require conversion to special units
-        if ptype in kw_DOUBLE:
-            _type = 'DOUBLE'
-
-        fmt = geis_fmt[_type] + _bytes
-        formats.append((ptype,fmt))
-
-        # Set up definitions for use in creating the group-parameter block table
-        nrpt = ''
-        nbits = str(int(_bytes)*8)
-        if 'CHAR' in _type:
-            nrpt = _bytes
-            nbits = _bytes
-
-        afmt = cols_fmt[_type]+ nbits
-        if 'LOGICAL' in _type:
-            afmt = cols_fmt[_type]
-        cfmt = cols_pfmt[_type]+nrpt
-
-        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
-        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
-        cols.append(cols_dict[ptype]) # This keeps the columns in order
-
-    _shape = _naxis[1:]
-    _shape.reverse()
-    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
-    _bscale = phdr.get('BSCALE', 1)
-    _bzero = phdr.get('BZERO', 0)
-
-    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
-        _uint16 = 1
-        _bzero = 32768
-    else:
-        _uint16 = 0
-
-    # delete from the end, so it will not conflict with previous delete
-    for i in range(len(phdr)-1, -1, -1):
-        if phdr.cards[i].keyword in _list:
-            del phdr[i]
-
-    # clean up other primary header keywords
-    phdr['SIMPLE'] = True
-    phdr['GROUPS'] = False
-    _after = 'NAXIS'
-    if _naxis0 > 0:
-        _after += str(_naxis0)
-    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)
-
-    # Use copy-on-write for all data types since byteswap may be needed
-    # in some platforms.
-    f1 = open(data_file, mode='rb')
-    dat = f1.read()
-    errormsg = ""
-
-    # Define data array for all groups
-    arr_shape = _naxis[:]
-    arr_shape[0] = gcount
-    arr_stack = numpy.zeros(arr_shape,dtype=_code)
-
-    loc = 0
-    for k in range(gcount):
-        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
-        ext_dat = ext_dat.reshape(_shape)
-        if _uint16:
-            ext_dat += _bzero
-        # Check to see whether there are any NaN's or infs which might indicate
-        # a byte-swapping problem, such as being written out on little-endian
-        #   and being read in on big-endian or vice-versa.
-        if _code.find('float') >= 0 and \
-            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
-            errormsg += "===================================\n"
-            errormsg += "= WARNING:                        =\n"
-            errormsg += "=  Input image:                   =\n"
-            errormsg += input+"[%d]\n"%(k+1)
-            errormsg += "=  had floating point data values =\n"
-            errormsg += "=  of NaN and/or Inf.             =\n"
-            errormsg += "===================================\n"
-        elif _code.find('int') >= 0:
-            # Check INT data for max values
-            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
-            if ext_dat_exp.max() == int(_bitpix) - 1:
-                # Potential problems with byteswapping
-                errormsg += "===================================\n"
-                errormsg += "= WARNING:                        =\n"
-                errormsg += "=  Input image:                   =\n"
-                errormsg += input+"[%d]\n"%(k+1)
-                errormsg += "=  had integer data values        =\n"
-                errormsg += "=  with maximum bitvalues.        =\n"
-                errormsg += "===================================\n"
-
-        arr_stack[k] = ext_dat
-        #ext_hdu = fits.hdu.ImageHDU(data=ext_dat)
-
-        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
-
-        loc += group_size
-
-        # Add data from this GPB to table
-        for i in range(1, pcount+1):
-            val = rec[0][i-1]
-            if i in bools:
-                if val:
-                    val = 'T'
-                else:
-                    val = 'F'
-            cols[i-1].array[k] = val
-
-        # Based on the first group, add GPB keywords to PRIMARY header
-        if k == 0:
-            # Create separate PyFITS Card objects for each entry in 'rec'
-            # and update Primary HDU with these keywords after PSIZE
-            for i in range(1, pcount+1):
-                #val = rec.field(i-1)[0]
-                val = rec[0][i-1]
-                if val.dtype.kind == 'S':
-                    val = val.decode('ascii')
-
-                if i in bools:
-                    if val:
-                        val = True
-                    else:
-                        val = False
-                    
-                elif i in floats:
-                    # use fromstring, format in Card is deprecated in pyfits 0.9
-                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
-                    _card = fits.Card.fromstring(_str)
-                    
-                else:
-                    _card = fits.Card(key=key[i-1], value=val, comment=comm[i-1])
-
-                phdr.insert(phdr_indx+i, _card)
-
-            # deal with bscale/bzero
-            if (_bscale != 1 or _bzero != 0):
-                phdr['BSCALE'] = _bscale
-                phdr['BZERO'] = _bzero
-
-        #hdulist.append(ext_hdu)
-    # Define new table based on Column definitions
-    ext_table = fits.new_table(cols,tbtype='TableHDU')
-    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
-    # Add column descriptions to header of table extension to match stwfits output
-    for i in range(len(key)):
-        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))
-
-    if errormsg != "":
-        errormsg += "===================================\n"
-        errormsg += "=  This file may have been        =\n"
-        errormsg += "=  written out on a platform      =\n"
-        errormsg += "=  with a different byte-order.   =\n"
-        errormsg += "=                                 =\n"
-        errormsg += "=  Please verify that the values  =\n"
-        errormsg += "=  are correct or apply the       =\n"
-        errormsg += "=  '.byteswap()' method.          =\n"
-        errormsg += "===================================\n"
-        print(errormsg)
-
-    f1.close()
-
-    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
-    hdulist.append(ext_table)
-
-    stsci2(hdulist,input)
-    return hdulist
-
-def parse_path(f1, f2):
-
-    """Parse two input arguments and return two lists of file names"""
-
-    import glob
-
-    # if second argument is missing or is a wild card, point it
-    # to the current directory
-    f2 = f2.strip()
-    if f2 == '' or f2 == '*':
-        f2 = './'
-
-    # if the first argument is a directory, use all GEIS files
-    if os.path.isdir(f1):
-        f1 = os.path.join(f1, '*.??h')
-    list1 = glob.glob(f1)
-    list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.']
-
-    # if the second argument is a directory, use file names in the
-    # first argument to construct file names, i.e.
-    # abc.xyh will be converted to abc_xyf.fits
-    if os.path.isdir(f2):
-        list2 = []
-        for file in list1:
-            name = os.path.split(file)[-1]
-            fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits'
-            list2.append(os.path.join(f2, fitsname))
-    else:
-        list2 = [s.strip() for s in f2.split(",")]
-        
-    if (list1 == [] or list2 == []):
-        str = ""
-        if (list1 == []): str += "Input files `%s` not usable/available. " % f1
-        if (list2 == []): str += "Input files `%s` not usable/available. " % f2
-        raise IOError(str)
-    else:
-        return list1, list2
-
-#-------------------------------------------------------------------------------
-# special initialization when this is the main program
-
-if __name__ == "__main__":
-
-    import getopt
-
-    try:
-        optlist, args = getopt.getopt(sys.argv[1:], 'hn')
-    except getopt.error as e:
-        print(str(e))
-        print(__doc__)
-        print("\t", __version__)
-
-    # initialize default values
-    help = 0
-    clobber = True
-
-    # read options
-    for opt, value in optlist:
-        if opt == "-h":
-            help = 1
-        if opt == "-n":
-            clobber = False
-
-    if len(args) == 0:
-        help = 1
-
-    if (help):
-        print(__doc__)
-        print("\t", __version__)
-    else:
-        if len(args) == 1:
-            args.append('')
-        list1, list2 = parse_path (args[0], args[1])
-        npairs = min (len(list1), len(list2))
-        for i in range(npairs):
-            if os.path.exists(list2[i]):
-                if clobber:
-                    os.remove(list2[i])
-                else:
-                    print("Output file %s already exists, skip." % list2[i])
-                    break
-            try:
-                hdulist = convert(list1[i])
-                hdulist.writeto(list2[i])
-                hdulist.close()
-                print("%s -> %s" % (list1[i], list2[i]))
-            except Exception as e:
-                print("Conversion fails for %s: %s" % (list1[i], str(e)))
-                break
-
-"""
-
-Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-"""
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/convertwaiveredfits.py b/required_pkgs/stsci.tools/lib/stsci/tools/convertwaiveredfits.py
deleted file mode 100755
index c0719ea..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/convertwaiveredfits.py
+++ /dev/null
@@ -1,628 +0,0 @@
-#!/usr/bin/env python
-
-# $Id: convertwaiveredfits.py 41182 2015-06-15 17:11:53Z hack $
-
-"""
-    convertwaiveredfits: Convert a waivered FITS file to various other formats.
-
-    :License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-    :Syntax From the command line:
-
-        convertwaiveredfits.py [-hm] [-o <outputFileName>,...] FILE ...
-
-    Convert the waivered FITS files (FILEs) to various formats.
-    The default conversion format is multi-extension FITS.
-
-    :Options:
-
-    -h, --help                       print this help message and exit
-
-    -v, --verbose                    turn on verbose output
-
-    -m, --multiExtensionConversion   convert to multi-extension
-                                     FITS format (Default)
-
-    -o, --outputFileName             comma separated list of
-                                     output file specifications
-                                     one per input FILE
-                                     Default: input file
-                                     specification with the last
-                                     character of the base name
-                                     changed to `h` in multi-extension FITS format
-
-    Examples
-    ========
-    Conversion of a WFPC2 waivered FITS file obtained from the HST archive::
-
-        convertwaiveredfits u9zh010bm_c0f.fits
-
-    This will convert the waivered FITS file `u9zh010bm_c0f.fits`
-    to multi-extension FITS format and generate the output file
-    `u9zh010bm_c0h.fits`.
-
-
-    Conversion of multiple FITS files can be done using::
-
-        convertwaiveredfits -o out1.fits,out2.fits
-                             u9zh010bm_c0f.fits u9zh010bm_c1f.fits
-
-    This will convert the waivered FITS files u9zh010bm_c0f.fits
-    and u9zh010bm_c1f.fits to multi-extension FITS format and
-    generate the output files out1.fits and out2.fits
-
-
-    :Python Syntax: You can run this code interactively from within Python using the syntax:
-
-        >>> from stsci.tools import convertwaiveredfits
-        >>> fobj = convertwaiveredfits.convertwaiveredfits(waiveredObject,
-        >>>                    outputFileName=None,
-        >>>                    forceFileOutput=False,
-        >>>                    convertTo='multiExtension',
-        >>>                    verbose=False)
-
-    The returned object `fobj` is a PyFITS object using the multi-extension FITS format.
-
-
-    Parameters
-    ==========
-    waiveredObject: obj
-        input object representing a waivered FITS
-        file; either a astropy.io.fits.HDUList object, a file
-        object, or a file specification
-
-    outputFileName : string
-        file specification for the output file
-        Default: None - do not generate an output file
-
-    forceFileOutput: boolean
-        force the generation of an output file when
-        the outputFileName parameter is None; the
-        output file specification will be the same as
-        the input file specification with the last
-        character of the base name replaced with the
-        character `h` in multi-extension FITS format.
-
-        Default: False
-
-    convertTo: string
-        target conversion type
-        Default: 'multiExtension'
-
-    verbose: boolean
-        provide verbose output
-        Default: False
-
-    Returns
-    =======
-    hduList
-        fits.HDUList (PyFITS multi-extension FITS object) containing converted output
-
-    Examples
-    ========
-      >>> import convertwaiveredfits
-      >>> hdulist = convertwaiveredfits.convertwaiveredfits('u9zh010bm_c0f.fits',
-                                               forceFileOutput=True)
-
-    this will convert the waivered FITS file u9zh010bm_c0f.fits
-    to multi-extension FITS format and write the output to the
-    file u9zh010bm_c0h.fits;  the returned HDUList is in
-    multi-extension FITS format
-
-
-      >>> import convertwaiveredfits
-      >>> inFile = open('u9zh010bm_c0f.fits',mode='rb')
-      >>> hdulist = convertwaiveredfits.convertwaiveredfits(inFile,
-                                                        'out.fits')
-
-    this will convert the waivered FITS file u9zh010bm_c0f.fits
-    to multi-extension FITS format and write the output to the
-    file out.fits; the returned HDUList is in multi-extension
-    FITS format
-
-      >>> from astropy.io import fits
-      >>> import convertwaiveredfits
-      >>> inHdul = fits.open('u9zh010bm_c0f.fits')
-      >>> hdulist = convertwaiveredfits.convertwaiveredfits(inHdul)
-
-    this will convert the waivered FITS file u9zh010bm_c0f.fits
-    to multi-extension FITS format; no output file is generated;
-    the returned HDUList is in multi-extension format
-
-__version__ = "1.0 (31 January, 2008)"
-
-"""
-
-# Developed by Science Software Branch, STScI, USA.
-
-from __future__ import division, print_function # confidence high
-
-__version__ = "1.1 (15 June, 2015)"
-
-import sys
-if sys.version_info[0] < 3:
-    string_types = basestring
-else:
-    string_types = str
-
-#
-# -----------------------------------------------------------------------------
-# Import required modules
-# -----------------------------------------------------------------------------
-#
-import os
-import sys
-from astropy.io import fits
-#
-# -----------------------------------------------------------------------------
-# Function definitions
-# -----------------------------------------------------------------------------
-#
-def _usage():
-    """
-        Print a usage message.
-
-        Parameters: NONE
-
-        Returns: None
-
-        Exceptions: NONE
-    """
-
-    print("usage: convertwaiveredfits.py [-hmv] [-o <outputFileName>, ...] FILE ...")
-
-def _processCommandLineArgs():
-    """
-        Get the command line arguments
-
-        Parameters: NONE
-
-        Returns:
-
-           files            list of file specifications to be converted
-
-           outputFileNames  list of output file specifications
-                             (one per input file)
-                             Default: a list of None values (one per input file)
-
-           conversionFormat string indicating the conversion format requested
-                             Default: "mulitextension"
-
-           verbose          flag indicating if verbose output is desired
-                             Default: False
-
-        Exceptions: NONE
-    """
-
-    import getopt
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hvmo:",
-                                   ["help",
-                                    "verbose",
-                                    "multiExtensionConversion",
-                                    "outputFileName"])
-    except getopt.GetoptError as e:
-        print(str(e))
-        _usage()
-        sys.exit(1)
-
-    conversionFormat = ""
-    outputFileNames = []
-    verbose = False
-
-    for o, a in opts:
-        if o in ("-h", "--help"):
-            _usage()
-            print("       Convert the waivered FITS Files (FILEs) to various formats.")
-            print("       The default conversion format is multi-extension FITS.")
-            print("       Options:")
-            print("         -h,  --help                       display this help message and exit")
-            print("         -v,  --verbose                    provide verbose output")
-            print("         -m,  --multiExtensionConversion   convert to multiExtension FITS format")
-            print("         -o,  --outputFileName             comma separated list of output file")
-            print("                                           specifications (one per input FILE)")
-            sys.exit()
-
-        if o in ("-v", "--verbose"):
-            verbose = True
-
-        if o in ("-m", "--multiExtensionConversion"):
-            if conversionFormat != "":
-                print("convertwaiveredfits.py: only one conversion format allowed")
-                _usage()
-                sys.exit(1)
-
-            conversionFormat = "multiExtension"
-
-        if o in ("-o", "--outputFileName"):
-            outputFileNames = a.split(',')
-
-    if conversionFormat == "":
-        #
-        # Set the default conversion format if none was provided
-        #
-        conversionFormat = "multiExtension"
-
-    if not args:
-        print("convertwaiveredfits.py: nothing to convert")
-        _usage()
-        sys.exit(1)
-    else:
-        files = args
-
-        if outputFileNames:
-            if len(files) != len(outputFileNames):
-                print("convertwaiveredfits.py: number of output file names does not match")
-                print("                        the number of FILEs to convert")
-                _usage()
-                sys.exit(1)
-        else:
-            for i in range(0,len(files)):
-                outputFileNames.append(None)
-
-    return files,outputFileNames,conversionFormat,verbose
-
def _verify(waiveredHdul):
    """
        Verify that the input HDUList is for a waivered FITS file.

        A valid waivered-FITS HDUList has exactly two HDUs: a primary HDU
        that contains data, and an ASCII TableHDU whose number of rows
        equals the number of arrays in the primary HDU (or is exactly one,
        covering the single-array / single-row case).

        Parameters:

           waiveredHdul     HDUList object to be verified

        Returns: None

        Exceptions:

           ValueError       Input HDUList is not for a waivered FITS file
    """

    # The conditions are evaluated in the same short-circuit order as the
    # original nested checks: HDU count, primary data, table type, shapes.
    isWaivered = (
        len(waiveredHdul) == 2 and
        waiveredHdul[0].header['NAXIS'] > 0 and
        isinstance(waiveredHdul[1], fits.TableHDU) and
        (waiveredHdul[0].data.shape[0] == waiveredHdul[1].data.shape[0] or
         waiveredHdul[1].data.shape[0] == 1))

    if not isWaivered:
        raise ValueError("Input object does not represent a valid waivered" +
                         " FITS file")
-
def toMultiExtensionFits(waiveredObject,
                         multiExtensionFileName=None,
                         forceFileOutput=False,
                         verbose=False):
    """
        Convert the input waivered FITS object to a multi-extension FITS
        HDUList object.  Generate an output multi-extension FITS file if
        requested.

        Parameters:

          waiveredObject  input object representing a waivered FITS file;
                          either an astropy.io.fits.HDUList object, a file
                          object, or a file specification

          multiExtensionFileName
                          file specification for the output file
                          Default: None - do not generate an output file

          forceFileOutput force the generation of an output file when the
                          multiExtensionFileName parameter is None; the
                          output file specification will be the same as the
                          input file specification with the last character
                          of the base name replaced with the character 'h'.
                          Default: False

          verbose         provide verbose output
                          Default: False

        Returns:

          mhdul           an HDUList object in multi-extension FITS format.

        Exceptions:

          TypeError       Input object is not a HDUList, a file object or a
                          file name
    """

    if isinstance(waiveredObject, fits.HDUList):
        whdul = waiveredObject
        inputObjectDescription = "HDUList object"
    else:
        try:
            whdul = fits.open(waiveredObject)
            # Bug fix: use the builtin str here; the name 'string_types'
            # is not defined in this module and raised NameError whenever
            # a file name (rather than a file object) was passed in.
            if isinstance(waiveredObject, str):
                inputObjectDescription = "file " + waiveredObject
            else:
                inputObjectDescription = "file " + waiveredObject.name
        except TypeError:
            raise TypeError("Input object must be HDUList, file object, " + \
                            "or file name")

    _verify(whdul)

    undesiredPrimaryHeaderKeywords = ['ORIGIN','FITSDATE','FILENAME',
                                      'ALLG-MAX','ALLG-MIN','ODATTYPE',
                                      'SDASMGNU','OPSIZE','CTYPE2',
                                      'CD2_2','CD2_1','CD1_2','CTYPE3',
                                      'CD3_3','CD3_1','CD1_3','CD2_3',
                                      'CD3_2']
    #
    # Create the multi-extension primary header as a copy of the
    # wavered file primary header
    #
    mPHeader = whdul[0].header
    originalDataType = whdul[0].header.get('ODATTYPE','')
    #
    # Remove primary header cards with keywords matching the
    # list of undesired primary header keywords
    #
    for keyword in undesiredPrimaryHeaderKeywords:
        #
        # Be careful only to delete the first card that matches
        # the keyword, not all of the cards
        #
        if keyword in mPHeader:
            del mPHeader[mPHeader.index(keyword)]
    #
    # Get the columns from the secondary HDU table
    #
    wcols = whdul[1].columns
    #
    # Remove primary header cards with keywords matching the
    # column names in the secondary HDU table
    #
    for keyword in wcols.names:
        if keyword in mPHeader:
            del mPHeader[keyword]
    #
    # Create the PrimaryHDU
    #
    mPHdu = fits.PrimaryHDU(header=mPHeader)
    #
    # Add the EXTEND card
    #
    mPHdu.header.set('EXTEND', value=True, after='NAXIS')
    #
    # Add the NEXTEND card.  There will be one extension
    # for each row in the wavered Fits file table HDU.
    #
    mPHdu.header['NEXTEND'] = (whdul[1].data.shape[0],
                               'Number of standard extensions')
    #
    # Create the multi-extension file HDUList from the primary header
    #
    mhdul = fits.HDUList([mPHdu])
    #
    # Create the extension HDUs for the multi-extension file.  There
    # will be one extension for each row in the wavered file's table.
    #
    instrument = mPHeader.get('INSTRUME', '')
    nrows = whdul[1].data.shape[0]

    for i in range(0, nrows):
        #
        # Create the basic HDU from the data
        #
        if nrows == 1:
            #
            # Handle case where there is only one row in the table
            #
            data = whdul[0].data
        else:
            data = whdul[0].data[i]

        mhdul.append(fits.ImageHDU(data))
        #
        # Add cards to the header for each keyword in the column
        # names of the secondary HDU table from the wavered file
        #
        for keyword,format,unit in zip(wcols.names,wcols.formats,wcols.units):
            if unit == 'LOGICAL-':
                #
                # Handle logical values
                #
                if whdul[1].data.field(keyword)[i].strip() == 'T':
                    d = True
                else:
                    d = False
            elif format[0] == 'E':
                #
                # Handle floating point values: re-parse the value through
                # its FITS display format to reproduce the table precision.
                # NOTE(review): eval() is trusted here because fmt only
                # produces numeric literals from a float; consider float()
                # if an int-vs-float card type difference is acceptable.
                #
                fmt = '%'+format[1:]+'G'
                d = eval(fmt % float(whdul[1].data.field(keyword)[i]))
            else:
                d = whdul[1].data.field(keyword)[i]

            kw_descr = ""
            if keyword in whdul[1].header:
                kw_descr = whdul[1].header[keyword]
            # Item assignment with a (value, comment) tuple replaces the
            # legacy positional Header.update(keyword, value, comment)
            # call, which modern astropy no longer accepts.
            mhdul[i+1].header[keyword] = (d, kw_descr)
        #
        # If original data is unsigned short then scale the data.
        #
        if originalDataType == 'USHORT':
            mhdul[i+1].scale('int16','',bscale=1,bzero=32768)
            mhdul[i+1].header.set('BSCALE', value=1, before='BZERO')
        #
        # For WFPC2 and FOC instruments require additional header cards
        #
        if instrument in ('WFPC2','FOC'):
            #
            # Add EXTNAME card to header
            #
            mhdul[i+1].header['EXTNAME'] = (mPHeader.get('FILETYPE',''),
                                            'extension name')
            #
            # Add EXTVER card to the header
            #
            mhdul[i+1]._extver = i+1
            mhdul[i+1].header.set('EXTVER', value=i+1,
                                  comment='extension version number',
                                  after='EXTNAME')
            #
            # Add the EXPNAME card to the header
            #
            mhdul[i+1].header.set('EXPNAME',
                                  mPHeader.get('ROOTNAME', ''),
                                  '9 character exposure identifier',
                                  before='EXTVER')
            #
            # Add the INHERIT card to the header.
            #
            mhdul[i+1].header.set('INHERIT', True,
                                  'inherit the primary header',
                                  after='EXTVER')
            #
            # Add the ROOTNAME card to the header
            #
            mhdul[i+1].header.set('ROOTNAME',
                                  mPHeader.get('ROOTNAME', ''),
                                  'rootname of the observationset',
                                  after='INHERIT')

    if not multiExtensionFileName and forceFileOutput:
        # Derive the output name from the input: last character of the
        # base name becomes 'h' (e.g. xyz_c0f.fits -> xyz_c0h.fits).
        base,ext = os.path.splitext(whdul[0]._file.name)
        multiExtensionFileName = base[:-1]+'h'+ext

    verboseString = "Input " + inputObjectDescription + \
                    " converted to multi-extension FITS format."

    if multiExtensionFileName:
        if instrument in ('WFPC2','FOC'):
            #
            # write the FILENAME card to the header for the WFPC2 and FOC
            # instruments
            #
            head,tail = os.path.split(multiExtensionFileName)
            mhdul[0].header.set('FILENAME', value=tail, after='NEXTEND')

        # 'overwrite' replaces the deprecated (and since removed)
        # 'clobber' keyword of HDUList.writeto.
        mhdul.writeto(multiExtensionFileName, overwrite=True)
        verboseString = verboseString[:-1] + " and written to " + \
                        multiExtensionFileName + "."

    if verbose:
        print(verboseString)

    return mhdul
-
-
def convertwaiveredfits(waiveredObject,
                        outputFileName=None,
                        forceFileOutput=False,
                        convertTo='multiExtension',
                        verbose=False):
    """
        Convert the input waivered FITS object to various formats.  The
        default conversion format is multi-extension FITS.  Generate an output
        file in the desired format if requested.

        Parameters:

          waiveredObject  input object representing a waivered FITS file;
                          either an astropy.io.fits.HDUList object, a file
                          object, or a file specification

          outputFileName  file specification for the output file
                          Default: None - do not generate an output file

          forceFileOutput force the generation of an output file when the
                          outputFileName parameter is None; the output file
                          specification will be the same as the input file
                          specification with the last character of the base
                          name replaced with the character `h` in
                          multi-extension FITS format.

                          Default: False

          convertTo       target conversion type
                          Default: 'multiExtension'

          verbose         provide verbose output
                          Default: False

        Returns:

          hdul            an HDUList object in the requested format.

        Exceptions:

           ValueError       Conversion type is unknown
    """

    # Guard clause: reject unknown conversion types before doing any work.
    if convertTo != 'multiExtension':
        raise ValueError('Conversion type ' + convertTo + ' unknown')

    return toMultiExtensionFits(waiveredObject, outputFileName,
                                forceFileOutput, verbose)
-#
-# *****************************************************************************
-# Main Program callable from the shell
-# *****************************************************************************
-#
-
def main():
    """Command-line entry point: convert each FILE argument in turn."""
    files, outputFiles, conversionFormat, verbose = _processCommandLineArgs()

    for inputFile, outputFile in zip(files, outputFiles):
        convertwaiveredfits(inputFile, outputFile, True,
                            conversionFormat, verbose)

    sys.exit()
-
-
# Allow this module to be executed directly as a script.
if __name__ == '__main__':
    main()
-
-"""
-
-Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-"""
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/dialog.py b/required_pkgs/stsci.tools/lib/stsci/tools/dialog.py
deleted file mode 100644
index 1ad1982..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/dialog.py
+++ /dev/null
@@ -1,82 +0,0 @@
-####
-#       Class Dialog
-#
-#       Purpose
-#       Base class for many dialog box classes.
-####
-"""
-$Id: dialog.py 38142 2015-03-06 13:42:21Z bsimon $
-"""
-from __future__ import division # confidence high
-
-import sys
-if sys.version_info[0] < 3:
-    from Tkinter import *
-else:
-    from tkinter import *
-
class Dialog:
    """Base class for dialog-box windows built on a Tk Toplevel.

    Subclasses populate the dialog in SetupDialog(); TerminateDialog()
    records a result value and hides the window.
    """

    def __init__(self, master):
        self.master = master
        self.top = Toplevel(self.master)
        self.top.title(self.__class__.__name__)
        self.top.minsize(1, 1)
        # Per-dialog Tk variable name, used to signal dialog completion.
        self.myWaitVar = str(self.top) + 'EndDialogVar'

    def Show(self):
        """Build, center, and display the dialog."""
        self.SetupDialog()
        self.CenterDialog()
        self.top.deiconify()
        self.top.focus()

    def TerminateDialog(self, withValue):
        """Record *withValue* as the dialog result and hide the window."""
        self.top.setvar(self.myWaitVar, withValue)
        self.top.withdraw()

    def DialogCleanup(self):
        """Destroy the dialog window and return focus to the master."""
        self.top.destroy()
        self.master.focus()

    def SetupDialog(self):
        """Hook for subclasses to populate the dialog with widgets."""
        pass

    def CenterDialog(self):
        """Position the dialog near the center of the screen."""
        self.top.withdraw()
        self.top.update_idletasks()
        screenW = self.top.winfo_screenwidth()
        screenH = self.top.winfo_screenheight()
        neededW = self.top.winfo_reqwidth()
        neededH = self.top.winfo_reqheight()
        offsetX = (screenW - neededW) // 2
        offsetY = (screenH - neededH) // 2 - 100
        self.top.geometry("+%d+%d" % (offsetX, offsetY))
-
-####
-#       Class ModalDialog
-#
-#       Purpose
-#       Base class for many modal dialog box classes.
-####
-
class ModalDialog(Dialog):
    """Base class for modal dialog boxes: Show() blocks until terminated."""

    def __init__(self, master):
        # Bug fix: was "Dialog__init__(self, master)" (missing attribute
        # dot), which raised NameError on every instantiation.
        Dialog.__init__(self, master)

    def Show(self):
        """Display the dialog modally and return its integer result.

        Blocks in Tk's waitvar until TerminateDialog() sets the dialog's
        wait variable.
        """
        self.SetupDialog()
        self.CenterDialog()
        try:
            self.top.grab_set() # make it modal
        except TclError:
            # This fails on Linux, but does it really HAVE to be modal
            if sys.platform.lower().find('linux') >= 0:
                pass
            else:
                raise
        self.top.focus()
        self.top.deiconify()
        self.top.waitvar(self.myWaitVar)
        return int(self.top.getvar(self.myWaitVar))
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/editpar.py b/required_pkgs/stsci.tools/lib/stsci/tools/editpar.py
deleted file mode 100644
index 29331ea..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/editpar.py
+++ /dev/null
@@ -1,1769 +0,0 @@
-"""module 'editpar.py' -- main module for generating the EPAR task editor
-
-$Id: editpar.py 38909 2015-04-08 17:41:07Z bsimon $
-
-Taken from pyraf/lib/epar.py, originally signed "M.D. De La Pena, 2000 Feb. 4"
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-#System level modules
-import os, sys, tempfile, time
-from . import capable
-
-PY3K = sys.version_info[0] > 2
-
-if capable.OF_GRAPHICS:
-    if PY3K:
-        from tkinter import  _default_root
-        from tkinter import *
-        from tkinter.filedialog import asksaveasfilename
-        from tkinter.messagebox import askokcancel, askyesno, showwarning
-    else:
-        from Tkinter import  _default_root
-        from Tkinter import *
-        from tkFileDialog import asksaveasfilename
-        from tkMessageBox import askokcancel, askyesno, showwarning
-
-# stsci.tools modules
-from .irafglobals import userWorkingHome
-from . import basicpar, eparoption, irafutils, taskpars
-
# Constants
MINVIEW     = 500              # minimum height (px) of the clipped param view
MINPARAMS   = 25               # param count at/below which view is not clipped
INPUTWIDTH  = 10
VALUEWIDTH  = 21
PROMPTWIDTH = 55
DFT_OPT_FILE = "epar.optionDB" # default Tk options-database file name
TIP = "rollover"
DBG = "debug"

# The following action types are used within the GUI code.  They define what
# kind of GUI action actually caused a parameter's value to be adjusted.
# This is meant to be like an enum.  These values may appear in a task's
# task.cfgspc file in a rule.  In that file, the value 'always' may be used, in
# addition to these values, to indicate a match to all possible action types.
GROUP_ACTIONS = ('defaults','init','fopen','entry')
# init     -> startup of the GUI
# defaults -> the user clicked the Defaults or Reset button
# fopen    -> the user loaded a config file
# entry    -> the user actually edited a parameter (via mouse or keyboard)

# Use these values for startup geometry ***for now***
# PARENT is the main editor window
PARENTX = 50
PARENTY = 50

# DCHILD[XY] are amounts each successive child shifts
DCHILDX = 50
DCHILDY = 50

# CHILD[XY] is a PSET window
CHILDX = PARENTX
CHILDY = PARENTY

# HELP[XY] is for the help as displayed in a window
HELPX   = 300
HELPY   = 25
-
-class UnfoundParamError(Exception): pass
-
-
-class EditParDialog(object):
-
    def __init__(self, theTask, parent=None, isChild=0,
                 title="Parameter Editor", childList=None,
                 resourceDir='.'):
        """Build and display the parameter-editor window.

        Parameters
        ----------
        theTask : object or str
            Task to edit; handed to _setTaskParsObj(), which subclasses
            override to turn it into the _taskParsObj used throughout.
        parent : Tk widget or None
            Parent widget; when None a hidden Tk root is created/reused.
        isChild : int
            Non-zero when this editor is a child (PSET) window.
        title : str
            Base GUI name shown in the window title.
        childList : list or None
            Shared list of child dialogs (all children share one list).
        resourceDir : str
            Directory searched for the app-default Tk options file.

        Note: when this editor is not a child, the constructor blocks in
        the Tk mainloop until the window is closed.
        """

        # Initialize status message stuff first thing
        self._leaveStatusMsgUntil = 0
        self._msgHistory = [] # all msgs, of all kinds, since we opened
        self._statusMsgsToShow = [] # keep a *small* number of late msgs
        self.debug('Starting up the GUI!')

        # Call our (or a subclass's) _setTaskParsObj() method
        self._setTaskParsObj(theTask)

        # Now go back and ensure we have the full taskname; set up other items
        self._canceled = False
        self._executed = False
        self._guiName = title
        self.taskName = self._taskParsObj.getName()
        self.pkgName = self._taskParsObj.getPkgname()
        theParamList = self._taskParsObj.getParList(docopy=1)
        self._rcDir = resourceDir
        self.debug('TASK: '+self.taskName+', PKG: '+self.pkgName+ \
                   ', RC: '+self._rcDir)
        # setting _tmwm=1 is the slowest motion, 7 seems OK, 10 maybe too fast
        self._tmwm = int(os.getenv('TEAL_MOUSE_WHEEL_MULTIPLIER', 7))

        # Get default parameter values for unlearn - watch return value
        # NOTE - this may edit/reorder the working paramList
        if not self._setupDefaultParamList():
            return

        # Ignore the last parameter which is $nargs
        self.numParams = len(theParamList) - 1

        # Set all default master GUI settings, then
        # allow subclasses to override them
        self._appName             = "Par Editor"
        self._appHelpString       = "No help yet created for this GUI editor"
        self._useSimpleAutoClose  = False # certain buttons close GUI also
        self._showExecuteButton   = True
        self._showSaveCloseOnExec = True
        self._saveAndCloseOnExec  = True
        self._showFlaggingChoice  = True
        self._flagNonDefaultVals  = None # default not yet set
        self._showExtraHelpButton = False
        self._showHelpInBrowser   = False
        self._knowTaskHelpIsHtml  = False
        self._unpackagedTaskTitle = "Task"
        self._writeProtectOnSaveAs= True
        self._defaultsButtonTitle = "Defaults"
        self._optFile             = DFT_OPT_FILE
        self._defSaveAsExt        = '.cfg'

        # Colors
        self._frmeColor = None  # frame of window
        self._taskColor = None  # task label area
        self._bboxColor = None  # button area
        self._entsColor = None  # entries area
        self._flagColor = "red" # non-default values

        # give the subclass a chance to disagree
        self._overrideMasterSettings() # give the subclass a chance to disagree

        # any settings which depend on overrides
        if self._flagNonDefaultVals is None:
            self._flagNonDefaultVals = self._showFlaggingChoice # default

        # Create the root window as required, but hide it
        self.parent = parent
        if self.parent == None:
            global _default_root
            if _default_root is None:
                if PY3K:
                    import tkinter as Tkinter
                else:
                    import Tkinter
                if not Tkinter._default_root:
                    _default_root = Tkinter.Tk()
                    _default_root.withdraw()
                else:
                    _default_root = Tkinter._default_root

        # Track whether this is a parent or child window
        self.isChild = isChild

        # Set up a color for each of the backgrounds
        if self.isChild:
        #    self._frmeColor = "LightSteelBlue"
            self.iconLabel = "EPAR Child"
        else:
            self.iconLabel = "EPAR Parent"

        # help windows do not exist yet
        self.eparHelpWin = None
        self.irafHelpWin = None
        self.logHistWin = None

        # no last focus widget
        self.lastFocusWidget = None

        # Generate the top epar window
        self.top = top = Toplevel(self.parent,bg=self._frmeColor,visual="best")
        self.top.withdraw() # hide it while we fill it up with stuff

        if len(self.pkgName):
            self.updateTitle(self.pkgName+"."+self.taskName)
        else:
            self.updateTitle(self.taskName)
        self.top.iconname(self.iconLabel)

        # Read in the tk options database file.  Try, in order: the user's
        # current directory, the user's startup directory, the app default.
        try:
            # User's current directory
            self.top.option_readfile(os.path.join(os.curdir, self._optFile))
        except TclError:
            try:
                # User's startup directory
                self.top.option_readfile(os.path.join(userWorkingHome,
                                                      self._optFile))
            except TclError:
                try:
                    # App default
                    self.top.option_readfile(os.path.join(self._rcDir,
                                                          self._optFile))
                except TclError:
                    if self._optFile != DFT_OPT_FILE:
                        pass
                    else:
                        raise

        # Create an empty list to hold child dialogs
        # *** Not a good way, REDESIGN with Mediator!
        # Also, build the parent menu bar
        if (self.parent == None):
            self.top.childList = []
        elif childList is not None:
            # all children share a list
            self.top.childList = childList

        # Build the EPAR menu bar
        self.makeMenuBar(self.top)

        # Create a spacer
        Frame(self.top, bg=self._taskColor, height=10).pack(side=TOP, fill=X)

        # Print the package and task names
        self.printNames(self.top, self.taskName, self.pkgName)

        # Insert a spacer between the static text and the buttons
        Frame(self.top, bg=self._taskColor, height=15).pack(side=TOP, fill=X)

        # Set control buttons at the top of the frame
        self.buttonBox(self.top)

        # Insert a spacer between the static text and the buttons
        Frame(self.top, bg=self._entsColor, height=15).pack(side=TOP, fill=X)

        # Set up an information Frame at the bottom of the EPAR window
        # RESIZING is currently disabled.
        # Do this here so when resizing to a smaller sizes, the parameter
        # panel is reduced - not the information frame.
        self.top.status = Label(self.top, text="", relief=SUNKEN,
                                borderwidth=1, anchor=W, bg=self._frmeColor)
        self.top.status.pack(side=BOTTOM, fill=X, padx=0, pady=3, ipady=3)

        # Set up a Frame to hold a scrollable Canvas
        self.top.f = frame = Frame(self.top, relief=RIDGE, borderwidth=1,
                                   bg=self._entsColor)

        # Overlay a Canvas which will hold a Frame
        self.top.f.canvas = canvas = Canvas(self.top.f, width=100, height=100,
            takefocus=FALSE, bg=self._entsColor,
            highlightbackground=self._entsColor)
#           highlightcolor="black" # black must be the default, since it is blk

        # Always build the scrollbar, even if number of parameters is small,
        # to allow window to be resized.

        # Attach a vertical Scrollbar to the Frame/Canvas
        self.top.f.vscroll = Scrollbar(self.top.f, orient=VERTICAL,
             width=11, relief=SUNKEN, activerelief=RAISED,
             takefocus=FALSE, bg=self._entsColor)
        canvas['yscrollcommand'] = self.top.f.vscroll.set
        self.top.f.vscroll['command'] = canvas.yview

        # Pack the Scrollbar
        self.top.f.vscroll.pack(side=RIGHT, fill=Y)

        # enable Page Up/Down keys
        scroll = canvas.yview_scroll
        top.bind('<Next>', lambda event, fs=scroll: fs(1, "pages"))
        top.bind('<Prior>', lambda event, fs=scroll: fs(-1, "pages"))

        # make up, down arrows and return/shift-return do same as Tab, Shift-Tab
        top.bind('<Up>', self.focusPrev)
        top.bind('<MouseWheel>', self.mwl) # on OSX, rolled up or down
        top.bind('<Button-4>', self.mwl)   # on Linux, rolled up
        top.bind('<Button-5>', self.mwl)   # on Linux, rolled down
        top.bind('<Down>', self.focusNext)
        top.bind('<Shift-Return>', self.focusPrev)
        top.bind('<Return>', self.focusNext)
        try:
            # special shift-tab binding needed for (some? all?) linux systems
            top.bind('<KeyPress-ISO_Left_Tab>', self.focusPrev)
        except TclError:
            # Ignore exception here, the binding can't be relevant
            # if ISO_Left_Tab is unknown.
            pass

        # Pack the Frame and Canvas
        canvas.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.top.f.pack(side=TOP, expand=TRUE, fill=BOTH)

        # Define a Frame to contain the parameter information
        canvas.entries = Frame(canvas, bg=self._entsColor)

        # Generate the window to hold the Frame which sits on the Canvas
        # (the returned window handle is not referenced again here)
        cWindow = canvas.create_window(0, 0,
                           anchor=NW,
                           window=canvas.entries)

        # Insert a spacer between the Canvas and the information frame
        Frame(self.top, bg=self._entsColor, height=4).pack(side=TOP, fill=X)

        # The parent has the control, unless there are children
        # Fix the geometry of where the windows first appear on the screen
        if (self.parent == None):
            #self.top.grab_set()

            # Position this dialog relative to the parent
            self.top.geometry("+%d+%d" % (PARENTX, PARENTY))
        else:
            #self.parent.grab_release()
            #self.top.grab_set()

            # Declare the global variables so they can be updated
            global CHILDX
            global CHILDY

            # Position this dialog relative to the parent; each successive
            # child window is shifted by (DCHILDX, DCHILDY)
            CHILDX = CHILDX + DCHILDX
            CHILDY = CHILDY + DCHILDY
            self.top.geometry("+%d+%d" % (CHILDX, CHILDY))


        #
        # Now fill in the Canvas Window
        #

        # The makeEntries method creates the parameter entry Frame
        self.makeEntries(canvas.entries, self.top.status)

        # Force an update of the entry Frame
        canvas.entries.update()

        # Determine the size of the entry Frame
        width = canvas.entries.winfo_width()
        height = canvas.entries.winfo_height()

        # Reconfigure the Canvas size based on the Frame.
        if (self.numParams <= MINPARAMS):
            viewHeight = height
        else:
            # Set the minimum display
            viewHeight = MINVIEW

        # Scrollregion is based upon the full size of the entry Frame
        canvas.config(scrollregion=(0, 0, width, height))
        # Smooth scroll
        self.yscrollincrement = 5 # changed Mar2010, had been 50 a long time
        canvas.config(yscrollincrement=self.yscrollincrement)

        # Set the actual viewable region for the Canvas
        canvas.config(width=width, height=viewHeight)

        # Force an update of the Canvas
        canvas.update()

        # Associate deletion of the main window to a Abort
        self.top.protocol("WM_DELETE_WINDOW", self.abort)

        # Trigger all widgets one time before starting in case they have
        # values which would run a trigger
        self.checkAllTriggers('init')

        # Set focus to first parameter
        self.setViewAtTop()

        # Finally show it
        self.top.update()
        self.top.deiconify()

        # Enable interactive resizing in height
        self.top.resizable(width=FALSE, height=TRUE)

        # Limit maximum window height
        width = self.top.winfo_width()
        height = self.top.winfo_height() + height - viewHeight
        self.top.maxsize(width=width, height=height)

        self.debug('showing '+self._appName+' main window')

        # run the mainloop; blocks here until the window closes (parent only)
        if not self.isChild:
            self._preMainLoop()
            self.top.mainloop()
            self._postMainLoop()
-
-
-    def _overrideMasterSettings(self):
-        """ Hook for subclasses to override some attributes if wished. """
-        return
-
-
-    def _preMainLoop(self):
-        """ Hook for subclasses to override if wished. """
-        return
-
-
-    def _postMainLoop(self):
-        """ Hook for subclasses to override if wished. """
-        return
-
-
-    def _showOpenButton(self):
-        """ Should we show the "Open..." button?  Subclasses override. """
-        return True
-
-
-    def _setTaskParsObj(self, theTask):
-        """ This method, meant to be overridden by subclasses, generates the
-        _taskParsObj object. theTask can often be either a file name or a
-        TaskPars subclass object. """
-
-        # Here we catch if this version is run by accident
-        raise NotImplementedError("EditParDialog is not to be used directly")
-
-
-    def _saveGuiSettings(self):
-        """ Hook for subclasses to save off GUI settings somewhere. """
-        return # skip this by default
-
-
def updateTitle(self, atitle):
    """Set the window title, appending atitle (when given) to the GUI name."""
    if atitle:
        newTitle = '%s:  %s' % (self._guiName, atitle)
    else:
        newTitle = '%s' % (self._guiName)
    self.top.title(newTitle)
-
-
def checkAllTriggers(self, action):
    """Tell every entry widget it was edited so trigger actions can fire.

    Used right after all widgets have their values set or forced in bulk
    (e.g. via setAllEntriesFromParList).
    """
    for widget in self.entryNo:
        widget.widgetEdited(action=action, skipDups=False)
-
-
def freshenFocus(self):
    """Refresh the view after a change; the scroll-to-top is delayed a
    bit so other queued events get through first."""
    top = self.top
    top.update_idletasks()
    top.after(10, self.setViewAtTop)
-
-
def setViewAtTop(self):
    """Focus the first parameter and scroll the canvas back to origin."""
    self.entryNo[0].focus_set()
    canvas = self.top.f.canvas
    canvas.xview_moveto(0.0)
    canvas.yview_moveto(0.0)
-
-
def getTaskParsObj(self):
    """Simple accessor returning the underlying _taskParsObj."""
    obj = self._taskParsObj
    return obj
-
def mwl(self, event):
    """Mouse Wheel handler - under Tkinter we seem to need Tk v8.5+."""
    if event.num == 4:    # wheel up on Linux
        step = -self._tmwm
    elif event.num == 5:  # wheel down on Linux
        step = self._tmwm
    else:
        # elsewhere event.delta carries the direction, with reversed sign
        step = -(event.delta) * self._tmwm
    self.top.f.canvas.yview_scroll(step, 'units')
-
# A bug appeared in Python 2.3 that caused tk_focusNext and
# tk_focusPrev to fail. The following two routines will
# trap this error and call "fixed" versions of these tk routines
# instead in the event of such errors.
-
def focusNext(self, event):
    """Set focus to the next item in the traversal sequence."""
    widget = event.widget
    try:
        widget.tk_focusNext().focus_set()
    except TypeError:
        # Work around the tk_focusNext bug by asking Tk directly (see
        # the tkinter source for the commented original version).
        name = widget.tk.call('tk_focusNext', widget._w)
        widget._nametowidget(str(name)).focus_set()
-
def focusPrev(self, event):
    """Set focus to the previous item in the traversal sequence."""
    widget = event.widget
    try:
        widget.tk_focusPrev().focus_set()
    except TypeError:
        # Work around the tk_focusPrev bug by asking Tk directly (see
        # the tkinter source for the commented original version).
        name = widget.tk.call('tk_focusPrev', widget._w)
        widget._nametowidget(str(name)).focus_set()
-
def doScroll(self, event):
    """Scroll the panel so the widget gaining focus becomes visible.

    Tracks the last widget doScroll handled and ignores repeated calls;
    that covers focus moving not between parameter entries but to some
    place outside the hierarchy, where scrolling is not expected.

    Returns false if the scroll is ignored, else true.
    """
    widget = event.widget
    if widget is self.lastFocusWidget:
        return FALSE
    self.lastFocusWidget = widget
    if widget is None:
        return TRUE
    canvas = self.top.f.canvas
    # widget and canvas extents in root-window coordinates
    wtop = widget.winfo_rooty()
    wbot = wtop + widget.winfo_height()
    ctop = canvas.winfo_rooty()
    cbot = ctop + canvas.winfo_height()
    yinc = self.yscrollincrement
    if wtop < ctop:
        # float math keeps this correct under true division
        canvas.yview_scroll(int((wtop - ctop - yinc + 1.) / yinc), "units")
    elif cbot < wbot:
        canvas.yview_scroll(int((wbot - cbot + yinc - 1.) / yinc), "units")
    return TRUE
-
-
-    def _handleParListMismatch(self, probStr, extra=False):
-        """ Handle the situation where two par lists do not match.
-        This is meant to allow subclasses to override. Note that this only
-        handles "missing" pars and "extra" pars, not wrong-type pars. """
-
-        errmsg = 'ERROR: mismatch between default and current par lists ' + \
-               'for task "'+self.taskName+'"'
-        if probStr:
-            errmsg += '\n\t'+probStr
-        errmsg += '\n(try: "unlearn '+self.taskName+'")'
-        print(errmsg)
-        return False
-
-
-    def _setupDefaultParamList(self):
-        """ This creates self.defaultParamList.  It also does some checks
-        on the paramList, sets its order if needed, and deletes any extra
-        or unknown pars if found. We assume the order of self.defaultParamList
-        is the correct order. """
-
-        # Obtain the default parameter list
-        self.defaultParamList = self._taskParsObj.getDefaultParList()
-        theParamList = self._taskParsObj.getParList()
-
-        # Lengths are probably equal but this isn't necessarily an error
-        # here, so we check for differences below.
-        if len(self.defaultParamList) != len(theParamList):
-            # whoa, lengths don't match (could be some missing or some extra)
-            pmsg = 'Current list not same length as default list'
-            if not self._handleParListMismatch(pmsg):
-                return False
-
-        # convert current par values to a dict of { par-fullname:par-object }
-        # for use below
-        ourpardict = {}
-        for par in theParamList: ourpardict[par.fullName()] = par
-
-        # Sort our paramList according to the order of the defaultParamList
-        # and repopulate the list according to that order. Create sortednames.
-        sortednames = [p.fullName() for p in self.defaultParamList]
-
-        # Rebuild par list sorted into correct order.  Also find/flag any
-        # missing pars or any extra/unknown pars.  This automatically deletes
-        # "extras" by not adding them to the sorted list in the first place.
-        migrated = []
-        newList = []
-        for fullName in sortednames:
-            if fullName in ourpardict:
-                newList.append(ourpardict[fullName])
-                migrated.append(fullName) # make sure all get moved over
-            else: # this is a missing par - insert the default version
-                theDfltVer = \
-                    [p for p in self.defaultParamList if p.fullName()==fullName]
-                newList.append(copy.deepcopy(theDfltVer[0]))
-
-        # Update!  Next line writes to the self._taskParsObj.getParList() obj
-        theParamList[:] = newList # fill with newList, keep same mem pointer
-
-        # See if any got left out
-        extras = [fn for fn in ourpardict if not fn in migrated]
-        for fullName in extras:
-            # this is an extra/unknown par - let subclass handle it
-            if not self._handleParListMismatch('Unexpected par: "'+\
-                        fullName+'"', extra=True):
-                return False
-            print('Ignoring unexpected par: "'+p+'"')
-
-        # return value indicates that all is well to continue
-        return True
-
-
# Build the parameter entry widgets, one per par.
def makeEntries(self, master, statusBar):
    """Create an EparOption entry in master for every task parameter."""
    # Model data: the list of pars
    pars = self._taskParsObj.getParList()

    # Width of the widest parameter name (at least INPUTWIDTH)
    inputLength = max([INPUTWIDTH] +
                      [len(pars[i].name) for i in range(self.numParams)])

    # Set up the field widths.  The name field gets extra space for a
    # buffer and in case the longest parameter carries the hidden
    # parameter indicator.
    self.fieldWidths = {'inputWidth': inputLength + 4,
                        'valueWidth': VALUEWIDTH,
                        'promptWidth': PROMPTWIDTH}

    # Singular form of the defaults verb, e.g. "Defaults" -> "Default"
    dfltsVerb = self._defaultsButtonTitle
    if dfltsVerb[-1] == 's':
        dfltsVerb = dfltsVerb[:-1]

    # Create each entry via the eparoption factory
    self.entryNo = [None] * self.numParams
    for i in range(self.numParams):
        par = pars[i]
        eparOpt = self._nonStandardEparOptionFor(par.type)
        cbo = self._defineEditedCallbackObjectFor(par.scope, par.name)
        hcbo = self if self._knowTaskHelpIsHtml else None
        self.entryNo[i] = eparoption.eparOptionFactory(
            master, statusBar, par, self.defaultParamList[i],
            self.doScroll, self.fieldWidths,
            plugIn=eparOpt, editedCallbackObj=cbo,
            helpCallbackObj=hcbo, mainGuiObj=self,
            defaultsVerb=dfltsVerb, bg=self._entsColor,
            indent=par.scope not in (None, '', '.'),
            flagging=self._flagNonDefaultVals,
            flaggedColor=self._flagColor)
-
-
-    def _nonStandardEparOptionFor(self, paramTypeStr):
-        """ Hook to allow subclasses to employ their own GUI option type.
-            Return None or a class which derives from EparOption. """
-        return None
-
-
-    def _defineEditedCallbackObjectFor(self, parScope, parName):
-        """ Hook to allow subclasses to set their own callback-containing
-            object to be used when a given option/parameter is edited.
-            See notes in EparOption. """
-        return None
-
-
-    def _isUnpackagedTask(self):
-        """ Hook to allow subclasses to state that this is a rogue task, not
-            affiliated with a specific package, affecting its display. """
-        return self.pkgName == None or len(self.pkgName) < 1
-
-
-    def _toggleSectionActiveState(self, sectionName, state, skipList):
-        """ Make an entire section (minus skipList items) either active or
-            inactive.  sectionName is the same as the param's scope. """
-
-        # Get model data, the list of pars
-        theParamList = self._taskParsObj.getParList()
-
-        # Loop over their assoc. entries
-        for i in range(self.numParams):
-            if theParamList[i].scope == sectionName:
-                if skipList and theParamList[i].name in skipList:
-#                   self.entryNo[i].setActiveState(True) # these always active
-                    pass # if it started active, we don't need to reactivate it
-                else:
-                    self.entryNo[i].setActiveState(state)
-
-
# Show the package and task names (or the filename for an unpackaged
# parameter list) at the top of the window.
def printNames(self, top, taskName, pkgName):
    """Label the GUI with its package and task identification."""
    topbox = Frame(top, bg=self._taskColor)
    textbox = Frame(topbox, bg=self._taskColor)

    # Set up the information strings
    if self._isUnpackagedTask():
        # label for a parameter list is just filename
        rows = [" " + self._unpackagedTaskTitle + " = " + taskName]
    else:
        # labels for task
        rows = ["  Package = " + pkgName.upper(),
                "       Task = " + taskName.upper()]

    for row in rows:
        Label(textbox, text=row, bg=self._taskColor).pack(side=TOP, anchor=W)

    textbox.pack(side=LEFT, anchor=W)
    topbox.pack(side=TOP, expand=FALSE, fill=X)
-
# Assemble the menu bar across the top of the window.
def makeMenuBar(self, top):
    """Create the File/Open/Options/Help menus on a menubar Frame."""
    menubar = Frame(top, bd=1, relief=GROOVE, bg=self._frmeColor)

    # Generate the menus
    self.makeFileMenu(menubar)
    if self._showOpenButton():
        self.makeOpenMenu(menubar)

    # When redesigned, the options menu should only be on the parent
    # (i.e. only when not self.isChild)
    self.makeOptionsMenu(menubar)

    self.makeHelpMenu(menubar)

    menubar.pack(fill=X)
-
-
# Build the "File" menu.
def makeFileMenu(self, menubar):
    """Create the File menu button and its command entries."""
    fileButton = Menubutton(menubar, text='File', bg=self._frmeColor)
    fileButton.pack(side=LEFT, padx=2)
    menu = Menu(fileButton, tearoff=0)

    if self._showExecuteButton:
        menu.add_command(label="Execute", command=self.execute)
        if self.isChild:
            # child windows may not execute
            menu.entryconfigure(0, state=DISABLED)

    saqlbl = "Save"
    if self._useSimpleAutoClose:
        saqlbl += " & Quit"
    menu.add_command(label=saqlbl, command=self.saveAndClose)
    if not self.isChild:
        menu.add_command(label="Save As...", command=self.saveAs)
    menu.add_separator()
    menu.add_command(label=self._defaultsButtonTitle, command=self.unlearn)
    menu.add_separator()
    if not self._useSimpleAutoClose:
        menu.add_command(label="Close", command=self.closeGui)
    menu.add_command(label="Cancel", command=self.abort)

    # Associate the menu with the menu button
    fileButton.menu = menu
    fileButton["menu"] = menu
    return fileButton
-
-    def _updateOpen(self):
-        # Get new data
-        flist = self._getOpenChoices()
-
-        # Delete old choices
-        if self._numOpenMenuItems > 0:
-            self._openMenu.delete(0, self._numOpenMenuItems-1)
-
-        # Add all new choices
-        self._numOpenMenuItems = len(flist)
-        if self._numOpenMenuItems > 0:
-            for ff in flist:
-                if ff[-3:] == '...':
-                    self._openMenu.add_separator()
-                    self._numOpenMenuItems += 1
-                self._openMenu.add_radiobutton(label=ff, command=self.pfopen,
-                                               variable=self._openMenuChoice,
-                                               indicatoron=0)
-                                               # value=ff) ... (same as label)
-            self._openMenuChoice.set(0) # so nothing has check mark next to it
-        else:
-            showwarning(title="No Files To Open", message="No extra "+ \
-                'parameter files found for task "'+self.taskName+'".')
-
-    def _getOpenChoices(self):
-        """ Get the current list of file name choices for the Open button.
-            This is meant for subclasses to override. """
-        return []
-
# Build the "Open..." menu.
def makeOpenMenu(self, menubar):
    """Create the Open menu button; its entries refresh on each post."""
    self._openMenuChoice = StringVar()  # used until the GUI closes
    self._numOpenMenuItems = 1  # the dummy entry below

    openBtn = Menubutton(menubar, text='Open...', bg=self._frmeColor)
    openBtn.bind("<Enter>", self.printOpenInfo)
    openBtn.pack(side=LEFT, padx=2)

    menu = Menu(openBtn, tearoff=0, postcommand=self._updateOpen)
    menu.bind("<Enter>", self.printOpenInfo)
    # dummy entry with no command; real choices are added by _updateOpen
    menu.add_radiobutton(label=' ', variable=self._openMenuChoice)

    if self.isChild:
        menu.entryconfigure(0, state=DISABLED)

    # Associate the menu with the menu button
    openBtn.menu = menu
    openBtn["menu"] = menu

    # Keep a ref for ourselves
    self._openMenu = menu
    return openBtn
-
# Build the "Options" menu (intended for the parent EPAR only).
def makeOptionsMenu(self, menubar):
    """Create the Options menu: help display mode plus optional toggles."""
    # Where should task help be shown?
    self._helpChoice = StringVar()
    self._helpChoice.set("BROWSER" if self._showHelpInBrowser else "WINDOW")

    if self._showSaveCloseOnExec:
        self._execChoice = IntVar()
        self._execChoice.set(int(self._saveAndCloseOnExec))

    optionButton = Menubutton(menubar, text="Options", bg=self._frmeColor)
    optionButton.pack(side=LEFT, padx=2)
    menu = Menu(optionButton, tearoff=0)
    menu.add_radiobutton(label="Display Task Help in a Window",
                         value="WINDOW", command=self.setHelpType,
                         variable=self._helpChoice)
    menu.add_radiobutton(label="Display Task Help in a Browser",
                         value="BROWSER", command=self.setHelpType,
                         variable=self._helpChoice)

    if self._showExecuteButton and self._showSaveCloseOnExec:
        menu.add_separator()
        menu.add_checkbutton(label="Save and Close on Execute",
                             command=self.setExecOpt,
                             variable=self._execChoice)
    if self._showFlaggingChoice:
        self._flagChoice = IntVar()
        self._flagChoice.set(int(self._flagNonDefaultVals))
        menu.add_separator()
        menu.add_checkbutton(label="Flag Non-default Values",
                             command=self.setFlagOpt,
                             variable=self._flagChoice)

    # Associate the menu with the menu button
    optionButton.menu = menu
    optionButton["menu"] = menu
    return optionButton
-
def capTaskName(self):
    """Return the task name with its first letter capitalized."""
    name = self.taskName
    return name[:1].upper() + name[1:]
-
def makeHelpMenu(self, menubar):
    """Create the Help menu: task help, app help, and the log window."""
    button = Menubutton(menubar, text='Help', bg=self._frmeColor)
    button.bind("<Enter>", self.printHelpInfo)
    button.pack(side=RIGHT, padx=2)
    menu = Menu(button, tearoff=0)
    menu.bind("<Enter>", self.printHelpInfo)
    menu.add_command(label=self.capTaskName()+" Help",
                     command=self.showTaskHelp)
    menu.add_command(label=self._appName+" Help",
                     command=self.eparHelp)
    menu.add_separator()
    menu.add_command(label='Show '+self._appName+' Log',
                     command=self.showLogHist)
    button.menu = menu
    button["menu"] = menu
    return button
-
# Lay out the action buttons, ordered for good keyboard navigation.
def buttonBox(self, top):
    """Create the Execute/Save/Defaults/Close/Cancel/Help button row."""
    bg = self._bboxColor
    box = Frame(top, bg=bg, bd=1, relief=SUNKEN)

    # When a Button is exited, the information clears, and the Button
    # goes back to the nonactive color.
    top.bind("<Leave>", self.clearInfo)

    # Execute the task
    if self._showExecuteButton:
        execBtn = Button(box, text="Execute", bg=bg, relief=RAISED,
                         command=self.execute, highlightbackground=bg)
        execBtn.pack(side=LEFT, padx=5, pady=7)
        execBtn.bind("<Enter>", self.printExecuteInfo)
        if not self._useSimpleAutoClose:
            # separate this button from the others - it's unusual
            Label(box, text="", bg=bg).pack(side=LEFT, padx=20)
        # EXECUTE button is disabled for child windows
        if self.isChild:
            execBtn.configure(state=DISABLED)

    # Save the parameter settings (and exit from epar in auto-close mode)
    saqlbl = "Save"
    if self._useSimpleAutoClose:
        saqlbl += " & Quit"
    saveBtn = Button(box, text=saqlbl, relief=RAISED,
                     command=self.saveAndClose, bg=bg,
                     highlightbackground=bg)
    saveBtn.pack(side=LEFT, padx=5, pady=7)
    saveBtn.bind("<Enter>", self.printSaveQuitInfo)

    # Unlearn all the parameter settings (set back to the defaults)
    unlearnBtn = Button(box, text=self._defaultsButtonTitle,
                        relief=RAISED, command=self.unlearn,
                        bg=bg, highlightbackground=bg)
    unlearnSide = LEFT if self._showExtraHelpButton else RIGHT
    unlearnBtn.pack(side=unlearnSide, padx=5, pady=7)
    unlearnBtn.bind("<Enter>", self.printUnlearnInfo)

    # Buttons to close versus abort this edit session
    if not self._useSimpleAutoClose:
        closeBtn = Button(box, text="Close", relief=RAISED,
                          command=self.closeGui, bg=bg,
                          highlightbackground=bg)
        closeBtn.pack(side=LEFT, padx=5, pady=7)
        closeBtn.bind("<Enter>", self.printCloseInfo)

    abortBtn = Button(box, text="Cancel", bg=bg, relief=RAISED,
                      command=self.abort, highlightbackground=bg)
    abortBtn.pack(side=LEFT, padx=5, pady=7)
    abortBtn.bind("<Enter>", self.printAbortInfo)

    # Generate the Help button
    if self._showExtraHelpButton:
        helpBtn = Button(box, text=self.capTaskName()+" Help",
                         relief=RAISED, command=self.showTaskHelp,
                         bg=bg, highlightbackground=bg)
        helpBtn.pack(side=RIGHT, padx=5, pady=7)
        helpBtn.bind("<Enter>", self.printHelpInfo)

    box.pack(fill=X, expand=FALSE)
-
def setExecOpt(self, event=None):
    """Record the "Save and Close on Execute" checkbox state."""
    choice = self._execChoice.get()
    self._saveAndCloseOnExec = bool(choice)
-
def setFlagOpt(self, event=None):
    """Toggle non-default-value flagging and refresh every entry."""
    flagging = bool(self._flagChoice.get())
    self._flagNonDefaultVals = flagging
    for entry in self.entryNo:
        entry.setIsFlagging(flagging, True)
-
def setHelpType(self, event=None):
    """Record the user's chosen help display mode.  WINDOW shows help in
    a task-generated scrollable window; BROWSER invokes the task's HTML
    help pages in a browser."""
    self._showHelpInBrowser = (self._helpChoice.get() == "BROWSER")
-
-
def eparHelp(self, event=None):
    """Show help for this application."""
    self._showAnyHelp('epar')

def showTaskHelp(self, event=None):
    """Show help for the current task."""
    self._showAnyHelp('task')

def showParamHelp(self, parName):
    """Show task help positioned at the given parameter."""
    self._showAnyHelp('task', tag=parName)

def showLogHist(self, event=None):
    """Show the log history window."""
    self._showAnyHelp('log')
-
-
-    #
-    # Define flyover help text associated with the action buttons
-    #
-
def clearInfo(self, event):
    """Blank out the flyover status line."""
    self.showStatus("")
-
def printHelpInfo(self, event):
    """Flyover help for the Help buttons."""
    msg = "Display the help page"
    self.showStatus(msg, cat=TIP)
-
def printUnlearnInfo(self, event):
    """Flyover help for the defaults/unlearn button."""
    msg = "Set all parameter values to their default settings"
    self.showStatus(msg, cat=TIP)
-
def printSaveQuitInfo(self, event):
    """Flyover help for the Save (or Save & Quit) button."""
    if self._useSimpleAutoClose:
        msg = "Save current entries and exit this edit session"
    else:
        msg = "Save the current entries to "+ \
              self._taskParsObj.getFilename()
    self.showStatus(msg, cat=TIP)
-
def printOpenInfo(self, event):
    """Flyover help for the Open button."""
    msg = "Load and edit parameter values from a user-specified file"
    self.showStatus(msg, cat=TIP)
-
def printCloseInfo(self, event):
    """Flyover help for the Close button."""
    msg = "Close this edit session.  Save first?"
    self.showStatus(msg, cat=TIP)
-
def printAbortInfo(self, event):
    """Flyover help for the Cancel button."""
    msg = "Abort this edit session, discarding any unsaved changes."
    self.showStatus(msg, cat=TIP)
-
def printExecuteInfo(self, event):
    """Flyover help for the Execute button."""
    if self._saveAndCloseOnExec:
        msg = "Execute the task, and save and exit this edit session"
    else:
        msg = "Execute the task; this window will remain open"
    self.showStatus(msg, cat=TIP)
-
-
# Report invalid input values and invoke a query dialog.
def processBadEntries(self, badEntriesList, taskname, canCancel=True):
    """Show a modal dialog listing invalid entries and their reset values.

    badEntriesList holds (parameter, bad value, reset value) triples.
    With canCancel, returns the askokcancel() answer ("OK" keeps the
    reset values); otherwise a plain warning is shown.
    """
    parts = ["Task " + taskname.upper() + " --\n"
             "Invalid values have been entered.\n\n"
             "Parameter   Bad Value   Reset Value\n"]
    for entry in badEntriesList:
        parts.append("%15s %10s %10s\n" % (entry[0], entry[1], entry[2]))
    badEntriesString = "".join(parts)

    if canCancel:
        badEntriesString += '\n"OK" to continue using'+ \
        ' the reset values, or "Cancel" to re-enter values?\n'
    else:
        badEntriesString += \
        "\n All invalid values will return to their 'Reset Value'.\n"

    # Invoke the modal message dialog
    if canCancel:
        return askokcancel("Notice", badEntriesString)
    return showwarning("Notice", badEntriesString)
-
-
def hasUnsavedChanges(self):
    """Whether the GUI holds edits not yet saved (e.g. to a file).
    Subclasses should override; the default errs on the safe side and
    claims everything is ready-to-save."""
    return True
-
-
def closeGui(self, event=None):
    """Close this edit session: prompt about saving, then force the close."""
    self.saveAndClose(askBeforeSave=True, forceClose=True)
-
-
# SAVE/QUIT: save the parameter settings and exit epar
def saveAndClose(self, event=None, askBeforeSave=False, forceClose=False):
    """Save all entries (children first) and possibly close the window.

    With askBeforeSave the user is prompted, and the save is skipped
    when there are no unsaved changes.  The window only closes when
    _useSimpleAutoClose is set or forceClose is passed; a non-child
    window additionally quits the whole session.
    """

    # First, see if we can/should skip the save
    doTheSave = True
    if askBeforeSave:
        if self.hasUnsavedChanges():
            doTheSave = askyesno('Save?', 'Save before closing?')
        else: # no unsaved changes, so no need to save OR even to prompt
            doTheSave = False # no need to save OR prompt

    # first save the child parameters, aborting save if
    # invalid entries were encountered
    if doTheSave and self.checkSetSaveChildren():
        return

    # Save all the entries and verify them, keeping track of the
    # invalid entries which have been reset to their original input values
    self.badEntriesList = None
    if doTheSave:
        self.badEntriesList = self.checkSetSaveEntries()
        # Note, there is a BUG here - if they hit Cancel, the save to
        # file has occurred anyway (they may not care) - need to refactor.

    # If there were invalid entries, prepare the message dialog
    if self.badEntriesList:
        ansOKCANCEL = self.processBadEntries(self.badEntriesList,
                      self.taskName)
        if not ansOKCANCEL: return

    # If there were no invalid entries or the user says OK, continue...

    # Save any GUI settings we care about.  This is a good time to do so
    # even if the window isn't closing, but especially if it is.
    self._saveGuiSettings()

    # Done saving.  Only close the window if we are running in that mode.
    if not (self._useSimpleAutoClose or forceClose):
        return

    # Remove the main epar window
    self.top.focus_set()
    self.top.withdraw()

    # If not a child window, quit the entire session
    if not self.isChild:
        self.top.destroy()
        self.top.quit()

    # Declare the global variables so they can be updated
    global CHILDX
    global CHILDY

    # Reset to the start location (so the next child window cascade
    # begins from the parent position again)
    CHILDX = PARENTX
    CHILDY = PARENTY
-
-
# OPEN: load parameter settings from a user-specified file
def pfopen(self, event=None):
    """Load the parameter settings from a user-specified file (abstract).

    Any epar changes here should be coordinated with the corresponding
    tpar pfopen function.
    """
    # Abstract here; subclasses implement the actual file dialog.
    raise NotImplementedError("EditParDialog is not to be used directly")
-
-
-    def _getSaveAsFilter(self):
-        """ Return a string to be used as the filter arg to the save file
-            dialog during Save-As. Override for more specific behavior. """
-        return "*.*"
-
-
-    def _saveAsPreSave_Hook(self, fnameToBeUsed):
-        """ Allow a subclass any specific checks right before the save. """
-        return None
-
-
-    def _saveAsPostSave_Hook(self, fnameToBeUsed):
-        """ Allow a subclass any specific checks right after the save. """
-        return None
-
-
-    # SAVE AS: save the parameter settings to a user-specified file
-    def saveAs(self, event=None):
-        """ Save the parameter settings to a user-specified file.  Any
-        changes here must be coordinated with the corresponding tpar save_as
-        function. """
-
-        self.debug('Clicked Save as...')
-        # On Linux Pers..Dlg causes the cwd to change, so get a copy of current
-        curdir = os.getcwd()
-
-        # The user wishes to save to a different name
-        writeProtChoice = self._writeProtectOnSaveAs
-        if capable.OF_TKFD_IN_EPAR:
-            # Prompt using native looking dialog
-            fname = asksaveasfilename(parent=self.top,
-                    title='Save Parameter File As',
-                    defaultextension=self._defSaveAsExt,
-                    initialdir=os.path.dirname(self._getSaveAsFilter()))
-        else:
-            # Prompt. (could use Tkinter's FileDialog, but this one is prettier)
-            # initWProtState is only used in the 1st call of a session
-            from . import filedlg
-            fd = filedlg.PersistSaveFileDialog(self.top,
-                         "Save Parameter File As", self._getSaveAsFilter(),
-                         initWProtState=writeProtChoice)
-            if fd.Show() != 1:
-                fd.DialogCleanup()
-                os.chdir(curdir) # in case file dlg moved us
-                return
-            fname = fd.GetFileName()
-            writeProtChoice = fd.GetWriteProtectChoice()
-            fd.DialogCleanup()
-
-        if not fname: return # canceled
-
-        # First check the child parameters, aborting save if
-        # invalid entries were encountered
-        if self.checkSetSaveChildren():
-            os.chdir(curdir) # in case file dlg moved us
-            return
-
-        # Run any subclass-specific steps right before the save
-        self._saveAsPreSave_Hook(fname)
-
-        # Verify all the entries (without save), keeping track of the invalid
-        # entries which have been reset to their original input values
-        self.badEntriesList = self.checkSetSaveEntries(doSave=False)
-
-        # If there were invalid entries, prepare the message dialog
-        if self.badEntriesList:
-            ansOKCANCEL = self.processBadEntries(self.badEntriesList,
-                          self.taskName)
-            if not ansOKCANCEL:
-                os.chdir(curdir) # in case file dlg moved us
-                return
-
-        # If there were no invalid entries or the user says OK, finally
-        # save to their stated file.  Since we have already processed the
-        # bad entries, there should be none returned.
-        mstr = "TASKMETA: task="+self.taskName+" package="+self.pkgName
-        if self.checkSetSaveEntries(doSave=True, filename=fname, comment=mstr,
-                                    set_ro=writeProtChoice,
-                                    overwriteRO=True):
-            os.chdir(curdir) # in case file dlg moved us
-            raise Exception("Unexpected bad entries for: "+self.taskName)
-
-        # Run any subclass-specific steps right after the save
-        self._saveAsPostSave_Hook(fname)
-
-        os.chdir(curdir) # in case file dlg moved us
-
-
-    # EXECUTE: save the parameter settings and run the task
-    def execute(self, event=None):
-
-        self.debug('Clicked Execute')
-        # first save the child parameters, aborting save if
-        # invalid entries were encountered
-        if self.checkSetSaveChildren():
-            return
-
-        # If we are only executing (no save and close) do so here and return
-        if not self._saveAndCloseOnExec:
-            # First check the parameter values
-            self.badEntriesList = self.checkSetSaveEntries(doSave=False)
-            # If there were invalid entries, show the message dialog
-            if self.badEntriesList:
-                ansOKCANCEL = self.processBadEntries(self.badEntriesList,
-                              self.taskName)
-                if not ansOKCANCEL: return
-            self.showStatus("Task "+self.taskName+" is running...", keep=2)
-            self._executed = True # note for later use
-            self.runTask()
-            return
-
-        # Now save the parameter values of the parent
-        self.badEntriesList = self.checkSetSaveEntries()
-
-        # If there were invalid entries in the parent epar dialog, prepare
-        # the message dialog
-        if self.badEntriesList:
-            ansOKCANCEL = self.processBadEntries(self.badEntriesList,
-                          self.taskName)
-            if not ansOKCANCEL: return
-
-        # If there were no invalid entries or the user said OK
-
-        # Save any GUI settings we care about since window is closing
-        self._saveGuiSettings()
-
-        # Remove the main epar window
-        self.top.focus_set()
-        self.top.withdraw()
-        self.top.destroy()
-
-        print("\nTask "+self.taskName+" is running...\n")
-
-        # Before running the task, clear any already-handled exceptions that
-        # will be erroneously picked up by the task's logger utility.
-        # This is temporary.  Remove this line when logging is fixed.
-        try:
-            sys.exc_clear() # not present in PY3K
-        except AttributeError:
-            pass
-
-        # Run the task
-        try:
-            self._executed = True # note for later use
-            self.runTask()
-        finally:
-            self.top.quit()
-
-        # Declare the global variables so they can be updated
-        global CHILDX
-        global CHILDY
-
-        # Reset to the start location
-        CHILDX = PARENTX
-        CHILDY = PARENTY
-
-
-    # ABORT: abort this epar session
-    def abort(self, event=None):
-
-        # Declare the global variables so they can be updated
-        global CHILDX
-        global CHILDY
-
-        # Reset to the start location
-        CHILDX = PARENTX
-        CHILDY = PARENTY
-
-        # Give focus back to parent window and abort
-        self.top.focus_set()
-        self.top.withdraw()
-
-        self._canceled = True # note for later use
-
-        # Do not destroy the window, just hide it for now.
-        # This is so EXECUTE will not get an error - properly use Mediator.
-        #self.top.destroy()
-        if not self.isChild:
-            self.top.destroy()
-            self.top.quit()
-
-
-    # UNLEARN: unlearn all the parameters by setting their values
-    # back to the system default
-    def unlearn(self, event=None):
-
-        self.debug('Clicked Unlearn')
-        # Reset the values of the parameters
-        self.unlearnAllEntries(self.top.f.canvas.entries)
-        self.freshenFocus()
-
-
-    # HTMLHELP: invoke the HTML help
-    def htmlHelp(self, helpString=None, title=None, istask=False, tag=None):
-        """ Pop up the help in a browser window.  By default, this tries to
-        show the help for the current task.  With the option arguments, it can
-        be used to show any help string. """
-        # Check the help string.  If it turns out to be a URL, launch that,
-        # if not, dump it to a quick and dirty tmp html file to make it
-        # presentable, and pass that file name as the URL.
-        if not helpString:
-            helpString = self.getHelpString(self.pkgName+'.'+self.taskName)
-        if not title:
-            title = self.taskName
-        lwr = helpString.lower()
-        if lwr.startswith("http:") or lwr.startswith("https:") or \
-           lwr.startswith("file:"):
-            url = helpString
-            if tag and url.find('#') < 0:
-                url += '#'+tag
-#           print('LAUNCHING: '+url) # DBG
-            irafutils.launchBrowser(url, subj=title)
-        else:
-            # Write it to a temp HTML file to display
-            (fd, fname) = tempfile.mkstemp(suffix='.html', prefix='editpar_')
-            os.close(fd)
-            f = open(fname, 'w')
-            if istask and self._knowTaskHelpIsHtml:
-                f.write(helpString)
-            else:
-                f.write('<html><head><title>'+title+'</title></head>\n')
-                f.write('<body><h3>'+title+'</h3>\n')
-                f.write('<pre>\n'+helpString+'\n</pre></body></html>')
-            f.close()
-            irafutils.launchBrowser("file://"+fname, subj=title)
-
-
-    def _showAnyHelp(self, kind, tag=None):
-        """ Invoke task/epar/etc. help and put the page in a window.
-        This same logic is used for GUI help, task help, log msgs, etc. """
-
-        # sanity check
-        assert kind in ('epar', 'task', 'log'), 'Unknown help kind: '+str(kind)
-
-        #-----------------------------------------
-        # See if they'd like to view in a browser
-        #-----------------------------------------
-        if self._showHelpInBrowser or (kind == 'task' and
-                                       self._knowTaskHelpIsHtml):
-            if kind == 'epar':
-                self.htmlHelp(helpString=self._appHelpString,
-                              title='Parameter Editor Help')
-            if kind == 'task':
-                self.htmlHelp(istask=True, tag=tag)
-            if kind == 'log':
-                self.htmlHelp(helpString='\n'.join(self._msgHistory),
-                              title=self._appName+' Event Log')
-            return
-
-        #-----------------------------------------
-        # Now try to pop up the regular Tk window
-        #-----------------------------------------
-        wins = {'epar':self.eparHelpWin,
-                'task':self.irafHelpWin,
-                'log': self.logHistWin, }
-        window = wins[kind]
-        try:
-            if window.state() != NORMAL:
-                window.deiconify()
-            window.tkraise()
-            return
-        except (AttributeError, TclError):
-            pass
-
-        #---------------------------------------------------------
-        # That didn't succeed (window is still None), so build it
-        #---------------------------------------------------------
-        if kind == 'epar':
-            self.eparHelpWin = self.makeHelpWin(self._appHelpString,
-                                                title='Parameter Editor Help')
-        if kind == 'task':
-            # Acquire the task help as a string
-            # Need to include the package name for the task to
-            # avoid name conflicts with tasks from other packages. WJH
-            self.irafHelpWin = self.makeHelpWin(self.getHelpString(
-                                                self.pkgName+'.'+self.taskName))
-        if kind == 'log':
-            self.logHistWin = self.makeHelpWin('\n'.join(self._msgHistory),
-                                               title=self._appName+' Event Log')
-
-
-    def canceled(self):
-        """ Did the user click Cancel? (or close us via the window manager) """
-        return self._canceled
-
-
-    def executed(self):
-        """ Did the user click Execute? """
-        return self._executed
-
-
-    # Get the task help in a string
-    def getHelpString(self, taskname):
-        """ Provide a task-specific help string. """
-        return self._taskParsObj.getHelpAsString()
-
-
-    # Set up the help dialog (browser)
-    def makeHelpWin(self, helpString, title="Parameter Editor Help Browser"):
-
-        # Generate a new Toplevel window for the browser
-        # hb = Toplevel(self.top, bg="SlateGray3")
-        hb = Toplevel(self.top, bg=None)
-        hb.title(title)
-        hb.iconLabel = title
-
-        # Set up the Menu Bar
-        hb.menubar = Frame(hb, relief=RIDGE, borderwidth=0)
-        hb.menubar.button = Button(hb.menubar, text="Close",
-                                     relief=RAISED,
-                                     command=hb.destroy)
-        hb.menubar.button.pack()
-        hb.menubar.pack(side=BOTTOM, padx=5, pady=5)
-
-        # Define the Frame for the scrolling Listbox
-        hb.frame = Frame(hb, relief=RIDGE, borderwidth=1)
-
-        # Attach a vertical Scrollbar to the Frame
-        hb.frame.vscroll = Scrollbar(hb.frame, orient=VERTICAL,
-                 width=11, relief=SUNKEN, activerelief=RAISED,
-                 takefocus=FALSE)
-
-        # Define the Listbox and setup the Scrollbar
-        hb.frame.list = Listbox(hb.frame,
-                                relief=FLAT,
-                                height=25,
-                                width=80,
-                                takefocus=FALSE,
-                                selectmode=SINGLE,
-                                selectborderwidth=0)
-        hb.frame.list['yscrollcommand'] = hb.frame.vscroll.set
-
-        hb.frame.vscroll['command'] = hb.frame.list.yview
-        hb.frame.vscroll.pack(side=RIGHT, fill=Y)
-        hb.frame.list.pack(side=TOP, expand=TRUE, fill=BOTH)
-        hb.frame.pack(side=TOP, fill=BOTH, expand=TRUE)
-
-        # Insert each line of the helpString onto the Frame
-        listing = helpString.split('\n')
-        for line in listing:
-
-            # Filter the text *** DO THIS A BETTER WAY ***
-            line = line.replace("\x0e", "")
-            line = line.replace("\x0f", "")
-            line = line.replace("\f", "")
-
-            # Insert the text into the Listbox
-            hb.frame.list.insert(END, line)
-
-        # When the Listbox appears, the listing will be at the beginning
-        y = hb.frame.vscroll.get()[0]
-        hb.frame.list.yview(int(y))
-
-        # enable Page Up/Down keys
-        scroll = hb.frame.list.yview_scroll
-        hb.bind('<Next>', lambda event, fs=scroll: fs(1, "pages"))
-        hb.bind('<Prior>', lambda event, fs=scroll: fs(-1, "pages"))
-
-        # Position this dialog relative to the parent
-        hb.geometry("+%d+%d" % (self.top.winfo_rootx() + HELPX,
-                                     self.top.winfo_rooty() + HELPY))
-        return hb
-
-    def validate(self):
-
-        return 1
-
-
-    def setAllEntriesFromParList(self, aParList, updateModel=False):
-        """ Set all the parameter entry values in the GUI to the values
-            in the given par list. If 'updateModel' is True, the internal
-            param list will be updated to the new values as well as the GUI
-            entries (slower and not always necessary). Note the
-            corresponding TparDisplay method. """
-
-        # Get model data, the list of pars
-        theParamList = self._taskParsObj.getParList() # we may modify members
-
-        if len(aParList) != len(theParamList):
-            showwarning(message="Attempting to set parameter values from a "+ \
-                        "list of different length ("+str(len(aParList))+ \
-                        ") than the number shown here ("+ \
-                        str(len(theParamList))+").  Be aware.",
-                        title="Parameter List Length Mismatch")
-
-        # LOOP THRU GUI PAR LIST
-        for i in range(self.numParams):
-            par = theParamList[i]
-            if par.type == "pset":
-                continue # skip PSET's for now
-            gui_entry = self.entryNo[i]
-
-            # Set the value in the paramList before setting it in the GUI
-            # This may be in the form of a list, or an IrafParList (getValue)
-            if isinstance(aParList, list):
-                # Since "aParList" can have them in different order and number
-                # than we do, we'll have to first find the matching param.
-                found = False
-                for newpar in aParList:
-                    if newpar.name==par.name and newpar.scope==par.scope:
-                        par.set(newpar.value) # same as .get(native=1,prompt=0)
-                        found = True
-                        break
-
-                # Now see if newpar was found in our list
-                if not found:
-                    pnm = par.name
-                    if len(par.scope): pnm = par.scope+'.'+par.name
-                    raise UnfoundParamError('Error - Unfound Parameter! \n\n'+\
-                      'Expected parameter "'+pnm+'" for task "'+ \
-                      self.taskName+'". \nThere may be others...')
-
-            else: # assume has getValue()
-                par.set(aParList.getValue(par.name, native=1, prompt=0))
-
-            # gui holds a str, but par.value is native; conversion occurs
-            gui_entry.forceValue(par.value, noteEdited=False) # no triggers yet
-
-        if updateModel:
-            # Update the model values via checkSetSaveEntries
-            self.badEntriesList = self.checkSetSaveEntries(doSave=False)
-
-            # If there were invalid entries, prepare the message dialog
-            if self.badEntriesList:
-                self.processBadEntries(self.badEntriesList,
-                                       self.taskName, canCancel=False)
-
-
-    def unlearnAllEntries(self, master):
-        """ Method to "unlearn" all the parameter entry values in the GUI
-            and set the parameter back to the default value """
-        for entry in self.entryNo:
-            entry.unlearnValue()
-
-
-    def getValue(self, name, scope=None, native=False):
-        """ Return current par value from the GUI. This does not do any
-        validation, and it it not necessarily the same value saved in the
-        model, which is always behind the GUI setting, in time. This is NOT
-        to be used to get all the values - it would not be efficient. """
-
-        # Get model data, the list of pars
-        theParamList = self._taskParsObj.getParList()
-
-        # NOTE: If par scope is given, it will be used, otherwise it is
-        # assumed to be unneeded and the first name-match is returned.
-        fullName = basicpar.makeFullName(scope, name)
-
-        # Loop over the parameters to find the requested par
-        for i in range(self.numParams):
-            par = theParamList[i] # IrafPar or subclass
-            entry = self.entryNo[i] # EparOption or subclass
-            if par.fullName() == fullName or \
-               (scope == None and par.name == name):
-                if native:
-                    return entry.convertToNative(entry.choice.get())
-                else:
-                    return entry.choice.get()
-        # We didn't find the requested par
-        raise RuntimeError('Could not find par: "'+fullName+'"')
-
-
-    # Read, save, and validate the entries
-    def checkSetSaveEntries(self, doSave=True, filename=None, comment=None,
-                            fleeOnBadVals=False, allowGuiChanges=True,
-                            set_ro=False, overwriteRO=False):
-
-        self.badEntries = []
-        asNative = self._taskParsObj.knowAsNative()
-
-        # Get model data, the list of pars
-        theParamList = self._taskParsObj.getParList()
-
-        # Loop over the parameters to obtain the modified information
-        for i in range(self.numParams):
-
-            par = theParamList[i] # IrafPar or subclass
-            entry = self.entryNo[i] # EparOption or subclass
-            # Cannot change an entry if it is a PSET, just skip
-            if par.type == "pset":
-                continue
-
-            # get current state of par in the gui
-            value = entry.choice.get()
-
-            # Set new values for changed parameters - a bit tricky,
-            # since changes that weren't followed by a return or
-            # tab have not yet been checked.  If we eventually
-            # use a widget that can check all changes, we will
-            # only need to check the isChanged flag.
-            if par.isChanged() or value != entry.previousValue:
-
-                # CHECK: Verify the value. If its invalid (and allowGuiChanges),
-                # the value will be converted to its original valid value.
-                # Maintain a list of the reset values for user notification.
-                # Always call entryCheck, no matter what type of _taskParsObj,
-                # since entryCheck can do some basic type checking.
-                failed = False
-                if entry.entryCheck(repair=allowGuiChanges):
-                    failed = True
-                    self.badEntries.append([entry.name, value,
-                                           entry.choice.get()])
-                    if fleeOnBadVals: return self.badEntries
-                # See if we need to do a more serious validity check
-                elif self._taskParsObj.canPerformValidation():
-                    # if we are planning to save in native type, test that way
-                    if asNative:
-                        try:
-                            value = entry.convertToNative(value)
-                        except:
-                            failed = True
-                            prev = entry.previousValue
-                            self.badEntries.append([entry.name, value, prev])
-                            if fleeOnBadVals: return self.badEntries
-                            if allowGuiChanges: entry.choice.set(prev)
-                    # now try the val in it's validator
-                    if not failed:
-                        valOK, prev = self._taskParsObj.tryValue(entry.name,
-                                                        value, scope=par.scope)
-                        if not valOK:
-                            failed = True
-                            self.badEntries.append([entry.name,str(value),prev])
-                            if fleeOnBadVals: return self.badEntries
-                            if allowGuiChanges: entry.choice.set(prev)
-
-                # get value again in case it changed - this version IS valid
-                value = entry.choice.get()
-                if asNative: value = entry.convertToNative(value)
-
-                # SET: Update the task parameter (also does the conversion
-                # from string)
-                self._taskParsObj.setParam(par.name, value, scope=par.scope,
-                                           check=0, idxHint=i)
-
-        # SAVE: Save results to the given file
-        if doSave:
-            self.debug('Saving...')
-            out = self._doActualSave(filename, comment, set_ro=set_ro,
-                                     overwriteRO=overwriteRO)
-            if len(out):
-                self.showStatus(out, keep=2) # inform user on saves
-
-        return self.badEntries
-
-
-    def _doActualSave(self, fname, comment, set_ro=False):
-        """ Here we call the method on the _taskParsObj to do the actual
-        save.  Return a string result to be printed to the screen. """
-        # do something like
-#       return self._taskParsObj.saveParList(filename=fname, comment=comment)
-        raise NotImplementedError("EditParDialog is not to be used directly")
-
-
-    def checkSetSaveChildren(self, doSave=True):
-        """Check, then set, then save the parameter settings for
-        all child (pset) windows.
-
-        Prompts if any problems are found.  Returns None
-        on success, list of bad entries on failure.
-        """
-        if self.isChild:
-            return
-
-        # Need to get all the entries and verify them.
-        # Save the children in backwards order to coincide with the
-        # display of the dialogs (LIFO)
-        for n in range (len(self.top.childList)-1, -1, -1):
-            self.badEntriesList = self.top.childList[n]. \
-                                  checkSetSaveEntries(doSave=doSave)
-            if self.badEntriesList:
-                ansOKCANCEL = self.processBadEntries(self.badEntriesList,
-                              self.top.childList[n].taskName)
-                if not ansOKCANCEL:
-                    return self.badEntriesList
-            # If there were no invalid entries or the user says OK,
-            # close down the child and increment to the next child
-            self.top.childList[n].top.focus_set()
-            self.top.childList[n].top.withdraw()
-            del self.top.childList[n]
-        # all windows saved successfully
-        return
-
-
-    def _pushMessages(self):
-        """ Internal callback used to make sure the msg list keeps moving. """
-        # This continues to get itself called until no msgs are left in list.
-        self.showStatus('')
-        if len(self._statusMsgsToShow) > 0:
-            self.top.after(200, self._pushMessages)
-
-
-    def debug(self, msg):
-        """ Convenience function.  Use showStatus without puting into GUI. """
-        self.showStatus(msg, cat=DBG)
-
-
-    def showStatus(self, msg, keep=0, cat=None):
-        """ Show the given status string, but not until any given delay from
-            the previous message has expired. keep is a time (secs) to force
-            the message to remain without being overwritten or cleared. cat
-            is a string category used only in the historical log. """
-        # prep it, space-wise
-        msg = msg.strip()
-        if len(msg) > 0:
-            # right here is the ideal place to collect a history of messages
-            forhist = msg
-            if cat: forhist = '['+cat+'] '+msg
-            forhist = time.strftime("%a %H:%M:%S")+': '+forhist
-            self._msgHistory.append(forhist)
-            # now set the spacing
-            msg = '  '+msg
-
-        # stop here if it is a category not shown in the GUI
-        if cat == DBG:
-            return
-
-        # see if we can show it
-        now = time.time()
-        if now >= self._leaveStatusMsgUntil: # we are clear, can show a msg
-            # first see if this msg is '' - if so we will show an important
-            # waiting msg instead of the '', and then pop it off our list
-            if len(msg) < 1 and len(self._statusMsgsToShow) > 0:
-                msg, keep = self._statusMsgsToShow[0] # overwrite both args
-                del self._statusMsgsToShow[0]
-            # now actuall print the status out to the status widget
-            self.top.status.config(text = msg)
-            # reset our delay flag
-            self._leaveStatusMsgUntil = 0
-            if keep > 0:
-                self._leaveStatusMsgUntil = now + keep
-        else:
-            # there is a previous message still up, is this one important?
-            if len(msg) > 0 and keep > 0:
-                # Uh-oh, this is an important message that we don't want to
-                # simply skip, but on the other hand we can't show it yet...
-                # So we add it to _statusMsgsToShow and show it later (asap)
-                if (msg,keep) not in self._statusMsgsToShow:
-                    if len(self._statusMsgsToShow) < 7:
-                        self._statusMsgsToShow.append( (msg,keep) ) # tuple
-                        # kick off timer loop to get this one pushed through
-                        if len(self._statusMsgsToShow) == 1:
-                            self._pushMessages()
-                    else:
-                        # should never happen, but just in case
-                        print("Lost message!: "+msg+" (too far behind...)")
-
-    # Run the task
-    def runTask(self):
-
-        # Use the run method of the IrafTask class
-        # Set mode='h' so it does not prompt for parameters (like IRAF epar)
-        # Also turn on parameter saving
-        try:
-            self._taskParsObj.run(mode='h', _save=1)
-        except taskpars.NoExecError as nee:  # catch only this, let all else thru
-            showwarning(message="No way found to run task\n\n"+\
-                        str(nee), title="Can Not Run Task")
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/eparoption.py b/required_pkgs/stsci.tools/lib/stsci/tools/eparoption.py
deleted file mode 100644
index c9264ad..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/eparoption.py
+++ /dev/null
@@ -1,937 +0,0 @@
-"""eparoption.py: module for defining the various parameter display
-   options to be used for the parameter editor task.  The widget that is used
-   for entering the parameter value is the variant.  Instances should be
-   created using the eparOptionFactory function defined at the end of the
-   module.
-
-   Parameter types:
-   string  - Entry widget
-   *gcur   - NOT IMPLEMENTED AT THIS TIME
-   ukey    - NOT IMPLEMENTED AT THIS TIME
-   pset    - Action button
-   real    - Entry widget
-   int     - Entry widget
-   boolean - Radiobutton widget
-   array real - NOT IMPLEMENTED AT THIS TIME
-   array int  - NOT IMPLEMENTED AT THIS TIME
-
-   Enumerated lists - Menubutton/Menu widget
-
-$Id: eparoption.py 38909 2015-04-08 17:41:07Z bsimon $
-
-M.D. De La Pena, 1999 August 05
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-# System level modules
-import sys, string
-from . import capable
-
-PY3K = sys.version_info[0] > 2
-
-if capable.OF_GRAPHICS:
-    if PY3K:
-        from tkinter import *
-        from tkinter.filedialog import askdirectory, askopenfilename
-    else:
-        from Tkinter import *
-        from tkFileDialog import askdirectory, askopenfilename
-
-else:
-    StringVar = None
-
-# Are we using X? (see description of logic in pyraf's wutil.py)
-USING_X = True
-if sys.platform == 'darwin':
-    junk = ",".join(sys.path)
-    USING_X = junk.lower().find('/pyobjc') < 0
-    del junk
-
-# Constants
-MAXLIST  =  15
-MAXLINES = 100
-XSHIFT   = 110
-DSCRPTN_FLAG = ' (***)'
-
-
-class EparOption(object):
-
-    """EparOption base class
-
-    Implementation for a specific parameter type must implement
-    the makeInputWidget method and must create an attribute `entry'
-    with the base widget created.  The entry widget is used for
-    focus setting and automatic scrolling.  doScroll is a callback to
-    do the scrolling when tab changes focus.
-    """
-
-    # Chosen option
-    choiceClass = StringVar
-
-    def __init__(self, master, statusBar, paramInfo, defaultParamInfo,
-                 doScroll, fieldWidths, defaultsVerb, bg,
-                 indent=False, helpCallbackObj=None, mainGuiObj=None):
-
-        # Connect to the information/status Label
-        self.status = statusBar
-
-        # Hook to allow scroll when this widget gets focus
-        self.doScroll = doScroll
-        # Track selection at the last FocusOut event
-        self.lastSelection = (0,END)
-
-        # A new Frame is created for each parameter entry
-        self.master       = master
-        self.bkgColor     = bg
-        self.master_frame = Frame(self.master, bg=self.bkgColor)
-        self.paramInfo    = paramInfo
-        self.defaultParamInfo = defaultParamInfo
-        self.defaultsVerb = defaultsVerb
-        self.inputWidth   = fieldWidths.get('inputWidth')
-        self.valueWidth   = fieldWidths.get('valueWidth')
-        self.promptWidth  = fieldWidths.get('promptWidth')
-
-        self.choice = self.choiceClass(self.master_frame)
-        self.name = self.paramInfo.name
-        self.value = self.paramInfo.get(field = "p_filename", native = 0,
-                                        prompt = 0)
-        self.previousValue = self.value
-        self._editedCallbackObj = None
-        self._helpCallbackObj = helpCallbackObj
-        self._mainGuiObj = mainGuiObj
-        self._lastWidgetEditedVal = None
-        self._flagNonDefaultVals = False
-        self._flaggedColor = "red"
-
-        # DISABLE any indent for now - not sure why but this causes odd text
-        # field sizes in other (unrelated and unindented) parameters...  Maybe
-        # because it messes with the total width of the window...
-        if 0 and indent:
-            self.spacer = Label(self.master_frame, anchor=W, takefocus=0,
-                                text="", width=3, bg=self.bkgColor)
-            self.spacer.pack(side=LEFT, fill=X, expand=TRUE)
-
-        # Generate the input label
-        if self.paramInfo.get(field = "p_mode") == "h":
-            self.inputLabel = Label(self.master_frame, anchor = W,
-                                    text  = "("+self.getShowName()+")",
-                                    width = self.inputWidth, bg=self.bkgColor)
-        else:
-            self.inputLabel = Label(self.master_frame, anchor = W,
-                                    text  = self.getShowName(),
-                                    width = self.inputWidth, bg=self.bkgColor)
-        self.inputLabel.pack(side = LEFT, fill = X, expand = TRUE)
-
-        # Get the prompt string and determine if special handling is needed
-        # Use the prompt/description from the default version, in case they
-        # have edited theirs - this is not editable - see ticket #803
-        self.prompt = self.defaultParamInfo.get(field="p_prompt", native=0,
-                                                prompt=0)
-
-        # Check the prompt to determine how many lines of valid text exist
-        lines       = self.prompt.split("\n")
-        nlines      = len(lines)
-        promptLines = " " + lines[0]
-        infoLines   = ""
-        blankLineNo = MAXLINES
-        if (nlines > 1):
-            # Keep all the lines of text before the blank line for the prompt
-            for i in range(1, nlines):
-                ntokens = lines[i].split()
-                if ntokens != []:
-                    promptLines = "\n".join([promptLines, lines[i]])
-                else:
-                    blankLineNo = i
-                    break
-        self._flagged = False
-        if promptLines.endswith(DSCRPTN_FLAG):
-            promptLines = promptLines[:-len(DSCRPTN_FLAG)]
-            self._flagged = True
-        fgColor = "black"
-        # turn off this red coloring for the DSCRPTN_FLAG - see #803
-#       if self._flagged: fgColor = "red"
-
-        # Generate the prompt label
-        self.promptLabel = Label(self.master_frame, anchor=W, fg=fgColor,
-                                 text=promptLines, width=self.promptWidth,
-                                 bg=self.bkgColor)
-        self.promptLabel.pack(side=RIGHT, fill=X, expand=TRUE)
-
-        # Settings for subclasses to override in the makeInputWidget method
-        self.isSelectable = True # ie widget has text (num or str) to select
-
-        # Default is none of items on popup menu are activated
-        # These can be changed by the makeInputWidget method to customize
-        # behavior for each widget.
-        self.browserEnabled = DISABLED
-        self.clearEnabled = DISABLED
-        self.unlearnEnabled = DISABLED
-        self.helpEnabled = DISABLED
-        if self._helpCallbackObj != None:
-            self.helpEnabled = NORMAL
-
-        # Generate the input widget depending upon the datatype
-        self.makeInputWidget()
-#       print(self.name, self.__class__) # DBG line
-
-        self.entry.bind('<FocusOut>', self.focusOut, "+")
-        self.entry.bind('<FocusIn>', self.focusIn, "+")
-
-        # Trap keys that leave field and validate entry
-        self.entry.bind('<Return>', self.entryCheck, "+")
-        self.entry.bind('<Shift-Return>', self.entryCheck, "+")
-        self.entry.bind('<Tab>', self.entryCheck, "+")
-        self.entry.bind('<Shift-Tab>', self.entryCheck, "+")
-        self.entry.bind('<Up>', self.entryCheck, "+")
-        self.entry.bind('<Down>', self.entryCheck, "+")
-        try:
-            # special shift-tab binding needed for (some? all?) linux systems
-            self.entry.bind('<KeyPress-ISO_Left_Tab>', self.entryCheck, "+")
-        except TclError:
-            # Ignore exception here, the binding can't be relevant
-            # if ISO_Left_Tab is unknown.
-            pass
-
-        # Bind the right button to a popup menu of choices
-        if USING_X:
-            self.entry.bind('<Button-3>', self.popupChoices)
-        else:
-            self.entry.bind('<Button-2>', self.popupChoices)
-
-        # Pack the parameter entry Frame
-        self.master_frame.pack(side=TOP, fill=X, ipady=1)
-
-        # If there is more text associated with this entry, join all the
-        # lines of text with the blank line.  This is the "special" text
-        # information.
-        if (blankLineNo < (nlines - 1)):
-
-            # Put the text after the blank line into its own Frame
-            self.master.infoText = Frame(self.master)
-
-            for j in range(blankLineNo + 1, nlines):
-                ntokens = lines[j].split()
-                if ntokens != []:
-                    infoLines = "\n".join([infoLines, lines[j]])
-                else:
-                    break
-
-            # Assign the informational text to the label and pack
-            self.master.infoText.label = Label(self.master.infoText,
-                                               text = infoLines,
-                                               anchor = W,
-                                               bg = self.bkgColor)
-            self.master.infoText.label.pack(side = LEFT)
-            self.master.infoText.pack(side = TOP, anchor = W)
-
-    def setFlaggedColor(self, colorstr):
-        self._flaggedColor = colorstr
-
-    def setIsFlagging(self, isFlagging, redrawImmediately):
-        self._flagNonDefaultVals = isFlagging
-        if redrawImmediately:
-            if self._flagNonDefaultVals:
-                curVal = self.choice.get()
-            else: # otherwise we don't care; use None; is ok and faster
-                curVal = None
-            self.flagThisPar(curVal, True)
-
-    def getShowName(self):
-        """ Return the name to be shown in the GUI for this par/option. """
-        return self.name
-
-    def extraBindingsForSelectableText(self):
-        """ Collect in 1 place the bindings needed for watchTextSelection() """
-        # See notes in watchTextSelection
-        self.entry.bind('<FocusIn>', self.watchTextSelection, "+")
-        self.entry.bind('<ButtonRelease-1>', self.watchTextSelection, "+")
-        self.entry.bind('<B1-Motion>', self.watchTextSelection, "+")
-        self.entry.bind('<Shift_L>', self.watchTextSelection, "+")
-        self.entry.bind('<Left>', self.watchTextSelection, "+")
-        self.entry.bind('<Right>', self.watchTextSelection, "+")
-
-    def convertToNative(self, aVal):
-        """ The basic type is natively a string. """
-        if aVal == None: return None
-        return str(aVal)
-
-    def focusOut(self, event=None):
-        """Clear selection (if text is selected in this widget)"""
-        # do nothing if this isn't a text-enabled widget
-        if not self.isSelectable:
-            return
-        if self.entryCheck(event) is None:
-            # Entry value is OK
-            # Save the last selection so it can be restored if we
-            # come right back to this widget.  Then clear the selection
-            # before moving on.
-            entry = self.entry
-            try:
-                if not entry.selection_present():
-                    self.lastSelection = None
-                else:
-                    self.lastSelection = (entry.index(SEL_FIRST),
-                                          entry.index(SEL_LAST))
-            except AttributeError:
-                pass
-            if USING_X and sys.platform == 'darwin':
-                pass # do nothing here - we need it left selected for cut/paste
-            else:
-                entry.selection_clear()
-        else:
-            return "break"
-
-    def watchTextSelection(self, event=None):
-        """ Callback used to see if there is a new text selection. In certain
-        cases we manually add the text to the clipboard (though on most
-        platforms the correct behavior happens automatically). """
-        # Note that this isn't perfect - it is a key click behind when
-        # selections are made via shift-arrow.  If this becomes important, it
-        # can likely be fixed with after().
-        if self.entry.selection_present(): # entry must be text entry type
-            i1 = self.entry.index(SEL_FIRST)
-            i2 = self.entry.index(SEL_LAST)
-            if i1 >= 0 and i2 >= 0 and i2 > i1:
-                sel = self.entry.get()[i1:i2]
-                # Add to clipboard on platforms where necessary.
-                print('selected: "'+sel+'"')
-#               The following is unneeded if the selected text stays selected
-#               when focus is lost or another app is brought to the foreground.
-#               if sel and USING_X and sys.platform == 'darwin':
-#                   clipboard_helper.put(sel, 'PRIMARY')
-
-    def focusIn(self, event=None):
-        """Select all text (if applicable) on taking focus"""
-        try:
-            # doScroll returns false if the call was ignored because the
-            # last call also came from this widget.  That avoids unwanted
-            # scrolls and text selection when the focus moves in and out
-            # of the window.
-            if self.doScroll(event):
-                self.entry.selection_range(0, END) # select all text in widget
-            else:
-                # restore selection to what it was on the last FocusOut
-                if self.lastSelection:
-                    self.entry.selection_range(*self.lastSelection)
-        except AttributeError:
-            pass
-
-    # Check the validity of the entry
-    # If valid, changes the value of the parameter (note that this
-    # is a copy, so change is not permanent until save)
-    # Parameter change also sets the isChanged flag.
-    def entryCheck(self, event=None, repair=True):
-
-        # Make sure the input is legal
-        value = self.choice.get()
-        try:
-            if value != self.previousValue:
-                # THIS will likely get into IrafPar's _coerceOneValue()
-                self.paramInfo.set(value)
-            # fire any applicable triggers, whether value has changed or not
-            self.widgetEdited(action='entry')
-            return None
-        except ValueError as exceptionInfo:
-            # Reset the entry to the previous (presumably valid) value
-            if repair:
-                self.choice.set(self.previousValue)
-                self.status.bell()
-            errorMsg = str(exceptionInfo)
-            if event != None:
-                self.status.config(text = errorMsg)
-            # highlight the text again and terminate processing so
-            # focus stays in this widget
-            self.focusIn(event)
-            return "break"
-
-
-    def widgetEdited(self, event=None, val=None, action='entry', skipDups=True):
-        """ A general method for firing any applicable triggers when
-            a value has been set.  This is meant to be easily callable from any
-            part of this class (or its subclasses), so that it can be called
-            as soon as need be (immed. on click?).  This is smart enough to
-            be called multiple times, itself handling the removal of any/all
-            duplicate successive calls (unless skipDups is False). If val is
-            None, it will use the GUI entry's current value via choice.get().
-            See teal.py for a description of action.
-        """
-
-        # be as lightweight as possible if obj doesn't care about this stuff
-        if not self._editedCallbackObj and not self._flagNonDefaultVals:
-            return
-
-        # get the current value
-        curVal = val # take this first, if it is given
-        if curVal == None:
-            curVal = self.choice.get()
-
-        # do any flagging
-        self.flagThisPar(curVal, False)
-
-        # see if this is a duplicate successive call for the same value
-        if skipDups and curVal==self._lastWidgetEditedVal: return
-
-        # pull trigger
-        if not self._editedCallbackObj: return
-        self._editedCallbackObj.edited(self.paramInfo.scope,
-                                       self.paramInfo.name,
-                                       self.previousValue, curVal,
-                                       action)
-        # for our duplicate checker
-        self._lastWidgetEditedVal = curVal
-
-
-    def focus_set(self, event=None):
-        """Set focus to input widget"""
-        self.entry.focus_set()
-
-
-    # Generate the input widget as appropriate to the parameter datatype
-    def makeInputWidget(self):
-        pass
-
-    def popupChoices(self, event=None):
-        """Popup right-click menu of special parameter operations
-
-        Relies on browserEnabled, clearEnabled, unlearnEnabled, helpEnabled
-        instance attributes to determine which items are available.
-        """
-        # don't bother if all items are disabled
-        if NORMAL not in (self.browserEnabled, self.clearEnabled,
-                          self.unlearnEnabled, self.helpEnabled):
-            return
-
-        self.menu = Menu(self.entry, tearoff = 0)
-        if self.browserEnabled != DISABLED:
-            # Handle file and directory in different functions (tkFileDialog)
-            if capable.OF_TKFD_IN_EPAR:
-                self.menu.add_command(label   = "File Browser",
-                                      state   = self.browserEnabled,
-                                      command = self.fileBrowser)
-                self.menu.add_command(label   = "Directory Browser",
-                                      state   = self.browserEnabled,
-                                      command = self.dirBrowser)
-            # Handle file and directory in the same function (filedlg)
-            else:
-                self.menu.add_command(label   = "File/Directory Browser",
-                                      state   = self.browserEnabled,
-                                      command = self.fileBrowser)
-            self.menu.add_separator()
-        self.menu.add_command(label   = "Clear",
-                              state   = self.clearEnabled,
-                              command = self.clearEntry)
-        self.menu.add_command(label   = self.defaultsVerb,
-                              state   = self.unlearnEnabled,
-                              command = self.unlearnValue)
-        self.menu.add_command(label   = 'Help',
-                              state   = self.helpEnabled,
-                              command = self.helpOnParam)
-
-        # Get the current y-coordinate of the Entry
-        ycoord = self.entry.winfo_rooty()
-
-        # Get the current x-coordinate of the cursor
-        xcoord = self.entry.winfo_pointerx() - XSHIFT
-
-        # Display the Menu as a popup as it is not associated with a Button
-        self.menu.tk_popup(xcoord, ycoord)
-
-    def fileBrowser(self):
-        """Invoke a Tkinter file dialog"""
-        if capable.OF_TKFD_IN_EPAR:
-           fname = askopenfilename(parent=self.entry, title="Select File")
-        else:
-            from . import filedlg
-            self.fd = filedlg.PersistLoadFileDialog(self.entry,
-                              "Select File", "*")
-            if self.fd.Show() != 1:
-                self.fd.DialogCleanup()
-                return
-            fname = self.fd.GetFileName()
-            self.fd.DialogCleanup()
-        if not fname: return # canceled
-
-        self.choice.set(fname)
-        # don't select when we go back to widget to reduce risk of
-        # accidentally typing over the filename
-        self.lastSelection = None
-
-    def dirBrowser(self):
-        """Invoke a Tkinter directory dialog"""
-        if capable.OF_TKFD_IN_EPAR:
-            fname = askdirectory(parent=self.entry, title="Select Directory")
-        else:
-            raise NotImplementedError('Fix popupChoices() logic.')
-        if not fname: return # canceled
-
-        self.choice.set(fname)
-        # don't select when we go back to widget to reduce risk of
-        # accidentally typing over the filename
-        self.lastSelection = None
-
-    def clearEntry(self):
-        """Clear just this Entry"""
-        self.entry.delete(0, END)
-
-    def forceValue(self, newVal, noteEdited=False):
-        """Force-set a parameter entry to the given value"""
-        if newVal == None: newVal = ""
-        self.choice.set(newVal)
-        if noteEdited:
-            self.widgetEdited(val=newVal, skipDups=False)
-        # WARNING: the value of noteEdited really should be false (default)
-        # in most cases because we need the widgetEdited calls to be arranged
-        # at one level higher than we are (single param).  We need to allow the
-        # caller to first loop over all eparoptions, setting their values
-        # without triggering anything, and THEN go through again and run any
-        # triggers.
-
-    def unlearnValue(self):
-        """Unlearn a parameter value by setting it back to its default"""
-        defaultValue = self.defaultParamInfo.get(field = "p_filename",
-                            native = 0, prompt = 0)
-        self.choice.set(defaultValue)
-
-    def helpOnParam(self):
-        """ Try to display help specific to this parameter. """
-        if self._helpCallbackObj != None:
-            self._helpCallbackObj.showParamHelp(self.name)
-
-    def setEditedCallbackObj(self, ecbo):
-        """ Sets a callback object to be triggered when this option/parameter
-            is edited.  The object is expected to have an "edited()" method
-            which takes args as shown where it is called in widgetEdited. """
-        self._editedCallbackObj = ecbo
-
-    def setActiveState(self, active):
-        """ Use this to enable or disable (grey out) a parameter. """
-        st = DISABLED
-        if active: st = NORMAL
-        self.entry.configure(state=st)
-        self.inputLabel.configure(state=st)
-        self.promptLabel.configure(state=st)
-
-    def flagThisPar(self, currentVal, force):
-        """ If this par's value is different from the default value, it is here
-        that we flag it somehow as such.  This basic version simply makes the
-        surrounding text red (or returns it to normal). May be overridden.
-        Leave force at False if you want to allow this method to make smart
-        time-saving decisions about when it can skip recoloring because it is
-        already the right color. Set force to true if you think we got out
-        of sync and need to be fixed. """
-
-        # Get out ASAP if we can
-        if (not force) and (not self._flagNonDefaultVals): return
-
-        # handle simple case before comparing values (quick return)
-        if force and not self._flagNonDefaultVals:
-            self._flagged = False
-            self.promptLabel.configure(fg="black")
-            return
-
-        # Get/format values to compare
-        currentNative = self.convertToNative(currentVal)
-        defaultNative = self.convertToNative(self.defaultParamInfo.value)
-        # par.value is same as par.get(native=1,prompt=0)
-
-        # flag or unflag as needed
-        if currentNative != defaultNative:
-            if not self._flagged or force:
-                self._flagged = True
-                self.promptLabel.configure(fg=self._flaggedColor) # was red
-        else: # same as def
-            if self._flagged or force:
-                self._flagged = False
-                self.promptLabel.configure(fg="black")
-        # ['red','blue','green','purple','yellow','orange','black']
-
-
-class EnumEparOption(EparOption):
-
-    def makeInputWidget(self):
-
-        self.unlearnEnabled = NORMAL
-        self.isSelectable = False
-
-        # Set the initial value for the button
-        self.choice.set(self.value)
-
-        # Need to adjust the value width so the menu button is
-        # aligned properly
-        if USING_X:
-            self.valueWidth = self.valueWidth - 4
-        else:
-            pass
-#           self.valueWidth = self.valueWidth - 0 # looks right on Aqua
-
-        # Generate the button
-        self.entry = Menubutton(self.master_frame,
-                                 width  = self.valueWidth,
-                                 text   = self.choice.get(),      # label
-                                 relief = RAISED,
-                                 anchor = W,                      # alignment
-                                 textvariable = self.choice,      # var to sync
-                                 indicatoron  = 1,
-                                 takefocus    = 1,
-                                 highlightthickness = 1,
-                                 activeforeground='black',
-                                 fg='black',
-                                 bg=self.bkgColor)
-
-        self.entry.menu = Menu(self.entry, tearoff=0,
-                               postcommand=self.postcmd,
-                               fg = 'black',
-                               bg=self.bkgColor)
-
-        # Generate the dictionary of shortcuts using first letter,
-        # second if first not available, etc.
-        self.shortcuts = {}
-        trylist = self.paramInfo.choice
-        underline = {}
-        charset = string.ascii_lowercase + string.digits
-        i = 0
-        while trylist:
-            trylist2 = []
-            for option in trylist:
-                # shortcuts dictionary is case-insensitive
-                letter = option[i:i+1].lower()
-                if letter in self.shortcuts:
-                    # will try again with next letter
-                    trylist2.append(option)
-                elif letter:
-                    if letter in charset:
-                        self.shortcuts[letter] = option
-                        self.shortcuts[letter.upper()] = option
-                        underline[option] = i
-                    else:
-                        # only allow letters, numbers to be shortcuts
-                        # keep going in case this is an embedded blank (e.g.)
-                        trylist2.append(option)
-                else:
-                    # no letters left, so no shortcut for this item
-                    underline[option] = -1
-            trylist = trylist2
-            i = i+1
-
-        # Generate the menu options with shortcuts underlined
-        for option in self.paramInfo.choice:
-            lbl = option
-            if lbl=='-': lbl = ' -' # Tk treats '-' as a separator request
-            self.entry.menu.add_radiobutton(label       = lbl,
-                                            value       = option,
-                                            variable    = self.choice,
-                                            command     = self.selected,
-                                            indicatoron = 0,
-                                            underline   = underline[option])
-
-        # set up a pointer from the menubutton back to the menu
-        self.entry['menu'] = self.entry.menu
-
-        self.entry.pack(side = LEFT)
-
-        # shortcut keys jump to items
-        for letter in self.shortcuts:
-            self.entry.bind('<%s>' % letter, self.keypress)
-
-        # Left button sets focus (as well as popping up menu)
-        self.entry.bind('<Button-1>', self.focus_set)
-
-    def keypress(self, event):
-        """Allow keys typed in widget to select items"""
-        try:
-            self.choice.set(self.shortcuts[event.keysym])
-        except KeyError:
-            # key not found (probably a bug, since we intend to catch
-            # only events from shortcut keys, but ignore it anyway)
-            pass
-
-    def postcmd(self):
-        """Make sure proper entry is activated when menu is posted"""
-        value = self.choice.get()
-        try:
-            index = self.paramInfo.choice.index(value)
-            self.entry.menu.activate(index)
-        except ValueError:
-            # initial null value may not be in list
-            pass
-
-    def selected(self):
-        """They have chosen an enumerated option."""
-        self.widgetEdited(action='entry') # kick off any checks that need doin
-
-#   def setActiveState(self, active):
-#       [...]
-#       for i in range(len(self.paramInfo.choice)):  # this doesn't seem to
-#           self.entry.menu.entryconfig(i, state=st) # make the menu text grey
-#       [...]
-
-
-
-class BooleanEparOption(EparOption):
-
-    def convertToNative(self, aVal):
-        """ Convert to native bool; interpret certain strings. """
-        if aVal == None: return None
-        if isinstance(aVal, bool): return aVal
-        # otherwise interpret strings
-        return str(aVal).lower() in ('1','on','yes','true')
-
-    def makeInputWidget(self):
-
-        self.unlearnEnabled = NORMAL
-        self.isSelectable = False
-
-        # Need to buffer the value width so the radio buttons and
-        # the adjoining labels are aligned properly
-        self.valueWidth = self.valueWidth + 10
-        if USING_X:
-            self.padWidth = (self.valueWidth // 2) + 5 # looks right
-        else:
-            self.padWidth = 2 # looks right on Aqua
-
-        # boolean parameters have 3 values: yes, no & undefined
-        # Just display two choices (but variable may initially be
-        # undefined)
-        self.choice.set(self.value)
-
-        self.entry = Frame(self.master_frame,
-                           relief    = FLAT,
-                           width     = self.valueWidth,
-                           takefocus = 1,
-                           highlightthickness = 1,
-                           bg=self.bkgColor,
-                           highlightbackground=self.bkgColor)
-        if not USING_X:
-            spacerL= Label(self.entry, takefocus=0, text="", width=2,
-                           bg=self.bkgColor)
-            spacerL.pack(side=LEFT, fill=X, expand=TRUE)
-        self.rbyes = Radiobutton(self.entry, text = "Yes",
-                                 variable    = self.choice,
-                                 value       = "yes",
-                                 anchor      = W,
-                                 takefocus   = 0,
-                                 underline   = 0,
-                                 bg = self.bkgColor,
-                                 highlightbackground=self.bkgColor)
-        self.rbyes.pack(side=LEFT, ipadx=self.padWidth)
-        if not USING_X:
-            spacerM= Label(self.entry, takefocus=0, text="", width=3,
-                           bg=self.bkgColor)
-            spacerM.pack(side=LEFT, fill=X, expand=TRUE)
-            spacerR = Label(self.entry, takefocus=0, text="", width=2,
-                           bg=self.bkgColor)
-            spacerR.pack(side=RIGHT, fill=X, expand=TRUE)
-        self.rbno  = Radiobutton(self.entry, text = "No",
-                                 variable    = self.choice,
-                                 value       = "no",
-                                 anchor      = W,
-                                 takefocus   = 0,
-                                 underline   = 0,
-                                 bg = self.bkgColor,
-                                 highlightbackground=self.bkgColor)
-        self.rbno.pack(side = RIGHT, ipadx = self.padWidth)
-        self.entry.pack(side = LEFT)
-
-        # keyboard accelerators
-        # Y/y sets yes, N/n sets no, space toggles selection
-        self.entry.bind('<y>', self.set)
-        self.entry.bind('<Y>', self.set)
-        self.entry.bind('<n>', self.unset)
-        self.entry.bind('<N>', self.unset)
-        self.entry.bind('<space>', self.toggle)
-        # When variable changes, make sure widget gets focus
-        self.choice.trace("w", self.trace)
-
-        # Right-click menu is bound to individual widgets too
-        if USING_X:
-            self.rbno.bind('<Button-3>', self.popupChoices)
-            self.rbyes.bind('<Button-3>', self.popupChoices)
-        else:
-            self.rbno.bind('<Button-2>', self.popupChoices)
-            self.rbyes.bind('<Button-2>', self.popupChoices)
-            spacerM.bind('<Button-2>', self.popupChoices)
-
-        # Regular selection - allow immediate trigger/check
-        self.rbyes.bind('<Button-1>', self.boolWidgetEditedYes)
-        self.rbno.bind('<Button-1>', self.boolWidgetEditedNo)
-
-    def trace(self, *args):
-        self.entry.focus_set()
-
-    # Only needed over widgetEdited because the Yes isn't set yet
-    def boolWidgetEditedYes(self, event=None): self.widgetEdited(val="yes")
-
-    # Only needed over widgetEdited because the No isn't set yet
-    def boolWidgetEditedNo(self, event=None): self.widgetEdited(val="no")
-
-    def set(self, event=None):
-        """Set value to Yes"""
-        self.rbyes.select()
-        self.widgetEdited()
-
-    def unset(self, event=None):
-        """Set value to No"""
-        self.rbno.select()
-        self.widgetEdited()
-
-    def toggle(self, event=None):
-        """Toggle value between Yes and No"""
-        if self.choice.get() == "yes":
-            self.rbno.select()
-        else:
-            self.rbyes.select()
-        self.widgetEdited()
-
-    def setActiveState(self, active):
-        st = DISABLED
-        if active: st = NORMAL
-        self.rbyes.configure(state=st)
-        self.rbno.configure(state=st)
-        self.inputLabel.configure(state=st)
-        self.promptLabel.configure(state=st)
-
-
-class StringEparOption(EparOption):
-
-    def makeInputWidget(self):
-
-        self.browserEnabled = NORMAL
-        self.clearEnabled = NORMAL
-        self.unlearnEnabled = NORMAL
-
-        self.choice.set(self.value)
-        self.entry = Entry(self.master_frame, width = self.valueWidth,
-                     textvariable = self.choice) # , bg=self.bkgColor)
-        self.entry.pack(side = LEFT, fill = X, expand = TRUE)
-#       self.extraBindingsForSelectableText() # do not use yet
-
-
-class ActionEparButton(EparOption):
-
-    def getButtonLabel(self):
-        return self.value
-
-    def makeInputWidget(self):
-#       self.choice.set(self.value)
-
-        self.browserEnabled = DISABLED
-        self.clearEnabled = DISABLED
-        self.unlearnEnabled = DISABLED
-        self.helpEnabled = NORMAL
-
-        # Need to adjust the value width so the button is aligned properly
-        if USING_X:
-            self.valueWidth = self.valueWidth - 3
-        else:
-            self.valueWidth = self.valueWidth - 2
-
-        self.isSelectable = False
-
-        # Generate the button
-        self.entry = Button(self.master_frame,
-                            width   = self.valueWidth,
-                            text    = self.getButtonLabel(),
-                            relief  = RAISED,
-                            background = self.bkgColor,
-                            highlightbackground = self.bkgColor,
-                            command = self.clicked)
-        self.entry.pack(side = LEFT)
-
-    def clicked(self):
-        raise NotImplementedError('clicked() must be implemented')
-
-    def unlearnValue(self):
-        pass
-
-
-# widget class that works for numbers and arrays of numbers
-
-class NumberEparOption(EparOption):
-
-    def convertToNative(self, aVal):
-        """ Natively as an int. """
-        if aVal in (None, '', 'None', 'NONE', 'INDEF'): return None
-        return int(aVal)
-
-    def notNull(self, value):
-        vsplit = value.split()
-        return vsplit.count("INDEF") != len(vsplit)
-
-    def makeInputWidget(self):
-
-        self.browserEnabled = DISABLED
-        self.clearEnabled = NORMAL
-        self.unlearnEnabled = NORMAL
-
-        # Retain the original parameter value in case of bad entry
-        self.previousValue = self.value
-
-        self.choice.set(self.value)
-        self.entry = Entry(self.master_frame, width = self.valueWidth,
-                           textvariable = self.choice) #, bg=self.bkgColor)
-        self.entry.pack(side = LEFT)
-#       self.extraBindingsForSelectableText() # do not use yet
-
-    # Check the validity of the entry
-    # Note that doing this using the parameter set method automatically
-    # checks max, min, special value (INDEF, parameter indirection), etc.
-    def entryCheck(self, event = None, repair = True):
-        """ Ensure any INDEF entry is uppercase, before base class behavior """
-        valupr = self.choice.get().upper()
-        if valupr.strip() == 'INDEF':
-            self.choice.set(valupr)
-        return EparOption.entryCheck(self, event, repair = repair)
-
-# numeric widget class specific to floats
-
-class FloatEparOption(NumberEparOption):
-
-    def convertToNative(self, aVal):
-        """ Natively as a float. """
-        if aVal in (None, '', 'None', 'NONE', 'INDEF'): return None
-        return float(aVal)
-
-
-# EparOption values for non-string types
-_eparOptionDict = { "b": BooleanEparOption,
-                    "r": FloatEparOption,
-                    "R": FloatEparOption,
-                    "d": FloatEparOption,
-                    "I": NumberEparOption,
-                    "i": NumberEparOption,
-                    "z": ActionEparButton,
-                    "ar": FloatEparOption,
-                    "ai": NumberEparOption,
-                  }
-
-def eparOptionFactory(master, statusBar, param, defaultParam,
-                      doScroll, fieldWidths,
-                      plugIn=None, editedCallbackObj=None,
-                      helpCallbackObj=None, mainGuiObj=None,
-                      defaultsVerb="Default", bg=None, indent=False,
-                      flagging=False, flaggedColor=None):
-
-    """Return EparOption item of appropriate type for the parameter param"""
-
-    # Allow passed-in overrides
-    if plugIn != None:
-        eparOption = plugIn
-
-    # If there is an enumerated list, regardless of datatype use EnumEparOption
-    elif param.choice != None:
-        eparOption = EnumEparOption
-
-    else:
-        # Use String for types not in the dictionary
-        eparOption = _eparOptionDict.get(param.type, StringEparOption)
-
-    # Create it
-    eo = eparOption(master, statusBar, param, defaultParam, doScroll,
-                    fieldWidths, defaultsVerb, bg,
-                    indent=indent, helpCallbackObj=helpCallbackObj,
-                    mainGuiObj=mainGuiObj)
-    eo.setEditedCallbackObj(editedCallbackObj)
-    eo.setIsFlagging(flagging, False)
-    if flaggedColor:
-        eo.setFlaggedColor(flaggedColor)
-    return eo
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/filedlg.py b/required_pkgs/stsci.tools/lib/stsci/tools/filedlg.py
deleted file mode 100644
index 374fc5c..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/filedlg.py
+++ /dev/null
@@ -1,445 +0,0 @@
-####
-#       Class FileDialog
-#
-#       Purpose
-#       -------
-#
-#       FileDialog's are widgets that allow one to select file names by
-#       clicking on file names, directory names, filters, etc.
-#
-#       Standard Usage
-#       --------------
-#
-#       F = FileDialog(widget, some_title, some_filter)
-#       if F.Show() != 1:
-#               F.DialogCleanup()
-#       return
-#               file_name = F.GetFileName()
-#               F.DialogCleanup()
-####
-"""
-$Id: filedlg.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import absolute_import, division # confidence high
-
-import sys, os
-from . import capable
-
-PY3K = sys.version_info[0] > 2
-if PY3K:
-    from subprocess import getoutput
-else:
-    from commands import getoutput
-
-if capable.OF_GRAPHICS:
-    if PY3K:
-        import tkinter as Tkinter
-    else:
-        import Tkinter
-        
-    from . import alert
-    from .dialog import *
-else:
-    ModalDialog = object
-
-
-class FileDialog(ModalDialog):
-
-    # constructor
-
-    lastWrtPrtChoice = None
-
-    def __init__(self, widget, title, filter="*", initWProtState=None):
-        """ Supply parent widget, title, filter, and initWProtState (True or
-        False).  Set initWProtState to None to hide the write-protect
-        check-box. """
-        self.widget = widget
-        self.filter = filter.strip()
-        self.orig_dir = os.getcwd()
-        self.cwd = os.getcwd()       # the logical current working directory
-        self.showChmod = initWProtState != None
-        # normally we use persistence for lastWrtPrtChoice; use this 1st time
-        if FileDialog.lastWrtPrtChoice == None:
-            FileDialog.lastWrtPrtChoice = initWProtState
-        # Allow a start-directory as part of the given filter
-        if self.filter.find(os.sep) >= 0:
-            self.cwd = os.path.dirname(self.filter)
-            self.filter = os.path.basename(self.filter) # do this second!
-        # main Dialog code
-        Dialog.__init__(self, widget)
-
-    # setup routine called back from Dialog
-
-    def SetupDialog(self):
-
-        # directory label
-
-        self.dirFrame = Frame(self.top)
-        self.dirFrame['relief'] = 'raised'
-        self.dirFrame['bd']      = '2'
-        self.dirFrame.pack({'expand':'no', 'side':'top', 'fill':'both'})
-        self.dirLabel = Label(self.dirFrame)
-        self.dirLabel["text"] = "Directory:"
-        self.dirLabel.pack({'expand':'no', 'side':'left', 'fill':'none'})
-
-        # editable filter
-
-        self.filterFrame = Frame(self.top)
-        self.filterFrame['relief'] = 'raised'
-        self.filterFrame['bd']   = '2'
-        self.filterFrame.pack({'expand':'no', 'side':'top', 'fill':'both'})
-        self.filterLabel = Label(self.filterFrame)
-        self.filterLabel["text"] = "Filter:"
-        self.filterLabel.pack({'expand':'no', 'side':'left', 'fill':'none'})
-        self.filterEntry = Entry(self.filterFrame)
-        self.filterEntry.bind('<Return>', self.FilterReturnKey)
-        self.filterEntry["width"]  = "40"
-        self.filterEntry["relief"] = "ridge"
-        self.filterEntry.pack({'expand':'yes', 'side':'right', 'fill':'x'})
-        self.filterEntry.insert(0, self.filter)
-
-        # the directory and file listboxes
-
-        self.listBoxFrame = Frame(self.top)
-        self.listBoxFrame['relief'] = 'raised'
-        self.listBoxFrame['bd']  = '2'
-        self.listBoxFrame.pack({'expand':'yes', 'side' :'top',
-                'pady' :'2', 'padx': '0', 'fill' :'both'})
-        self.CreateDirListBox()
-        self.CreateFileListBox()
-        self.UpdateListBoxes()
-
-        # write-protect option
-
-        junk = FileDialog.lastWrtPrtChoice
-        if junk == None: junk = 0
-        self.wpVar = IntVar(value=junk) # use class attr
-        if self.showChmod:
-            self.writeProtFrame = Frame(self.top)
-            self.writeProtFrame['relief'] = 'raised'
-            self.writeProtFrame['bd'] = '2'
-            self.writeProtFrame.pack({'expand':'no','side':'top','fill':'both'})
-            self.wpButton = Checkbutton(self.writeProtFrame,
-                                        text="Write-protect after save",
-                                        command=self.wrtPrtClick,
-                                        var=self.wpVar)
-            self.wpButton.pack({'expand':'no', 'side':'left'})
-
-        # editable filename
-
-        self.fileNameFrame = Frame(self.top)
-        self.fileNameFrame.pack({'expand':'no', 'side':'top', 'fill':'both'})
-        self.fileNameFrame['relief'] = 'raised'
-        self.fileNameFrame['bd']         = '2'
-        self.fileNameLabel = Label(self.fileNameFrame)
-        self.fileNameLabel["text"] = "File:"
-        self.fileNameLabel.pack({'expand':'no', 'side':'left', 'fill':'none'})
-        self.fileNameEntry = Entry(self.fileNameFrame)
-        self.fileNameEntry["width"]  = "40"
-        self.fileNameEntry["relief"] = "ridge"
-        self.fileNameEntry.pack({'expand':'yes', 'side':'right', 'fill':'x',
-                                 'pady': '2'})
-        self.fileNameEntry.bind('<Return>', self.FileNameReturnKey)
-
-        # buttons - ok, filter, cancel
-
-        self.buttonFrame = Frame(self.top)
-        self.buttonFrame['relief'] = 'raised'
-        self.buttonFrame['bd']   = '2'
-        self.buttonFrame.pack({'expand':'no', 'side':'top', 'fill':'x'})
-        self.okButton = Button(self.buttonFrame)
-        self.okButton["text"]     = "OK"
-        self.okButton["command"]   = self.OkPressed
-        self.okButton["width"] = 8
-        self.okButton.pack({'expand':'yes', 'pady':'2', 'side':'left'})
-        self.filterButton = Button(self.buttonFrame)
-        self.filterButton["text"]         = "Filter"
-        self.filterButton["command"]   = self.FilterPressed
-        self.filterButton["width"] = 8
-        self.filterButton.pack({'expand':'yes', 'pady':'2', 'side':'left'})
-        button = Button(self.buttonFrame)
-        button["text"] = "Cancel"
-        button["command"] = self.CancelPressed
-        button["width"] = 8
-        button.pack({'expand':'yes', 'pady':'2', 'side':'left'})
-
-    # create the directory list box
-
-    def CreateDirListBox(self):
-        frame = Frame(self.listBoxFrame)
-        frame.pack({'expand':'yes', 'side' :'left', 'pady' :'1',
-                'fill' :'both'})
-        frame['relief'] = 'raised'
-        frame['bd']      = '2'
-        filesFrame = Frame(frame)
-        filesFrame['relief'] = 'flat'
-        filesFrame['bd']         = '2'
-        filesFrame.pack({'side':'top', 'expand':'no', 'fill':'x'})
-        label = Label(filesFrame)
-        label['text'] = 'Directories:'
-        label.pack({'side':'left', 'expand':'yes', 'anchor':'w',
-                'fill':'none'})
-        scrollBar = Scrollbar(frame, {'orient':'vertical'})
-        scrollBar.pack({'expand':'no', 'side':'right', 'fill':'y'})
-        self.dirLb = Listbox(frame, {'yscroll':scrollBar.set})
-        self.dirLb.pack({'expand':'yes', 'side' :'top', 'pady' :'1',
-                'fill' :'both'})
-        self.dirLb.bind('<1>', self.DoSelection)
-        self.dirLb.bind('<Double-Button-1>', self.DoDoubleClickDir)
-        scrollBar['command'] = self.dirLb.yview
-
-    # create the files list box
-
-    def CreateFileListBox(self):
-        frame = Frame(self.listBoxFrame)
-        frame['relief'] = 'raised'
-        frame['bd']      = '2'
-        frame.pack({'expand':'yes', 'side' :'left', 'pady' :'1', 'padx' :'1',
-                'fill' :'both'})
-        filesFrame = Frame(frame)
-        filesFrame['relief'] = 'flat'
-        filesFrame['bd']         = '2'
-        filesFrame.pack({'side':'top', 'expand':'no', 'fill':'x'})
-        label = Label(filesFrame)
-        label['text'] = 'Files:'
-        label.pack({'side':'left', 'expand':'yes', 'anchor':'w',
-                'fill':'none'})
-        scrollBar = Scrollbar(frame, {'orient':'vertical'})
-        scrollBar.pack({'side':'right', 'fill':'y'})
-        self.fileLb = Listbox(frame, {'yscroll':scrollBar.set})
-        self.fileLb.pack({'expand':'yes', 'side' :'top', 'pady' :'0',
-                'fill' :'both'})
-        self.fileLb.bind('<1>', self.DoSelection)
-        self.fileLb.bind('<Double-Button-1>', self.DoDoubleClickFile)
-        scrollBar['command'] = self.fileLb.yview
-
-    # update the listboxes and directory label after a change of directory
-
-    def UpdateListBoxes(self):
-        cwd = self.cwd
-        self.fileLb.delete(0, self.fileLb.size())
-        filter = self.filterEntry.get()
-        # '*' will list recurively, we don't want that.
-        if filter == '*':
-            filter = ''
-        cmd = "/bin/ls " + os.path.join(cwd, filter)
-        cmdOutput = getoutput(cmd)
-        files = cmdOutput.split("\n")
-        files.sort()
-        for i in range(len(files)):
-            if os.path.isfile(os.path.join(cwd, files[i])):
-                self.fileLb.insert('end', os.path.basename(files[i]))
-        self.dirLb.delete(0, self.dirLb.size())
-        files = os.listdir(cwd)
-        if cwd != '/':
-            files.append('..')
-        files.sort()
-        for i in range(len(files)):
-            if os.path.isdir(os.path.join(cwd, files[i])):
-                self.dirLb.insert('end', files[i])
-        self.dirLabel['text'] = "Directory:" + self.cwd_print()
-
-    # selection handlers
-
-    def DoSelection(self, event):
-        lb = event.widget
-        field = self.fileNameEntry
-        field.delete(0, AtEnd())
-        field.insert(0, os.path.join(self.cwd_print(), lb.get(lb.nearest(event.y))))
-        if Tkinter.TkVersion >= 4.0:
-            lb.select_clear(0, "end")
-            lb.select_anchor(lb.nearest(event.y))
-        else:
-            lb.select_clear()
-            lb.select_from(lb.nearest(event.y))
-
-    def DoDoubleClickDir(self, event):
-        lb = event.widget
-        self.cwd = os.path.join(self.cwd, lb.get(lb.nearest(event.y)))
-        self.UpdateListBoxes()
-
-    def DoDoubleClickFile(self, event):
-        self.OkPressed()
-
-    def OkPressed(self):
-        self.TerminateDialog(1)
-
-    def wrtPrtClick(self):
-        FileDialog.lastWrtPrtChoice = self.wpVar.get() # update class attr
-
-    def FileNameReturnKey(self, event):
-        # if its a relative path then include the cwd in the name
-        name = self.fileNameEntry.get().strip()
-        if not os.path.isabs(os.path.expanduser(name)):
-            self.fileNameEntry.delete(0, 'end')
-            self.fileNameEntry.insert(0, os.path.join(self.cwd_print(), name))
-        self.okButton.flash()
-        self.OkPressed()
-
-    def FilterReturnKey(self, event):
-        filter = self.filterEntry.get().strip()
-        self.filterEntry.delete(0, 'end')
-        self.filterEntry.insert(0, filter)
-        self.filterButton.flash()
-        self.UpdateListBoxes()
-
-    def FilterPressed(self):
-        self.UpdateListBoxes()
-
-    def CancelPressed(self):
-        self.TerminateDialog(0)
-
-    def GetFileName(self):
-        return self.fileNameEntry.get()
-
-    def GetWriteProtectChoice(self):
-        return bool(self.wpVar.get())
-
-    # return the logical current working directory in a printable form
-    # ie. without all the X/.. pairs. The easiest way to do this is to
-    # chdir to cwd and get the path there.
-
-    def cwd_print(self):
-        os.chdir(self.cwd)
-        p = os.getcwd()
-        os.chdir(self.orig_dir)
-        return p
-####
-#       Class LoadFileDialog
-#
-#       Purpose
-#       -------
-#
-#       Specialisation of FileDialog for loading files.
-####
-
-class LoadFileDialog(FileDialog):
-
-    def __init__(self, master, title, filter):
-        FileDialog.__init__(self, master, title, filter)
-        self.top.title(title)
-
-    def OkPressed(self):
-        fileName = self.GetFileName()
-        if os.path.exists(fileName) == 0:
-            str = 'File ' + fileName + ' not found.'
-            errorDlg = alert.ErrorDialog(self.top, str)
-            errorDlg.Show()
-            errorDlg.DialogCleanup()
-            return
-        FileDialog.OkPressed(self)
-
-####
-#       Class SaveFileDialog
-#
-#       Purpose
-#       -------
-#
-#       Specialisation of FileDialog for saving files.
-####
-
-class SaveFileDialog(FileDialog):
-
-    def __init__(self, master, title, filter):
-        FileDialog.__init__(self, master, title, filter)
-        self.top.title(title)
-
-    def OkPressed(self):
-        fileName = self.GetFileName()
-        if os.path.exists(fileName) == 1:
-            str = 'File ' + fileName + ' exists.\nDo you wish to overwrite it?'
-            warningDlg = alert.WarningDialog(self.top, str)
-            if warningDlg.Show() == 0:
-                warningDlg.DialogCleanup()
-                return
-            warningDlg.DialogCleanup()
-        FileDialog.OkPressed(self)
-
-#----------------------------------------------------------------------------
-
-#############################################################################
-#
-# Class:   PersistFileDialog
-# Purpose: Essentially the same as FileDialog, except this class contains
-#          a class variable (lastAccessedDir) which keeps track of the last
-#          directory from which a file was chosen.  Subsequent invocations of
-#          this dialog in the same Python session will start up in the last
-#          directory where a file was successfully chosen, rather than in the
-#          current working directory.
-#
-# History: M.D. De La Pena, 08 June 2000
-#
-#############################################################################
-
-class PersistFileDialog(FileDialog):
-
-    # Define a class variable to track the last accessed directory
-    lastAccessedDir = None
-
-    def __init__(self, widget, title, filter="*", initWProtState=None):
-
-        FileDialog.__init__(self, widget, title, filter, initWProtState)
-
-        # If the last accessed directory were not None, start up
-        # the file browser in the last accessed directory.
-        if self.__class__.lastAccessedDir:
-            self.cwd      = self.__class__.lastAccessedDir
-
-    # Override the OkPressed method from the parent in order to
-    # update the class variable.
-    def OkPressed(self):
-        self.__class__.lastAccessedDir = self.cwd_print()
-        self.TerminateDialog(1)
-
-
-#############################################################################
-#
-# Class:   PersistLoadFileDialog
-# Purpose: Essentially the same as LoadFileDialog, except this class invokes
-#          PersistFileDialog instead of FileDialog.
-#
-# History: M.D. De La Pena, 08 June 2000
-#
-#############################################################################
-
-class PersistLoadFileDialog(PersistFileDialog):
-
-    def __init__(self, master, title, filter):
-        PersistFileDialog.__init__(self, master, title, filter)
-        self.top.title(title)
-
-    def OkPressed(self):
-        fileName = self.GetFileName()
-        if os.path.exists(fileName) == 0:
-            str = 'File ' + fileName + ' not found.'
-            errorDlg = alert.ErrorDialog(self.top, str)
-            errorDlg.Show()
-            errorDlg.DialogCleanup()
-            return
-        PersistFileDialog.OkPressed(self)
-
-
-#############################################################################
-#
-# Class:   PersistSaveFileDialog
-# Purpose: Essentially the same as SaveFileDialog, except this class invokes
-#          PersistFileDialog instead of FileDialog.
-#
-#############################################################################
-
-class PersistSaveFileDialog(PersistFileDialog):
-
-    def __init__(self, master, title, filter, initWProtState=None):
-        PersistFileDialog.__init__(self, master, title, filter, initWProtState)
-        self.top.title(title)
-
-    def OkPressed(self):
-        fileName = self.GetFileName()
-        if os.path.exists(fileName) == 1:
-            str = 'File ' + fileName + ' exists.\nDo you wish to overwrite it?'
-            warningDlg = alert.WarningDialog(self.top, str)
-            if warningDlg.Show() == 0:
-                warningDlg.DialogCleanup()
-                return
-            warningDlg.DialogCleanup()
-        PersistFileDialog.OkPressed(self)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/fileutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/fileutil.py
deleted file mode 100644
index 648bbda..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/fileutil.py
+++ /dev/null
@@ -1,1518 +0,0 @@
-"""fileutil.py -- General file functions
-
-These were initially designed for use with PyDrizzle.
-These functions only rely on booleans 'yes' and 'no', PyFITS and readgeis.
-
-This file contains both IRAF-compatibility and general file access functions.
-General functions included are::
-
-    DEGTORAD(deg), RADTODEG(rad)
-
-    DIVMOD(num,val)
-
-    convertDate(date)
-        Converts the DATE date string into a decimal year.
-
-    decimal_date(date-obs,time-obs=None)
-        Converts the DATE-OBS (with optional TIME-OBS) string into a decimal year
-
-    buildRootname(filename, extn=None, extlist=None)
-
-    buildNewRootname(filename, ext=None)
-
-    parseFilename(filename)
-        Splits a input name into a tuple containing (filename, group/extension)
-
-    getKeyword(filename, keyword, default=None, handle=None)
-
-    getHeader(filename,handle=None)
-         Return a copy of the PRIMARY header, along with any group/extension
-         header, for this filename specification.
-
-    getExtn(fimg,extn=None)
-        Returns a copy of the specified extension with data from PyFITS object
-        'fimg' for desired file.
-
-    updateKeyword(filename, key, value)
-
-    openImage(filename,mode='readonly',memmap=0,fitsname=None)
-         Opens file and returns PyFITS object.
-         It will work on both FITS and GEIS formatted images.
-
-    findFile(input)
-
-    checkFileExists(filename,directory=None)
-
-    removeFile(inlist):
-        Utility function for deleting a list of files or a single file.
-
-    rAsciiLine(ifile)
-        Returns the next non-blank line in an ASCII file.
-
-    readAsnTable(input,output=None,prodonly=yes)
-        Reads an association (ASN) table and interprets inputs and output.
-        The 'prodonly' parameter specifies whether to use products as inputs
-            or not; where 'prodonly=no' specifies to only use EXP as inputs.
-
-    isFits(input) - returns (True|False, fitstype), fitstype is one of
-                    ('simple', 'mef', 'waiver')
-
-IRAF compatibility functions (abbreviated list)::
-
-    osfn(filename)
-        Convert IRAF virtual path name to OS pathname
-
-    show(*args, **kw)
-        Print value of IRAF or OS environment variables
-
-    time()
-        Print current time and date
-
-    access(filename)
-        Returns true if file exists, where filename can include IRAF variables
-"""
-
-from __future__ import division, print_function # confidence high
-
-from . import numerixenv
-numerixenv.check()
-
-from . import stpyfits as fits
-from . import readgeis
-from . import convertwaiveredfits
-
-import datetime
-import copy
-import os
-import re
-import shutil
-import sys
-
-import time as _time
-import numpy as np
-
-PY3K = sys.version_info[0] > 2
-if PY3K:
-    string_types = str
-else:
-    string_types = basestring
-
-# Environment variable handling - based on iraffunctions.py
-# define INDEF, yes, no, EOF, Verbose, userIrafHome
-
-# Set up IRAF-compatible Boolean values
-yes = True
-no = False
-
-# List of supported default file types
-# It will look for these file types by default
-# when trying to recognize input rootnames.
-EXTLIST =  ['_crj.fits', '_flt.fits', '_flc.fits', '_sfl.fits', '_cal.fits',
-            '_raw.fits', '.c0h', '.hhh', '_c0h.fits', '_c0f.fits', '_c1f.fits',
-            '.fits']
-
-
-BLANK_ASNDICT = {
-    'output': None,
-    'order': [],
-    'members': {
-        'abshift': no,
-        'dshift': no
-    }
-}
-
-
-def help():
-    print(__doc__)
-
-
-#################
-#
-#
-#               Generic Functions
-#
-#
-#################
-def DEGTORAD(deg):
-    return (deg * np.pi / 180.)
-
-
-def RADTODEG(rad):
-    return (rad * 180. / np.pi)
-
-
-def DIVMOD(num,val):
-    if isinstance(num, np.ndarray):
-    # Treat number as numpy object
-        _num = np.remainder(num, val)
-    else:
-        _num = divmod(num, val)[1]
-    return _num
-
-
-def getLTime():
-    """Returns a formatted string with the current local time."""
-
-    _ltime = _time.localtime(_time.time())
-    tlm_str = _time.strftime('%H:%M:%S (%d/%m/%Y)', _ltime)
-    return tlm_str
-
-
-def getDate():
-    """Returns a formatted string with the current date."""
-
-    _ltime = _time.localtime(_time.time())
-    date_str = _time.strftime('%Y-%m-%dT%H:%M:%S',_ltime)
-
-    return date_str
-
-
-def convertDate(date):
-    """Convert DATE string into a decimal year."""
-
-    d, t = date.split('T')
-    return decimal_date(d, timeobs=t)
-
-
-def decimal_date(dateobs, timeobs=None):
-    """Convert DATE-OBS (and optional TIME-OBS) into a decimal year."""
-
-    year, month, day = dateobs.split('-')
-    if timeobs is not None:
-        hr, min, sec = timeobs.split(':')
-    else:
-        hr, min, sec = 0, 0, 0
-
-    rdate = datetime.datetime(int(year), int(month), int(day), int(hr),
-                              int(min), int(sec))
-    dday = (float(rdate.strftime("%j")) + rdate.hour / 24.0 +
-            rdate.minute / (60. * 24) + rdate.second / (3600 * 24.)) / 365.25
-    ddate = int(year) + dday
-
-    return ddate
-
-
-def interpretDQvalue(input):
-    """
-    Converts an integer 'input' into its component bit values as a list of
-    power of 2 integers.
-
-    For example, the bit value 1027 would return [1, 2, 1024]
-    """
-
-    nbits = 16
-    # We will only support integer values up to 2**128
-    for iexp in [16, 32, 64, 128]:
-        # Find out whether the input value is less than 2**iexp
-        if (input // (2 ** iexp)) == 0:
-            # when it finally is, we have identified how many bits can be used to
-            # describe this input bitvalue
-            nbits = iexp
-            break
-
-    # Find out how 'dtype' values are described on this machine
-    a = np.zeros(1, dtype='int16')
-    atype_descr = a.dtype.descr[0][1]
-    # Use this description to build the description we need for our input integer
-    dtype_str = atype_descr[:2] + str(nbits // 8)
-    result = np.zeros(nbits + 1, dtype=dtype_str)
-
-    # For each bit, determine whether it has been set in the input value or not
-    for n in range(nbits + 1):
-        i = 2 ** n
-        if input & i > 0:
-            # record which bit has been set as the power-of-2 integer
-            result[n] = i
-
-    # Return the non-zero unique values as a Python list
-    return np.delete(np.unique(result), 0).tolist()
-
-
-def isFits(input):
-    """
-    Returns
-    --------
-    isFits: tuple
-        An ``(isfits, fitstype)`` tuple.  The values of ``isfits`` and
-        ``fitstype`` are specified as:
-
-         - ``isfits``: True|False
-         - ``fitstype``: if True, one of 'waiver', 'mef', 'simple'; if False, None
-
-    Notes
-    -----
-    Input images which do not have a valid FITS filename will automatically
-    result in a return of (False, None).
-
-    In the case that the input has a valid FITS filename but runs into some
-    error upon opening, this routine will raise that exception for the calling
-    routine/user to handle.
-    """
-
-    isfits = False
-    fitstype = None
-    names = ['fits', 'fit', 'FITS', 'FIT']
-    #determine if input is a fits file based on extension
-    # Only check type of FITS file if filename ends in valid FITS string
-    f = None
-    fileclose = False
-    if isinstance(input, fits.HDUList):
-        isfits = True
-        f = input
-    else:
-        isfits = True in [input.endswith(l) for l in names]
-
-    # if input is a fits file determine what kind of fits it is
-    #waiver fits len(shape) == 3
-    if isfits:
-        if not f:
-            try:
-                f = fits.open(input, mode='readonly')
-                fileclose = True
-            except Exception:
-                if f is not None:
-                    f.close()
-                raise
-        data0 = f[0].data
-        if data0 is not None:
-            try:
-                if isinstance(f[1], fits.TableHDU):
-                    fitstype = 'waiver'
-            except IndexError:
-                fitstype = 'simple'
-
-        else:
-            fitstype = 'mef'
-        if fileclose:
-            f.close()
-
-    return isfits, fitstype
-
-
-def buildRotMatrix(theta):
-    _theta = DEGTORAD(theta)
-    _mrot = np.zeros(shape=(2,2), dtype=np.float64)
-    _mrot[0] = (np.cos(_theta), np.sin(_theta))
-    _mrot[1] = (-np.sin(_theta), np.cos(_theta))
-
-    return _mrot
-
-
-#################
-#
-#
-#               Generic File/Header Functions
-#
-#
-#################
def verifyWriteMode(files):
    """
    Checks whether files are writable. It is up to the calling routine to raise
    an Exception, if desired.

    This function returns True, if all files are writable and False, if any are
    not writable.  In addition, for all files found to not be writable, it will
    print out the list of names of affected files.
    """
    # Accept a single filename as well as a list of names.
    if not isinstance(files, list):
        files = [files]

    # Keep track of the name of each file which is not writable.
    not_writable = []
    for fname in files:
        try:
            # Probe by opening in append mode (note: this creates the file
            # when it is absent, exactly as the original code did); the
            # context manager guarantees the handle is closed.
            with open(fname, 'a'):
                pass
        except (IOError, OSError):
            # Was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit; only filesystem errors mean "not writable".
            not_writable.append(fname)

    writable = not not_writable
    if not writable:
        print('The following file(s) do not have write permission!')
        for fname in not_writable:
            print('    ', fname)

    return writable
-
-
def getFilterNames(header, filternames=None):
    """
    Returns a comma-separated string of filter names extracted from the input
    header (PyFITS header object).  This function has been hard-coded to
    support the following instruments:

        ACS, WFPC2, STIS, NICMOS, WFC3

    This function relies on the 'INSTRUME' keyword to define what instrument
    has been used to generate the observation/header.

    The 'filternames' parameter allows the user to provide a list of keyword
    names for their instrument, in the case their instrument is not supported.
    """
    # Keyword names holding the filter values, per supported instrument.
    instrument_keys = {
        'ACS': ['FILTER1', 'FILTER2'],
        'WFPC2': ['FILTNAM1', 'FILTNAM2'],
        'STIS': ['OPT_ELEM', 'FILTER'],
        'NICMOS': ['FILTER', 'FILTER2'],
        'WFC3': ['FILTER', 'FILTER2']
    }

    if 'INSTRUME' not in header:
        raise ValueError('Header does not contain INSTRUME keyword.')

    # Fall back to the user-supplied keyword list for unsupported instruments.
    keys = instrument_keys.get(header['INSTRUME'], filternames)

    # Collect all non-blank filter values; values such as 'CLEAR' or
    # 'N/A' are considered valid and kept.
    values = []
    for key in keys:
        value = header[key] if key in header else ''
        if value.strip() != '':
            values.append(header[key])

    return ','.join(values)
-
-
def buildNewRootname(filename, extn=None, extlist=None):
    """
    Build rootname for a new file.

    Use 'extn' for new filename if given, does NOT append a suffix/extension at
    all.

    Does NOT check to see if it exists already.  Will ALWAYS return a new
    filename.

    Parameters
    ----------
    filename : str
        Name whose known suffix (e.g. '_crj.fits') is to be stripped.
    extn : str, optional
        Extension appended to the stripped rootname ('' when None).
    extlist : list of str, optional
        Extra suffixes to search for, in addition to EXTLIST.
    """
    # Search known suffixes to replace ('_crj.fits',...)
    _extlist = copy.deepcopy(EXTLIST)
    # Also append any user-specified extensions...
    if extlist:
        _extlist += extlist

    # Bug fix: initialize so an empty suffix list cannot leave _indx
    # unbound (NameError) in the check below.
    _indx = -1
    for suffix in _extlist:
        _indx = filename.find(suffix)
        if _indx > 0:
            break

    if _indx < 0:
        # No known suffix found: default to the entire rootname.
        _indx = len(filename)

    if extn is None:
        extn = ''

    return filename[:_indx] + extn
-
-
def buildRootname(filename, ext=None):
    """
    Build a new rootname for an existing file and given extension.

    Any user supplied extensions to use for searching for file need to be
    provided as a list of extensions.

    Examples
    --------

    ::

        >>> rootname = buildRootname(filename, ext=['_dth.fits'])

    Returns None when no matching file is found and no extension list was
    given; it is up to the calling routine to check for that.
    """
    if filename in ['', ' ', None]:
        return None

    fpath, froot = os.path.split(filename)
    if fpath in ['', ' ', None]:
        fpath = os.curdir
    # Get complete list of filenames from the input's directory.
    flist = os.listdir(fpath)

    # First, assume the given filename is complete and verify it exists,
    # either as-is or with a '.fits' suffix (the exact name is preferred).
    rootname = None
    if froot in flist:
        rootname = froot
    elif froot + '.fits' in flist:
        rootname = froot + '.fits'

    if rootname is None:
        # Default list of suffixes/extensions, with any user-specified
        # extensions taking precedence in the search order.
        _extlist = list(EXTLIST)
        if ext is not None:
            for i in ext:
                _extlist.insert(0, i)
        # Loop over all extensions looking for a filename that matches:
        # first with exactly the case provided (as in the ASN table),
        # then with an all lower-case filename as required by the pipeline.
        for extn in _extlist:
            if froot + extn in flist:
                rootname = froot + extn
            elif froot.lower() + extn in flist:
                rootname = froot.lower() + extn
            if rootname is not None:
                break

    # If we still haven't found the file, see if we have the
    # info to build one...
    if rootname is None and ext is not None:
        # Check to see if we have a full filename to start with...
        _indx = froot.find('.')
        if _indx > 0:
            rootname = froot[:_indx] + ext[0]
        else:
            rootname = froot + ext[0]

    # Bug fix: os.path.join(fpath, None) raises TypeError, so only
    # re-attach the path when a rootname was actually found or built.
    if rootname is not None and fpath not in ['.', '', ' ', None]:
        rootname = os.path.join(fpath, rootname)
    # It will be up to the calling routine to verify
    # that a valid rootname, rather than 'None', was returned.
    return rootname
-
-
def getKeyword(filename, keyword, default=None, handle=None):
    """
    General, write-safe method for returning a keyword value from the header of
    a IRAF recognized image.

    Returns the value as a string.

    Parameters
    ----------
    filename : str
        Image name, optionally qualified with an '[ext]' specification;
        '[0]' is assumed when none is given.
    keyword : str
        Header keyword to look up.
    default : object, optional
        Value returned when the keyword is missing or blank.
    handle : fits.HDUList, optional
        Already-opened image to use instead of opening `filename`; it is
        left open on return.
    """

    # Insure that there is at least 1 extension specified...
    if filename.find('[') < 0:
        filename += '[0]'

    _fname, _extn = parseFilename(filename)

    if not handle:
        # Open image whether it is FITS or GEIS
        _fimg = openImage(_fname)
    else:
        # Use what the user provides, after insuring
        # that it is a proper PyFITS object.
        if isinstance(handle, fits.HDUList):
            _fimg = handle
        else:
            raise ValueError('Handle must be %r object!' % fits.HDUList)

    # Address the correct header
    _hdr = getExtn(_fimg, _extn).header

    try:
        value =  _hdr[keyword]
    except KeyError:
        # Not in the requested extension: search all extensions for it.
        _nextn = findKeywordExtn(_fimg, keyword)
        try:
            # NOTE(review): findKeywordExtn() returns -1 when nothing
            # matches, so this falls back to the LAST extension's header
            # before giving up with '' -- presumably intended; confirm.
            value = _fimg[_nextn].header[keyword]
        except KeyError:
            value = ''

    if not handle:
        # Only close images that this routine itself opened.
        _fimg.close()
        del _fimg

    if value == '':
        if default is None:
            value = None
        else:
            value = default

    # NOTE:  Need to clean up the keyword.. Occasionally the keyword value
    # goes right up to the "/" FITS delimiter, and iraf.keypar is incapable
    # of realizing this, so it incorporates "/" along with the keyword value.
    # For example, after running "pydrizzle" on the image "j8e601bkq_flt.fits",
    # the CD keywords look like this:
    #
    #   CD1_1   = 9.221627430999639E-06/ partial of first axis coordinate w.r.t. x
    #   CD1_2   = -1.0346992614799E-05 / partial of first axis coordinate w.r.t. y
    #
    # so for CD1_1, iraf.keypar returns:
    #       "9.221627430999639E-06/"
    #
    # So, the following piece of code CHECKS for this and FIXES the string,
    # very simply by removing the last character if it is a "/".
    # This fix courtesy of Anton Koekemoer, 2002.
    elif isinstance(value, string_types):
        if value[-1:] == '/':
            value = value[:-1]

    return value
-
-
def getHeader(filename, handle=None):
    """
    Return a copy of the PRIMARY header, along with any group/extension header
    for this filename specification.

    Parameters
    ----------
    filename : str
        Image name, optionally qualified with an '[ext]' specification.
    handle : fits.HDUList, optional
        Already-opened image to use instead of opening `filename`; it is
        left open on return.
    """

    _fname, _extn = parseFilename(filename)
    # Allow the user to provide an already opened PyFITS object
    # to derive the header from...
    #
    if not handle:
        # Open image whether it is FITS or GEIS
        _fimg = openImage(_fname, mode='readonly')
    else:
        # Use what the user provides, after insuring
        # that it is a proper PyFITS object.
        if isinstance(handle, fits.HDUList):
            _fimg = handle
        else:
            raise ValueError('Handle must be a %r object!' % fits.HDUList)

    # Work on a copy so the caller cannot corrupt the open image's header.
    _hdr = _fimg['PRIMARY'].header.copy()

    # if the data is not in the primary array delete NAXIS
    # so that the correct value is read from the extension header
    if _hdr['NAXIS'] == 0:
        del _hdr['NAXIS']

    if not (_extn is None or (_extn.isdigit() and int(_extn) == 0)):
        # Append correct extension/chip/group header to PRIMARY...
        for _card in getExtn(_fimg, _extn).header.cards:
            _hdr.append(_card)
    if not handle:
        # Close file handle now...
        _fimg.close()
        del _fimg

    return _hdr
-
-
def updateKeyword(filename, key, value, show=yes):
    """Add/update keyword to header with given value."""

    fname, extn = parseFilename(filename)

    # Works for both FITS and GEIS inputs.
    img = openImage(fname, mode='update')

    # Header of the addressed extension.
    hdr = getExtn(img, extn).header

    try:
        hdr[key] = value
    except KeyError:
        # Keyword not present yet: announce (optionally) and add it.
        if show:
            print('Adding new keyword ', key, '=', value)
        hdr[key] = value

    img.close()
    del img
-
-
def buildFITSName(geisname):
    """Build a new FITS filename for a GEIS input image.

    'root.c0h' becomes 'root_c0h.fits': the GEIS extension is folded into
    the rootname with its trailing letter replaced by 'h'.
    """
    dot = geisname.rfind('.')
    return geisname[:dot] + '_' + geisname[dot + 1:-1] + 'h.fits'
-
-
def openImage(filename, mode='readonly', memmap=0, writefits=True,
              clobber=True, fitsname=None):
    """
    Opens file and returns PyFITS object.  Works on both FITS and GEIS
    formatted images.

    Notes
    -----
    If a GEIS or waivered FITS image is used as input, it will convert it to a
    MEF object and only if ``writefits = True`` will write it out to a file. If
    ``fitsname = None``, the name used to write out the new MEF file will be
    created using `buildFITSName`.

    Parameters
    ----------
    filename: str
        name of input file
    mode: str
        mode for opening file based on PyFITS `mode` parameter values
    memmap: int
        switch for using memory mapping, 0 for no, 1 for yes
    writefits: bool
        if True, will write out GEIS as multi-extension FITS
        and return handle to that opened GEIS-derived MEF file
    clobber: bool
        overwrite previously written out GEIS-derived MEF file
    fitsname: str
        name to use for GEIS-derived MEF file,
        if None and writefits==True, will use 'buildFITSName()' to generate one
    """
    # NOTE(review): import deferred to call time, presumably to avoid a
    # circular import between this module and stwcs -- confirm.
    from stwcs import updatewcs

    # Insure that the filename is always fully expanded
    # This will not affect filenames without paths or
    # filenames specified with extensions.
    filename = osfn(filename)

    # Extract the rootname and extension specification
    # from input image name
    _fname, _iextn = parseFilename(filename)

    # Check whether we have a FITS file and if so what type
    isfits, fitstype = isFits(_fname)

    if isfits:
        if fitstype != 'waiver':
            # Open the FITS file
            fimg = fits.open(_fname, mode=mode, memmap=memmap)
            return fimg
        else:
            # Waivered FITS: convert to a MEF object in memory first.
            fimg = convertwaiveredfits.convertwaiveredfits(_fname)

            #check for the existence of a data quality file
            _dqname = buildNewRootname(_fname, extn='_c1f.fits')
            dqexists = os.path.exists(_dqname)
            if dqexists:
                try:
                    dqfile = convertwaiveredfits.convertwaiveredfits(_dqname)
                    dqfitsname = buildNewRootname(_dqname, extn='_c1h.fits')
                except:
                    # NOTE(review): when this conversion fails, 'dqfitsname'
                    # stays unbound while dqexists remains True -- the
                    # writeto() below would then raise NameError; confirm
                    # the DQ conversion cannot fail independently.
                    print("Could not read data quality file %s" % _dqname)
            if writefits:
                # User wants to make a FITS copy and update it
                # using the filename they have provided
                if fitsname is None:
                    rname = buildNewRootname(_fname)
                    fitsname = buildNewRootname(rname, extn='_c0h.fits')

                # Write out GEIS image as multi-extension FITS.
                fexists = os.path.exists(fitsname)
                if (fexists and clobber) or not fexists:
                    # NOTE(review): 'clobber' is deprecated in newer astropy
                    # in favor of 'overwrite'.
                    print('Writing out WAIVERED as MEF to ', fitsname)
                    fimg.writeto(fitsname, clobber=clobber)
                    if dqexists:
                        print('Writing out WAIVERED as MEF to ', dqfitsname)
                        dqfile.writeto(dqfitsname, clobber=clobber)
                # Now close input GEIS image, and open writable
                # handle to output FITS image instead...
                fimg.close()
                del fimg
                # Image re-written as MEF, now it needs its WCS updated
                updatewcs.updatewcs(fitsname)

                fimg = fits.open(fitsname, mode=mode, memmap=memmap)

        # Return handle for use by user
        return fimg
    else:
        # Input was specified as a GEIS image, but no FITS copy
        # exists.  Read it in with 'readgeis' and make a copy
        # then open the FITS copy...
        try:
            # Open as a GEIS image for reading only
            fimg = readgeis.readgeis(_fname)
        except:
            raise IOError("Could not open GEIS input: %s" % _fname)

        #check for the existence of a data quality file
        _dqname = buildNewRootname(_fname, extn='.c1h')
        dqexists = os.path.exists(_dqname)
        if dqexists:
            try:
                dqfile = readgeis.readgeis(_dqname)
                dqfitsname = buildFITSName(_dqname)
            except:
                # NOTE(review): same unbound-'dqfitsname' hazard as in the
                # waivered-FITS branch above.
                print("Could not read data quality file %s" % _dqname)

        # Check to see if user wanted to update GEIS header.
        # or write out a multi-extension FITS file and return a handle to it
        if writefits:
                # User wants to make a FITS copy and update it
                # using the filename they have provided
            if fitsname is None:
                fitsname = buildFITSName(_fname)

            # Write out GEIS image as multi-extension FITS.
            fexists = os.path.exists(fitsname)
            if (fexists and clobber) or not fexists:
                    print('Writing out GEIS as MEF to ', fitsname)
                    fimg.writeto(fitsname, clobber=clobber)
                    if dqexists:
                        print('Writing out GEIS as MEF to ', dqfitsname)
                        dqfile.writeto(dqfitsname, clobber=clobber)
            # Now close input GEIS image, and open writable
            # handle to output FITS image instead...
            fimg.close()
            del fimg
            # Image re-written as MEF, now it needs its WCS updated
            updatewcs.updatewcs(fitsname)

            fimg = fits.open(fitsname, mode=mode, memmap=memmap)

        # Return handle for use by user
        return fimg
-
-
def parseFilename(filename):
    """
    Parse out filename from any specified extensions.

    Returns rootname and string version of extension name.
    """
    bracket = filename.find('[')
    if bracket <= 0:
        # No '[ext]' qualifier present: whole name, no extension.
        return filename, None
    # Split 'name[ext]' into its two parts, dropping the brackets.
    return filename[:bracket], filename[bracket + 1:-1]
-
-
def parseExtn(extn=None):
    """
    Parse a string representing a qualified fits extension name as in the
    output of `parseFilename` and return a tuple ``(str(extname),
    int(extver))``, which can be passed to `astropy.io.fits` functions using
    the 'ext' kw.

    Default return is the first extension in a fits file.

    Examples
    --------

    ::

        >>> parseExtn('sci, 2')
        ('sci', 2)
        >>> parseExtn('2')
        ('', 2)
        >>> parseExtn('sci')
        ('sci', 1)

    """
    if not extn:
        # None/'' : default to the primary extension.
        return ('', 0)

    try:
        lext = extn.split(',')
    except AttributeError:
        # Non-string input (the original hid this behind a bare 'except'):
        # fall back to the first extension of the named type.
        return ('', 1)

    if len(lext) == 1 and lext[0].isdigit():
        # A bare extension number, e.g. '2'.
        return ('', int(lext[0]))
    elif len(lext) == 2:
        # 'extname,extver' pair.
        return (lext[0], int(lext[1]))
    else:
        # extname only (or an over-long spec): default extver of 1.
        return (lext[0], 1)
-
-
def countExtn(fimg, extname='SCI'):
    """
    Return the number of 'extname' extensions, defaulting to counting the
    number of SCI extensions.
    """
    opened_here = False
    if isinstance(fimg, string_types):
        # A filename was given: open it here and close it before returning.
        fimg = fits.open(fimg)
        opened_here = True

    count = 0
    for hdu in fimg:
        hdr = hdu.header
        if 'extname' in hdr and hdr['extname'] == extname:
            count += 1

    if opened_here:
        fimg.close()

    return count
-
-
def getExtn(fimg, extn=None):
    """
    Returns the PyFITS extension corresponding to extension specified in
    filename.

    Defaults to returning the first extension with data or the primary
    extension, if none have data.  If a non-existent extension has been
    specified, it raises a `KeyError` exception.

    Parameters
    ----------
    fimg : fits.HDUList
        Opened image to pick the extension from.
    extn : int, str or tuple, optional
        Extension specification: an index, an 'extname[,extver]' string,
        an ('extname', extver) tuple (as built by `parseExtn`), or a GEIS
        'group/...' string.
    """
    if extn is None:
        # No extension requested: return the first extension with data,
        # falling back to the PRIMARY extension.
        _extn = fimg[0]
        for _e in fimg:
            if _e.data is not None:
                _extn = _e
                break
    elif repr(extn).find(',') > 1:
        # Two values given for extension: e.g. 'sci,1' or ('sci', 1).
        if isinstance(extn, tuple):
            # Tuple possibly created by parseExtn(); drop an empty
            # extname so only the meaningful parts remain.
            _extns = list(extn)
            if '' in _extns:
                _extns.remove('')
        else:
            _extns = extn.split(',')
        try:
            _extn = fimg[_extns[0], int(_extns[1])]
        except KeyError:
            # Fall back to a manual case-insensitive search.
            _extn = None
            for e in fimg:
                hdr = e.header
                if ('extname' in hdr and
                        hdr['extname'].lower() == _extns[0].lower() and
                        hdr['extver'] == int(_extns[1])):
                    _extn = e
                    break
    elif repr(extn).find('/') > 1:
        # GEIS group syntax: use the group number before the '/'.
        _extn = fimg[int(str(extn[:extn.find('/')]))]
    elif isinstance(extn, string_types):
        _extn = None
        if extn.strip() == '':
            # Invalid (blank) name: leave _nextn unset so the KeyError
            # below is raised.  (The original code hit an
            # UnboundLocalError here instead of the documented KeyError.)
            _nextn = None
        elif extn.isdigit():
            # Extension number given as a string.
            _nextn = int(extn)
        elif extn.lower() == 'primary':
            _nextn = 0
        else:
            # Only EXTNAME specified: find the first match.
            _nextn = None
            for i, hdu in enumerate(fimg):
                hdr = hdu.header
                if 'extname' in hdr and extn.lower() == hdr['extname'].lower():
                    _nextn = i
                    break
        # Bug fix: guard against _nextn being None (no match found); in
        # Python 3, ``None < len(fimg)`` raises TypeError.
        if _nextn is not None and _nextn < len(fimg):
            _extn = fimg[_nextn]
    else:
        # Only integer extension number given, or default of 0 is used.
        if int(extn) < len(fimg):
            _extn = fimg[int(extn)]
        else:
            _extn = None

    if _extn is None:
        raise KeyError('Extension %s not found' % extn)

    return _extn
-
-
-#Revision History:
-#    Nov 2001: findFile upgraded to accept full filenames with paths,
-#               instead of working only on files from current directory. WJH
-#
# Base function for searching a directory for a full filename
#   with optional path.
def findFile(input):
    """Search a directory for full filename with optional path.

    Returns the IRAF-style booleans `yes`/`no` (module constants) rather
    than a Python bool.  When the name carries an '[ext]' qualifier, the
    file must also contain the requested extension to count as found.
    """

    # If no input name is provided, default to returning 'no'(FALSE)
    if not input:
        return no

    # We use 'osfn' here to insure that any IRAF variables are
    # expanded out before splitting out the path...
    _fdir, _fname = os.path.split(osfn(input))

    if _fdir == '':
        _fdir = os.curdir

    try:
        flist = os.listdir(_fdir)
    except OSError:
        # handle the case where the requested file is on a
        # disconnected network store
        return no

    _root, _extn = parseFilename(_fname)

    found = no
    for name in flist:
        if name == _root:
            # Check to see if given extension, if any, exists
            if _extn is None:
                found = yes
                continue
            else:
                # Split 'extname,extver' (or a bare extension number).
                _split = _extn.split(',')
                _extnum = None
                _extver = None
                if  _split[0].isdigit():
                    _extname = None
                    _extnum = int(_split[0])
                else:
                    _extname = _split[0]
                    if len(_split) > 1:
                        _extver = int(_split[1])
                    else:
                        # Default to the first version of the extension.
                        _extver = 1
                f = openImage(_root)
                f.close()
                # NOTE(review): 'f' is inspected below AFTER close(); this
                # relies on the FITS object's structure staying accessible
                # once the underlying file is closed -- confirm.
                if _extnum is not None:
                    if _extnum < len(f):
                        found = yes
                        del f
                        continue
                    else:
                        del f
                else:
                    _fext = findExtname(f, _extname, extver=_extver)
                    if _fext is not None:
                        found = yes
                        del f
                        continue
    return found
-
-
def checkFileExists(filename, directory=None):
    """
    Checks to see if file specified exists in current or specified directory.

    Default is current directory.  Returns 1 if it exists, 0 if not found.
    """
    target = filename if directory is None else os.path.join(directory, filename)
    return os.path.exists(target)
-
-
def copyFile(input, output, replace=None):
    """Copy a file whole from input to output.

    The copy is skipped when *output* already exists and *replace* is not set.
    """
    exists = findFile(output)
    if replace or not exists:
        # copy2 preserves metadata (timestamps, permissions) as well.
        shutil.copy2(input, output)
-
-
def _remove(file):
    """Remove a single image file, including any GEIS/IRAF companion files."""
    # Nothing to do when the file is not present.
    if not findFile(file):
        return

    if file.find('.fits') > 0:
        try:
            os.remove(file)
        except (IOError, OSError):
            pass
        return

    if file.find('.imh') > 0:
        # IRAF images keep their pixels in a separate '.pix' file.
        os.remove(file)
        os.remove(file[:-3] + 'pix')
        return

    # Otherwise this may be a GEIS image with separate header and pixel
    # files ('.??h' plus '.??d').
    os.remove(file)
    # At this point we may be deleting a non-image file, so only verify
    # whether a GEIS data file exists before trying to delete it.
    datafile = file[:-1] + 'd'
    if findFile(datafile):
        os.remove(datafile)
-
-
def removeFile(inlist):
    """
    Utility function for deleting a list of files or a single file.

    This function will automatically delete both files of a GEIS image, just
    like 'iraf.imdelete'.
    """
    if isinstance(inlist, string_types):
        # A single filename was given.
        _remove(inlist)
        return

    # A list of full filenames; wildcards are matched against the
    # current directory's contents.
    dirfiles = os.listdir('.')
    for entry in inlist:
        if entry.find('*') >= 0 or entry.find('?') >= 0:
            # Translate the shell-style wildcards into a regex
            # (same substitutions as the original implementation).
            pattern = entry.replace('?', '.?').replace('*', '.*')
            matcher = re.compile(pattern)
            for candidate in dirfiles:
                if matcher.match(candidate):
                    _remove(candidate)
        else:
            _remove(entry)
-
-
def findKeywordExtn(ft, keyword, value=None):
    """
    This function will return the index of the extension in a multi-extension
    FITS file which contains the desired keyword with the given value.

    Returns -1 when no extension matches.  When *value* is None, the first
    extension merely containing *keyword* matches; otherwise its (stripped)
    value must equal *value* as well.
    """
    for index, hdu in enumerate(ft):
        hdr = hdu.header
        if keyword not in hdr:
            continue
        # MUST strip to match against any input string!
        if value is None or hdr[keyword].strip() == value:
            return index
    return -1
-
-
def findExtname(fimg, extname, extver=None):
    """
    Returns the list number of the extension corresponding to EXTNAME given.

    Parameters
    ----------
    fimg : fits.HDUList
        Opened image whose extensions are scanned.
    extname : str
        EXTNAME value to match (comparison is done in upper case).
    extver : int, optional
        When given, EXTVER must match as well.

    Returns
    -------
    int or None
        Index of the first matching extension, or None when absent.
    """
    extnum = None
    for i, chip in enumerate(fimg):
        hdr = chip.header
        if 'EXTNAME' in hdr and hdr['EXTNAME'].strip() == extname.upper():
            # 'extver == None' in the original; 'is None' is the correct
            # identity test for the sentinel.
            if extver is None or hdr['EXTVER'] == extver:
                extnum = i
                break
    return extnum
-
-
def rAsciiLine(ifile):
    """Returns the next non-blank line in an ASCII file.

    Returns an empty string once end-of-file is reached.  (The previous
    implementation looped forever at EOF, because readline() keeps
    returning '' there.)
    """
    raw = ifile.readline()
    # Skip blank lines, but stop on the empty string that marks EOF.
    while raw and not raw.strip():
        raw = ifile.readline()
    return raw.strip()
-
-
-#######################################################
-#
-#
-#
-#  IRAF environment variable interpretation routines
-#      extracted from PyRAF's 'iraffunction.py'
-#
-#  These allow IRAF variables to be interpreted without
-#      having to install/use IRAF or PyRAF.
-#
-#
-#######################################################
-# -----------------------------------------------------
-# private dictionaries:
-#
-# _varDict: dictionary of all IRAF cl variables (defined with set name=value)
-# _tasks: all IRAF tasks (defined with task name=value)
-# _mmtasks: minimum-match dictionary for tasks
-# _pkgs: min-match dictionary for all packages (defined with
-#                       task name.pkg=value)
-# _loaded: loaded packages
-# -----------------------------------------------------
-
-# Will want to enhance this to allow a "bye" function that unloads packages.
-# That might be done using a stack of definitions for each task.
-
# Dictionary of all IRAF cl variables (defined with set name=value).
_varDict = {}


# Module-level names that are re-created whenever this module is
# imported, and which are therefore never saved with a session.
unsavedVars = [
    'EOF',
    '_NullFile',
    '_NullPath',
    '__builtins__',
    '__doc__',
    '__file__',
    '__name__',
    '__re_var_match',
    '__re_var_match2',
    '__re_var_paren',
    '_badFormats',
    '_clearString',
    '_exitCommands',
    '_unsavedVarsDict',
    '_radixDigits',
    '_re_taskname',
    '_sttyArgs',
    'no',
    'yes',
    'userWorkingHome'
]

# Fast-lookup form of the list above; the scaffolding name is dropped
# afterwards so only the dictionary remains in the module namespace.
_unsavedVarsDict = dict.fromkeys(unsavedVars, 1)
del unsavedVars
-
-
-# -----------------------------------------------------
-# Miscellaneous access routines:
-# getVarList: Get list of names of all defined IRAF variables
-# -----------------------------------------------------
-
def getVarDict():
    """Return the dictionary of all defined IRAF variables.

    The live module-level dictionary is returned (not a copy), so any
    mutations by the caller are shared.
    """

    return _varDict
-
-
def getVarList():
    """Return a list of the names of all defined IRAF variables."""

    return [name for name in _varDict]
-
-
-# -----------------------------------------------------
-# listVars:
-# list contents of the dictionaries
-# -----------------------------------------------------
-
def listVars(prefix="", equals="\t= ", **kw):
    """Print all IRAF variables, one per line, as '<prefix><name><equals><value>'."""

    names = getVarList()
    if not names:
        print('No IRAF variables defined')
        return
    for name in sorted(names):
        print("%s%s%s%s" % (prefix, name, equals, envget(name)))
-
-
def untranslateName(s):
    """Undo Python conversion of CL parameter or variable name.

    Decodes the 'DOT'/'DOLLAR' substitutions and strips the 'PY' guard
    from the start of each name component.
    """
    s = s.replace('DOT', '.').replace('DOLLAR', '$')
    # Delete 'PY' at the start of the first name component...
    if s.startswith('PY'):
        s = s[2:]
    # ...and at the start of every subsequent (dotted) component.
    return s.replace('.PY', '.')
-
-
def envget(var, default=None):
    """Get value of IRAF or OS environment variable.

    Lookup order: IRAF environment (when pyraf is loaded), then the
    module's own variable store, then os.environ, then *default*.
    """
    if 'pyraf' in sys.modules:
        # ONLY if pyraf is already loaded, import iraf into the namespace.
        from pyraf import iraf
    else:
        # Otherwise leave iraf unset so its environment is not consulted.
        iraf = None

    if iraf:
        try:
            return iraf.envget(var)
        except KeyError:
            pass
    if var in _varDict:
        return _varDict[var]
    if var in os.environ:
        return os.environ[var]
    if default is not None:
        return default
    if var == 'TERM':
        # Return a default value for TERM; TERM gets caught as it is
        # found in the default login.cl file setup by IRAF.
        print("Using default TERM value for session.")
        return 'xterm'
    raise KeyError("Undefined environment variable `%s'" % var)
-
-
-def osfn(filename):
-    """Convert IRAF virtual path name to OS pathname."""
-
-    # Try to emulate the CL version closely:
-    #
-    # - expands IRAF virtual file names
-    # - strips blanks around path components
-    # - if no slashes or relative paths, return relative pathname
-    # - otherwise return absolute pathname
-    if filename is None:
-        return filename
-
-    ename = Expand(filename)
-    dlist = [part.strip() for part in ename.split(os.sep)]
-    if len(dlist) == 1 and dlist[0] not in [os.curdir, os.pardir]:
-        return dlist[0]
-
-    # I use str.join instead of os.path.join here because
-    # os.path.join("","") returns "" instead of "/"
-
-    epath = os.sep.join(dlist)
-    fname = os.path.abspath(epath)
-    # append '/' if relative directory was at end or filename ends with '/'
-    if fname[-1] != os.sep and dlist[-1] in ['', os.curdir, os.pardir]:
-        fname = fname + os.sep
-    return fname
-
-
-def defvar(varname):
-    """Returns true if CL variable is defined."""
-
-    if 'pyraf' in sys.modules:
-        #ONLY if pyraf is already loaded, import iraf into the namespace
-        from pyraf import iraf
-    else:
-        # else set iraf to None so it knows to not use iraf's environment
-        iraf = None
-
-    if iraf:
-        _irafdef = iraf.envget(varname)
-    else:
-        _irafdef = 0
-    return varname in _varDict or varname in os.environ or _irafdef
-
-
-# -----------------------------------------------------
-# IRAF utility procedures
-# -----------------------------------------------------
-
-# these have extra keywords (redirection, _save) because they can
-# be called as tasks
-
-def set(*args, **kw):
-    """Set IRAF environment variables."""
-
-    if len(args) == 0:
-        if len(kw) != 0:
-            # normal case is only keyword,value pairs
-            for keyword, value in kw.items():
-                keyword = untranslateName(keyword)
-                svalue = str(value)
-                _varDict[keyword] = svalue
-        else:
-            # set with no arguments lists all variables (using same format
-            # as IRAF)
-            listVars(prefix="    ", equals="=")
-    else:
-        # The only other case allowed is the peculiar syntax
-        # 'set @filename', which only gets used in the zzsetenv.def file,
-        # where it reads extern.pkg.  That file also gets read (in full cl
-        # mode) by clpackage.cl.  I get errors if I read this during
-        # zzsetenv.def, so just ignore it here...
-        #
-        # Flag any other syntax as an error.
-        if (len(args) != 1 or len(kw) != 0 or
-                not isinstance(args[0], string_types) or args[0][:1] != '@'):
-            raise SyntaxError("set requires name=value pairs")
-
-# currently do not distinguish set from reset
-# this will change when keep/bye/unloading are implemented
-
-reset = set
-
-def show(*args, **kw):
-    """Print value of IRAF or OS environment variables."""
-
-    if len(kw):
-        raise TypeError('unexpected keyword argument: %r' % list(kw))
-
-    if args:
-        for arg in args:
-            print(envget(arg))
-    else:
-        # print them all
-        listVars(prefix="    ", equals="=")
-
-
-def unset(*args, **kw):
-    """
-    Unset IRAF environment variables.
-
-    This is not a standard IRAF task, but it is obviously useful.  It makes the
-    resulting variables undefined.  It silently ignores variables that are not
-    defined.  It does not change the os environment variables.
-    """
-
-    if len(kw) != 0:
-        raise SyntaxError("unset requires a list of variable names")
-
-    for arg in args:
-        if arg in _varDict:
-            del _varDict[arg]
-
-
-def time(**kw):
-    """Print current time and date."""
-
-    print(_time.ctime(_time.time()))
-
-
-# -----------------------------------------------------
-# Expand: Expand a string with embedded IRAF variables
-# (IRAF virtual filename)
-# -----------------------------------------------------
-
-# Input string is in format 'name$rest' or 'name$str(name2)' where
-# name and name2 are defined in the _varDict dictionary.  The
-# name2 string may have embedded dollar signs, which are ignored.
-# There may be multiple embedded parenthesized variable names.
-#
-# Returns string with IRAF variable name expanded to full host name.
-# Input may also be a comma-separated list of strings to Expand,
-# in which case an expanded comma-separated list is returned.
-
-# search for leading string without embedded '$'
-__re_var_match = re.compile(r'(?P<varname>[^$]*)\$')
-__re_var_match2 = re.compile(r'\$(?P<varname>\w*)')
-
-# search for string embedded in parentheses
-__re_var_paren = re.compile(r'\((?P<varname>[^()]*)\)')
-
-
-def Expand(instring, noerror=0):
-    """
-    Expand a string with embedded IRAF variables (IRAF virtual filename).
-
-    Allows comma-separated lists.  Also uses os.path.expanduser to replace '~'
-    symbols.
-
-    Set the noerror flag to silently replace undefined variables with just the
-    variable name or null (so Expand('abc$def') = 'abcdef' and
-    Expand('(abc)def') = 'def').  This is the IRAF behavior, though it is
-    confusing and hides errors.
-    """
-
-    # call _expand1 for each entry in comma-separated list
-    wordlist = instring.split(",")
-    outlist = []
-    for word in wordlist:
-        outlist.append(os.path.expanduser(_expand1(word, noerror=noerror)))
-    return ",".join(outlist)
-
-
-def _expand1(instring, noerror):
-    """Expand a string with embedded IRAF variables (IRAF virtual filename)."""
-
-    # first expand names in parentheses
-    # note this works on nested names too, expanding from the
-    # inside out (just like IRAF)
-    mm = __re_var_paren.search(instring)
-    while mm is not None:
-        # remove embedded dollar signs from name
-        varname = mm.group('varname').replace('$','')
-        if defvar(varname):
-            varname = envget(varname)
-        elif noerror:
-            varname = ""
-        else:
-            raise ValueError("Undefined variable `%s' in string `%s'" %
-                             (varname, instring))
-
-        instring = instring[:mm.start()] + varname + instring[mm.end():]
-        mm = __re_var_paren.search(instring)
-    # now expand variable name at start of string
-    mm = __re_var_match.match(instring)
-    if mm is None:
-        return instring
-    varname = mm.group('varname')
-    if varname in ['', ' ', None]:
-        mm = __re_var_match2.match(instring)
-        varname = mm.group('varname')
-
-    if defvar(varname):
-        # recursively expand string after substitution
-        return _expand1(envget(varname) + instring[mm.end():], noerror)
-    elif noerror:
-        return _expand1(varname + instring[mm.end():], noerror)
-    else:
-        raise ValueError("Undefined variable `%s' in string `%s'" %
-                         (varname, instring))
-
-
-def access(filename):
-    """Returns true if file exists."""
-
-    return os.path.exists(Expand(filename))
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/fitsdiff.py b/required_pkgs/stsci.tools/lib/stsci/tools/fitsdiff.py
deleted file mode 100755
index ec0fdbb..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/fitsdiff.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-
-# $Id: fitsdiff.py 38142 2015-03-06 13:42:21Z bsimon $
-
-"""fitsdiff is now a part of PyFITS--the fitsdiff in PyFITS replaces the
-fitsdiff that used to be in the module.
-
-Now this module just provides a wrapper around astropy.io.fits.diff for backwards
-compatibility with the old interface in case anyone uses it.
-"""
-
-import os
-import sys
-PY3K = sys.version_info[0] > 2
-if PY3K:
-    string_types = str
-else:
-    string_types = basestring
-
-
-from astropy.io.fits.diff import FITSDiff
-from astropy.io.fits.scripts.fitsdiff import log, main
-
-def fitsdiff(input1, input2, comment_excl_list='', value_excl_list='',
-             field_excl_list='', maxdiff=10, delta=0.0, neglect_blanks=True,
-             output=None):
-
-    if isinstance(comment_excl_list, string_types):
-        comment_excl_list = list_parse(comment_excl_list)
-
-    if isinstance(value_excl_list, string_types):
-        value_excl_list = list_parse(value_excl_list)
-
-    if isinstance(field_excl_list, string_types):
-        field_excl_list = list_parse(field_excl_list)
-
-    diff = FITSDiff(input1, input2, ignore_keywords=value_excl_list,
-                    ignore_comments=comment_excl_list,
-                    ignore_fields=field_excl_list, numdiffs=maxdiff,
-                    tolerance=delta, ignore_blanks=neglect_blanks)
-
-    if output is None:
-        output = sys.stdout
-
-    diff.report(output)
-
-    return diff.identical
-
-
-def list_parse(name_list):
-    """Parse a comma-separated list of values, or a filename (starting with @)
-    containing a list value on each line.
-    """
-
-    if name_list and name_list[0] == '@':
-        value = name_list[1:]
-        if not os.path.exists(value):
-            log.warning('The file %s does not exist' % value)
-            return
-        try:
-            return [v.strip() for v in open(value, 'r').readlines()]
-        except IOError as e:
-            log.warning('reading %s failed: %s; ignoring this file' %
-                        (value, e))
-    else:
-        return [v.strip() for v in name_list.split(',')]
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/for2to3.py b/required_pkgs/stsci.tools/lib/stsci/tools/for2to3.py
deleted file mode 100644
index dfce7ee..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/for2to3.py
+++ /dev/null
@@ -1,114 +0,0 @@
-""" This is a temporary module, used during (and for a while after) the
-transition to Python 3.  This code is planned to be kept in place until
-the least version of Python supported no longer requires it (and of course
-until all callers no longer need it).
-This code should run as-is in 2.x and also run unedited after 2to3 in 3.x.
-
-$Id: for2to3.py 38142 2015-03-06 13:42:21Z bsimon $
-"""
-
-from __future__ import division # confidence high
-import os, sys
-PY3K = sys.version_info[0] > 2
-
-
-def ndarr2str(arr, encoding='ascii'):
-    """ This is used to ensure that the return value of arr.tostring()
-    is actually a string.  This will prevent lots of if-checks in calling
-    code.  As of numpy v1.6.1 (in Python 3.2.3), the tostring() function
-    still returns type 'bytes', not 'str' as it advertises. """
-    # be fast, don't check - just assume 'arr' is a numpy array - the tostring
-    # call will fail anyway if not
-    retval = arr.tostring()
-    # would rather check "if isinstance(retval, bytes)", but support 2.5.
-    # could rm the if PY3K check, but it makes this faster on 2.x.
-    if PY3K and not isinstance(retval, str):
-        return retval.decode(encoding)
-    else: # is str
-        return retval
-
-
-def ndarr2bytes(arr, encoding='ascii'):
-    """ This is used to ensure that the return value of arr.tostring()
-    is actually a *bytes* array in PY3K.  See notes in ndarr2str above.  Even
-    though we consider it a bug that numpy's tostring() function returns
-    a bytes array in PY3K, there are actually many instances where that is what
-    we want - bytes, not unicode.  So we use this function in those
-    instances to ensure that when/if this numpy "bug" is "fixed", that
-    our calling code still gets bytes where it needs/expects them. """
-    # be fast, don't check - just assume 'arr' is a numpy array - the tostring
-    # call will fail anyway if not
-    retval = arr.tostring()
-    # would rather check "if not isinstance(retval, bytes)", but support 2.5.
-    if PY3K and isinstance(retval, str):
-        # Take note if this ever gets used.  If this ever occurs, it
-        # is likely wildly inefficient since numpy.tostring() is now
-        # returning unicode and numpy surely has a tobytes() func by now.
-        # If so, add a code path to call its tobytes() func at our start.
-        return retval.encode(encoding)
-    else: # is str==bytes in 2.x
-        return retval
-
-
-def tobytes(s, encoding='ascii'):
-    """ Convert string s to the 'bytes' type, in all Pythons, even
-    back before Python 2.6.  What 'str' means varies by PY3K or not.
-    In Pythons before 3.0, this is technically the same as the str type
-    in terms of the character data in memory. """
-    # NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
-    # NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
-    if PY3K:
-        if isinstance(s, bytes):
-            return s
-        else:
-            return s.encode(encoding)
-    else:
-        # for py2.6 on (before 3.0), bytes is same as str;  2.5 has no bytes
-        # but handle if unicode is passed
-        if isinstance(s, unicode):
-            return s.encode(encoding)
-        else:
-            return s
-
-def tostr(s, encoding='ascii'):
-    """ Convert string-like-thing s to the 'str' type, in all Pythons, even
-    back before Python 2.6.  What 'str' means varies by PY3K or not.
-    In Pythons before 3.0, str and bytes are the same type.
-    In Python 3+, this may require a decoding step. """
-    if PY3K:
-        if isinstance(s, str): # str == unicode in PY3K
-            return s
-        else: # s is type bytes
-            return s.decode(encoding)
-    else:
-        # for py2.6 on (before 3.0), bytes is same as str;  2.5 has no bytes
-        # but handle if unicode is passed
-        if isinstance(s, unicode):
-            return s.encode(encoding)
-        else:
-            return s
-
-
-try:
-    BNULLSTR = tobytes('')   # after dropping 2.5, change to: b''
-    BNEWLINE = tobytes('\n') # after dropping 2.5, change to: b'\n'
-except:
-    BNULLSTR = ''
-    BNEWLINE = '\n'
-
-
-def bytes_read(fd, sz):
-   """ Perform an os.read in a way that can handle both Python2 and Python3
-   IO.  Assume we are always piping only ASCII characters (since that is all
-   we have ever done with IRAF).  Either way, return the data as bytes.
-   """
-#  return tobytes(os.read(fd, sz))
-   return os.read(fd, sz) # already returns str in Py2.x and bytes in PY3K
-
-
-def bytes_write(fd, bufstr):
-   """ Perform an os.write in a way that can handle both Python2 and Python3
-   IO.  Assume we are always piping only ASCII characters (since that is all
-   we have ever done with IRAF).  Either way, write the binary data to fd.
-   """
-   return os.write(fd, tobytes(bufstr))
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/gfit.py b/required_pkgs/stsci.tools/lib/stsci/tools/gfit.py
deleted file mode 100644
index b74db03..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/gfit.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Return the gaussian fit of a 1D array.
-
-Uses mpfit.py - a python implementation of the Levenberg-Marquardt
-least-squares minimization, based on MINPACK-1. See nmpfit.py for
-the history of this module (fortran -> idl -> python).
-nmpfit.py is a version of mpfit.py which uses numarray.
-
- at author: Nadia Dencheva
- at version: '1.0 (2007-02-20)'
-
-"""
-from __future__ import division, print_function # confidence high
-
-__version__ = '1.0'          #Release version number only
-__vdate__ = '2007-02-20'     #Date of this version
-
-import numerixenv
-numerixenv.check()
-
-import nmpfit
-import numpy as N
-from numpy import random
-
-def _gauss_funct(p, fjac = None, x = None, y=None, err=None,
-weights=None):
-
-    """
-    Defines the gaussian function to be used as the model.
-
-    """
-
-    if p[2] != 0.0:
-        Z = (x - p[1]) / p[2]
-        model = p[0]*N.e ** (-Z**2 / 2.0)
-    else:
-        model = N.zeros(N.size(x))
-
-
-    status = 0
-    if weights != None:
-        if err != None:
-            print("Warning: Ignoring errors and using weights.\n")
-        return [status, (y - model) * weights]
-    elif err != None:
-        return [status, (y - model) / err]
-    else:
-        return [status, y-model]
-
-def test_gaussfit():
-    x=N.arange(10,20, 0.1)
-    #x1=N.arange(0,10,0.1)
-    #y1=5*N.e**(-(5-x1)**2/4)
-    n=random.randn(100)
-    y= 10*N.e**(-(15-x)**2/4) +n*3
-    #x=N.arange(100, typecode=N.Int)
-    #y=n.zeros(10, typecode=n.Float)
-    #y= random.rand(100)
-    #err = N.zeros(100)
-    #return gaussfit(x,y, maxiter=20) #, x,y, n
-    return gfit1d(y,x, maxiter=20)
-
-def gfit1d(y, x=None, err = None, weights=None, par=None, parinfo=None,
-maxiter=200, quiet=0):
-    """
-    Return the gaussian fit as an object.
-
-    Parameters
-    ----------
-    y:   1D Numarray array
-        The data to be fitted
-    x:   1D Numarray array
-        (optional) The x values of the y array. x and y must
-        have the same shape.
-    err: 1D Numarray array
-        (optional) 1D array with measurement errors, must be
-        the same shape as y
-    weights: 1D Numarray array
-        (optiional) 1D array with weights, must be the same
-        shape as y
-    par:  List
-        (optional) Starting values for the parameters to be fitted
-    parinfo: Dictionary of lists
-        (optional) provides additional information for the
-        parameters. For a detailed description see nmpfit.py.
-        Parinfo can be used to limit parameters or keep
-        some of them fixed.
-    maxiter: number
-        Maximum number of iterations to perform
-        Default: 200
-    quiet: number
-        if set to 1, nmpfit does not print to the screen
-        Default: 0
-
-    Examples
-    --------
-    >>> x=N.arange(10,20, 0.1)
-    >>> y= 10*N.e**(-(x-15)**2/4)
-    >>> print gfit1d(y,x=x, maxiter=20,quiet=1).params
-    [ 10.          15.           1.41421356]
-
-    """
-    if numerixenv.check_input(x) or numerixenv.check_input(y):
-        raise ValueError("Input is a NumArray array. This version of %s requires a Numpy array\n" % __name__)
-    
-    y = y.astype(N.float)
-    if weights != None:
-        weights = weights.astype(N.float)
-    if err != None:
-        err = err.astype(N.float)
-    if x == None and len(y.shape)==1 :
-        x = N.arange(len(y)).astype(N.float)
-    if x.shape != y.shape:
-        print("input arrays X and Y must be of equal shape.\n")
-        return
-
-
-    fa = {'x':x, 'y':y, 'err':err, 'weights':weights}
-
-    if par != None:
-        p = par
-    else:
-        ysigma = y.std()
-        ind = N.nonzero(y > ysigma)[0]
-        if len(ind) != 0:
-            xind = int(ind.mean())
-            p2 = x[xind]
-            p1 = y[xind]
-            p3 = 1.0
-        else:
-            ymax = y.max()
-            ymin = y.min()
-            ymean= y.mean()
-            if (ymax - ymean) > (abs(ymin - ymean)):
-                p1 = ymax
-            else: p1 = ymin
-            ind = (N.nonzero(y == p1))[0]
-            p2 = x.mean()
-            p3 = 1.
-
-
-        p = [p1, p2, p3]
-    m=nmpfit.mpfit(_gauss_funct, p,parinfo = parinfo, functkw=fa,
-maxiter=maxiter, quiet=quiet)
-    if (m.status <=0): print('error message = ', m.errmsg)
-    return m
-
-
-def plot_fit(y, mfit, x=None):
-    if x == None:
-        x=N.arange(len(y))
-    else:
-        x = x
-    p = mfit.params
-    #y = gauss_funct(p, y)
-    yy = p[0] + N.e**(-0.5*(x-p[1])**2/p[2]**2)
-    try:
-        import pylab
-    except ImportError:
-        print("Matplotlib is not available.\n")
-        return
-    pylab.plot(x,yy)
-
-def test():
-    import doctest
-    import gfit
-    return doctest.testmod(gfit)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/imageiter.py b/required_pkgs/stsci.tools/lib/stsci/tools/imageiter.py
deleted file mode 100644
index 155d0ee..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/imageiter.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-
-License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-"""
-from __future__ import absolute_import, division, generators # confidence medium
-
-from . import numerixenv
-numerixenv.check()
-
-import numpy as N
-
-BUFSIZE = 1024*1000   # 1Mb cache size
-
-__version__ = '0.2'
-
-
-def ImageIter(imgarr,bufsize=None,overlap=0,copy=0):
-
-    imgarr = N.asarray(imgarr)
-    
-    if bufsize == None: bufsize = BUFSIZE
-
-    if len(imgarr.shape) == 1:
-        if copy:
-            yield imgarr.copy()
-        else:
-            yield imgarr
-    else:
-        nrows = int(bufsize / (imgarr.itemsize * imgarr.shape[1]))    
-        niter = int(imgarr.shape[0] / nrows) * nrows
-    
-        if copy:
-                # Create a cache that will contain a copy of the input
-                    # not just a view...
-                    _cache = N.zeros((nrows,imgarr.shape[1]),dtype=imgarr.dtype.char)
-
-        for pix in range(0,niter+1,nrows):
-                    if copy:
-                            _cache = imgarr[pix:pix+nrows].copy()
-                            yield _cache
-                    else:
-                            yield imgarr[pix:pix+nrows]
-                    if copy:
-                            _cache *= 0
-
-                    pix -= overlap  
-             
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/irafglob.py b/required_pkgs/stsci.tools/lib/stsci/tools/irafglob.py
deleted file mode 100644
index a537962..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/irafglob.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-"""
-from __future__ import absolute_import, division # confidence high
-import glob
-try:
-    from .fileutil import osfn # added to provide interpretation of environment variables
-except:
-    osfn = None
-__author__ = 'Paul Barrett'
-__version__ = '1.1'
-
-def irafglob(inlist, atfile=None):
-    """ Returns a list of filenames based on the type of IRAF input.
-
-    Handles lists, wild-card characters, and at-files.  For special
-    at-files, use the atfile keyword to process them.
-
-    This function is recursive, so IRAF lists can also contain at-files
-    and wild-card characters, e.g. `a.fits`, `@file.lst`, `*flt.fits`.
-    """
-
-    # Sanity check
-    if inlist == None or len(inlist) == 0:
-        return []
-
-    # Determine which form of input was provided:
-    if isinstance(inlist, list):
-        #  python list
-        flist = []
-        for f in inlist:
-            flist += irafglob(f)
-    elif ',' in inlist:
-        #  comma-separated string list
-        flist = []
-        for f in inlist.split(','):
-            f = f.strip()
-            flist += irafglob(f)
-    elif inlist[0] == '@':
-        #  file list
-        flist = []
-        for f in open(inlist[1:], 'r').readlines():
-            f = f.rstrip()
-            # hook for application specific atfiles.
-            if atfile:
-                f = atfile(f)
-            flist += irafglob(f)
-    else:
-        #  shell globbing
-        if osfn:
-            inlist = osfn(inlist)
-        flist = glob.glob(inlist)
-
-    return flist
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/irafglobals.py b/required_pkgs/stsci.tools/lib/stsci/tools/irafglobals.py
deleted file mode 100644
index 0eb8dae..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/irafglobals.py
+++ /dev/null
@@ -1,442 +0,0 @@
-"""module irafglobals.py -- widely used IRAF constants and objects
-
-NOTE!  This module does NOT require the installation of IRAF.  It's location
-in stsci.tools is safe because it is intended to remain free of such dependency.
-
-yes, no         Boolean values
-IrafError       Standard IRAF exception
-Verbose         Flag indicating verbosity level
-userIrafHome    User's IRAF home directory (./ or ~/iraf/)
-userWorkingHome User's working home directory (the directory
-                when this module gets imported.)
-EOF             End-of-file indicator object
-INDEF           Undefined object
-IrafTask        "Tag" class for IrafTask type.
-IrafPkg         "Tag" class for IrafPkg type
-
-This is defined so it is safe to say 'from irafglobals import *'
-
-The tag classes do nothing except allow checks of types via (e.g.)
-isinstance(o,IrafTask).  Including it here decouples the other classes
-from the module that actually implements IrafTask, greatly reducing the
-need for mutual imports of modules by one another.
-
-$Id: irafglobals.py 38909 2015-04-08 17:41:07Z bsimon $
-
-Taken from pyraf.irafglobals, originally signed "R. White, 2000 Jan 5"
-"""
-from __future__ import absolute_import, division
-
-import sys
-PY3K = sys.version_info[0] > 2
-
-if PY3K:
-    string_types = str
-    number_types = (int, float)
-else:
-    string_types = basestring
-    number_types = (int, long, float)
-
-import os
-from . import compmixin
-_os = os
-_compmixin = compmixin
-del os, compmixin
-
-class IrafError(Exception):
-    def __init__(self, msg, errno=-1, errmsg="", errtask=""):
-        Exception.__init__(self, msg)
-        self.errno = errno
-        self.errmsg = errmsg or msg
-        self.errtask = errtask
-
-# -----------------------------------------------------
-# Verbose: verbosity flag
-# -----------------------------------------------------
-
-# make Verbose an instance of a class so it can be imported
-# into other modules and changed by them
-
-class _VerboseClass(_compmixin.ComparableIntBaseMixin):
-    """Container class for verbosity (or other) value"""
-    def __init__(self, value=0): self.value = value
-    def set(self, value): self.value = value
-    def get(self): return self.value
-    def _cmpkey(self): return self.value
-    def __nonzero__(self): return self.value != 0
-    def __bool__(self):    return self.value != 0
-    def __str__(self): return str(self.value)
-
-Verbose = _VerboseClass()
-
-# -----------------------------------------------------
-# userWorkingHome is current working directory
-# -----------------------------------------------------
-
-userWorkingHome = _os.getcwd()
-
-# -----------------------------------------------------
-# userIrafHome is location of user's IRAF home directory
-# -----------------------------------------------------
-
-# If login.cl exists here, use this directory as home.
-# Otherwise look for ~/iraf.
-
-if _os.path.exists('./login.cl'):
-    userIrafHome = _os.path.join(userWorkingHome,'')
-elif _os.path.exists(_os.path.expanduser('~/.iraf/login.cl')):
-    userIrafHome = _os.path.expanduser('~/.iraf')
-else:
-    userIrafHome = _os.path.join(_os.getenv('HOME','.'),'iraf','')
-    if not _os.path.exists(userIrafHome):
-        # no ~/iraf, just use '.' as home
-        userIrafHome = _os.path.join(userWorkingHome,'')
-
-# -----------------------------------------------------
-# Boolean constant class
-# -----------------------------------------------------
-
-class _Boolean(_compmixin.ComparableMixin):
-    """Class of boolean constant object"""
-    def __init__(self, value=None):
-        # change value to 1 or 0
-        if value:
-            self.__value = 1
-        else:
-            self.__value = 0
-        self.__strvalue = ["no", "yes"][self.__value]
-
-    def __copy__(self):
-        """Don't bother to make a copy"""
-        return self
-
-    def __deepcopy__(self, memo=None):
-        """Don't bother to make a copy"""
-        return self
-
-    def _compare(self, other, method):
-        # _Boolean vs. _Boolean
-        if isinstance(other, _Boolean):
-            return method(self.__value, other.__value)
-        # _Boolean vs. string:
-        # If a string, compare with string value of this parameter.
-        # Allow uppercase "YES", "NO" as well as lowercase.
-        # Also allows single letter abbrevation "y" or "n".
-        if isinstance(other, string_types):
-            ovalue = other.lower()
-            if len(ovalue)==1:
-                return method(self.__strvalue[0], ovalue)
-            else:
-                return method(self.__strvalue, ovalue)
-        # _Boolean vs. all other types (int, float, bool, etc) - treat this
-        # value like an integer
-        return method(self.__value, other)
-
-    def __nonzero__(self): return self.__value != 0
-    def __bool__(self):    return self.__value != 0
-    def __repr__(self): return self.__strvalue
-    def __str__(self): return self.__strvalue
-    def __int__(self): return self.__value
-    def __float__(self): return float(self.__value)
-
-# create yes, no boolean values
-
-yes = _Boolean(1)
-no = _Boolean(0)
-
-
-# -----------------------------------------------------
-# define end-of-file object
-# if printed, says 'EOF'
-# if converted to integer, has value -2 (special IRAF value)
-# Implemented as a singleton, although the singleton
-# nature is not really essential
-# -----------------------------------------------------
-
-class _EOFClass(_compmixin.ComparableMixin):
-    """Class of singleton EOF (end-of-file) object"""
-    def __init__(self):
-        global EOF
-        if EOF is not None:
-            # only allow one to be created
-            raise RuntimeError("Use EOF object, not _EOFClass")
-
-    def __copy__(self):
-        """Not allowed to make a copy"""
-        return self
-
-    def __deepcopy__(self, memo=None):
-        """Not allowed to make a copy"""
-        return self
-
-    def _compare(self, other, method):
-        if isinstance(other, _EOFClass):
-            # Despite trying to create only one EOF object, there
-            # could be more than one.  All EOFs are equal.
-            return method(1, 1)
-        if isinstance(other, string_types):
-            # If a string, compare with 'EOF'
-            return method("EOF", other)
-        if isinstance(other, number_types):
-            # If a number, compare with -2
-            return method(-2, other)
-        # what else could it be?
-        return NotImplemented
-
-    def __repr__(self): return "EOF"
-    def __str__(self): return "EOF"
-    def __int__(self): return -2
-    def __float__(self): return -2.0
-
-# initialize EOF to None first so singleton scheme works
-
-EOF = None
-EOF = _EOFClass()
-
-# -----------------------------------------------------
-# define IRAF-like INDEF object
-# -----------------------------------------------------
-
-class _INDEFClass(object):
-    """Class of singleton INDEF (undefined) object"""
-
-    def __new__(cls):
-        # Guido's example Singleton pattern
-        it = cls.__dict__.get("__it__")
-        if it is not None:
-            return it
-        # this use of super gets the correct version of __new__ for the
-        # int and float subclasses too
-        cls.__it__ = it = super(_INDEFClass, cls).__new__(cls)
-        return it
-
-    def __copy__(self):
-        """Not allowed to make a copy"""
-        return self
-
-    def __deepcopy__(self, memo=None):
-        """Not allowed to make a copy"""
-        return self
-
-    def __lt__(self, other): return INDEF
-    def __le__(self, other): return INDEF
-    def __gt__(self, other): return INDEF
-    def __ge__(self, other): return INDEF
-
-    def __eq__(self, other):
-        # Despite trying to create only one INDEF object, there
-        # could be more than one.  All INDEFs are equal.
-        # Also allow "INDEF" - CDS 17Nov2011
-        return isinstance(other, _INDEFClass) or (other and str(other)=="INDEF")
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __repr__(self): return "INDEF"
-    def __str__(self): return "INDEF"
-
-    __oct__ = __str__
-    __hex__ = __str__
-
-    # type conversions return various types of INDEF objects
-    # this is necessary for Python 2.4
-
-    def __int__(self): return _INDEF_int
-    def __long__(self): return _INDEF_int
-    def __float__(self): return _INDEF_float
-
-    def __nonzero__(self): return False # need bool return type
-
-    # all operations on INDEF return INDEF
-
-    def __add__(self, other): return INDEF
-
-    __sub__    = __add__
-    __mul__    = __add__
-    __rmul__   = __add__
-    __div__    = __add__
-    __mod__    = __add__
-    __divmod__ = __add__
-    __pow__    = __add__
-    __lshift__ = __add__
-    __rshift__ = __add__
-    __and__    = __add__
-    __xor__    = __add__
-    __or__     = __add__
-
-    __radd__    = __add__
-    __rsub__    = __add__
-    __rmul__    = __add__
-    __rrmul__   = __add__
-    __rdiv__    = __add__
-    __rmod__    = __add__
-    __rdivmod__ = __add__
-    __rpow__    = __add__
-    __rlshift__ = __add__
-    __rrshift__ = __add__
-    __rand__    = __add__
-    __rxor__    = __add__
-    __ror__     = __add__
-
-    def __neg__(self): return INDEF
-
-    __pos__    = __neg__
-    __abs__    = __neg__
-    __invert__ = __neg__
-
-INDEF = _INDEFClass()
-
# Classes that inherit from built-in types are required for Python 2.4
# so that int and float conversion functions work correctly.
# Unfortunately, if you call int(_INDEF_int) it ignores the
# __int__ method and returns zero, so these objects should be
# used sparingly and replaced with standard INDEF wherever
# possible.

# int-flavored and float-flavored INDEF singletons, returned by
# _INDEFClass.__int__ / __float__ above
class _INDEFClass_int(_INDEFClass, int): pass
class _INDEFClass_float(_INDEFClass, float): pass
_INDEF_int = _INDEFClass_int()
_INDEF_float = _INDEFClass_float()
-
-# -----------------------------------------------------
-# define IRAF-like EPSILON object
-# -----------------------------------------------------
-
class _EPSILONClass(_compmixin.ComparableFloatBaseMixin):
    """Singleton EPSILON object, for floating-point comparison.

    The value is read once from $hlib/mach.h (EPSILONR) by setvalue(),
    falling back to a hard-wired default; after that it is immutable.
    """

    def __new__(cls):
        # Guido's example Singleton pattern
        it = cls.__dict__.get("__it__")
        if it is not None:
            return it
        cls.__it__ = it = super(_EPSILONClass, cls).__new__(cls)
        return it

    def __init__(self):
        # go through __dict__ to bypass our own __setattr__ guard
        self.__dict__["_value"] = None

    def setvalue(self):
        """Read EPSILONR from $hlib/mach.h, or fall back to the default."""
        DEFAULT_VALUE = 1.192e-7
        hlib = _os.environ.get("hlib")
        if hlib is None:
            self._value = DEFAULT_VALUE
            return
        # with-statement so the file is closed even if parsing raises
        # (the original open/readlines/close leaked the handle on error)
        with open(_os.path.join(hlib, "mach.h")) as fd:
            lines = fd.readlines()
        foundit = False
        for line in lines:
            words = line.split()
            if len(words) < 1 or words[0] == "#":
                continue
            if words[0] == "define" and words[1] == "EPSILONR":
                strvalue = words[2]
                if strvalue[0] == "(":
                    # strip surrounding parentheses, e.g. "(1.19e-7)"
                    strvalue = strvalue[1:-1]
                self._value = float(strvalue)
                foundit = True
                break
        if not foundit:
            self._value = DEFAULT_VALUE

    def __copy__(self):
        """Not allowed to make a copy"""
        return self

    def __deepcopy__(self, memo=None):
        """Not allowed to make a copy"""
        return self

    def __setattr__(self, name, value):
        """Not allowed to modify the value or add a new attribute"""
        if name == "_value":
            if self.__dict__["_value"] is None:
                self.__dict__["_value"] = value
            else:
                raise RuntimeError("epsilon cannot be modified")
        else:
            # silently ignore any other attribute assignment
            pass

    def __delattr__(self, value):
        """Not allowed to delete the value"""
        pass

    # comparison key used by ComparableFloatBaseMixin
    def _cmpkey(self): return self._value

    def __repr__(self): return "%.6g" % self._value
    def __str__(self): return "%.6g" % self._value

    __oct__ = None
    __hex__ = None

    def __int__(self): return 0
    def __long__(self): return 0
    def __float__(self): return self._value

    def __nonzero__(self): return True # need bool return type
    __bool__ = __nonzero__  # Python 3 spelling

    # arithmetic delegates to the underlying float value

    def __add__(self, other):
        return self._value + other

    def __sub__(self, other):
        return self._value - other

    def __mul__(self, other):
        return self._value * other

    def __div__(self, other):
        return self._value / other

    # Python 3 '/' and '//' (missing upstream -> TypeError on Py3)
    def __truediv__(self, other):
        return self._value / other

    def __floordiv__(self, other):
        return self._value // other

    def __mod__(self, other):
        return self._value % other

    def __divmod__(self, other):
        return (self._value // other, self._value % other)

    def __pow__(self, other):
        return self._value ** other

    def __neg__(self):
        return -self._value

    def __pos__(self):
        return self._value

    def __abs__(self):
        return abs(self._value)

    # arguments in reverse order
    def __radd__(self, other):
        return other + self._value

    def __rsub__(self, other):
        return other - self._value

    def __rmul__(self, other):
        return other * self._value

    def __rdiv__(self, other):
        return other / self._value

    def __rtruediv__(self, other):
        return other / self._value

    def __rfloordiv__(self, other):
        return other // self._value

    def __rmod__(self, other):
        return other % self._value

    def __rdivmod__(self, other):
        return (other // self._value, other % self._value)

    def __rpow__(self, other):
        return other ** self._value
-
# the singleton tolerance value; setvalue() fixes it permanently
epsilon = _EPSILONClass()
epsilon.setvalue()
-
-# -----------------------------------------------------
-# tag classes
-# -----------------------------------------------------
-
class IrafTask:
    """Tag class used for type identification of IRAF task objects."""
    pass
-
class IrafPkg(IrafTask):
    """Tag class for IRAF package objects (a package is also a task)."""
    pass
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/irafutils.py b/required_pkgs/stsci.tools/lib/stsci/tools/irafutils.py
deleted file mode 100644
index 46bea90..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/irafutils.py
+++ /dev/null
@@ -1,540 +0,0 @@
-"""module irafutils.py -- general utility functions
-
-printCols       Print elements of list in cols columns
-printColsAuto   Print elements of list in the best number of columns
-stripQuotes     Strip single or double quotes off string and remove embedded
-                quote pairs
-csvSplit        Split comma-separated fields in strings (cover bug in csv mod)
-rglob           Recursive glob
-setWritePrivs   Convenience function to add/remove write privs
-removeEscapes   Remove escaped quotes & newlines from strings
-translateName   Convert CL parameter or variable name to Python-acceptable name
-untranslateName Undo Python conversion of CL parameter or variable name
-tkread          Read n bytes from file while running Tk mainloop
-tkreadline      Read a line from file while running Tk mainloop
-launchBrowser   Given a URL, try to pop it up in a browser on most platforms.
-
-$Id: irafutils.py 44236 2015-09-08 23:34:43Z jhunk $
-
-R. White, 1999 Jul 16
-"""
-from __future__ import division, print_function
-
-import os, stat, string, sys, re, fnmatch, keyword, select
-from . import capable
-
-PY3K = sys.version_info[0] > 2
-
-if capable.OF_GRAPHICS:
-    if PY3K:
-        import tkinter as Tkinter
-    else:
-        import Tkinter
-
def printColsAuto(in_strings, term_width=80, min_pad=1):
    """Center the given strings in columns, choosing the column count
    automatically, and return the formatted text (nothing is printed).

    in_strings: list/tuple/iterable of strings
    min_pad:    blanks on each side of an entry (so two adjacent entries
                are separated by at least twice this many)
    """
    # sanity check
    assert in_strings and len(in_strings)>0, 'Unexpected: '+repr(in_strings)

    # widest entry, including padding on both sides
    padded_width = len(max(in_strings, key=len)) + 2*min_pad
    # as many columns as fit, but never more than there are entries
    ncols = min(term_width//padded_width, len(in_strings))

    if ncols < 2:
        # single column: one (centered) item per line, even if over-wide
        return '\n'.join(s.center(term_width) for s in in_strings)

    # 2+ columns; each column is wide enough for every entry
    col_width = term_width//ncols
    pieces = []
    for idx, entry in enumerate(in_strings):
        pieces.append(entry.center(col_width))
        if (idx+1) % ncols == 0:
            pieces.append('\n')
    return ''.join(pieces).rstrip()
-
-
-def printCols(strlist,cols=5,width=80):
-
-    """Print elements of list in cols columns"""
-
-    # This may exist somewhere in the Python standard libraries?
-    # Should probably rewrite this, it is pretty crude.
-
-    nlines = (len(strlist)+cols-1)//cols
-    line = nlines*[""]
-    for i in range(len(strlist)):
-        c, r = divmod(i,nlines)
-        nwid = c*width//cols - len(line[r])
-        if nwid>0:
-            line[r] = line[r] + nwid*" " + strlist[i]
-        else:
-            line[r] = line[r] + " " + strlist[i]
-    for s in line:
-        print(s)
-
_re_doubleq2 = re.compile('""')
_re_singleq2 = re.compile("''")

def stripQuotes(value):
    """Strip one layer of surrounding single or double quotes from value
    and collapse embedded doubled quotes ("" -> " and '' -> ')."""
    for quote, doubled in (('"', _re_doubleq2), ("'", _re_singleq2)):
        if value[:1] == quote:
            value = value[1:]
            if value[-1:] == quote:
                value = value[:-1]
            # collapse embedded doubled quotes of the same kind
            value = doubled.sub(quote, value)
            break
    return value
-
def csvSplit(line, delim=',', allowEol=True):
    """ Take a string as input (e.g. a line in a csv text file), and break
    it into tokens separated by commas while ignoring commas embedded inside
    quoted sections.  This is exactly what the 'csv' module is meant for, so
    we *should* be using it, save that it has two bugs (described next) which
    limit our use of it.  When these bugs are fixed, this function should be
    forsaken in favor of direct use of the csv module (or similar).

    The basic use case is to split a function signature string, so for:
        afunc(arg1='str1', arg2='str, with, embedded, commas', arg3=7)
    we want a 3 element sequence:
        ["arg1='str1'", "arg2='str, with, embedded, commas'", "arg3=7"]

    but (Python 2 session):
    >>> import csv
    >>> y = "arg1='str1', arg2='str, with, embedded, commas', arg3=7"
    >>> rdr = csv.reader( (y,), dialect='excel', quotechar="'", skipinitialspace=True)
    >>> l = rdr.next(); print len(l), str(l)
    6 ["arg1='str1'", "arg2='str", 'with', 'embedded', "commas'", "arg3=7"]

    which we can see is not correct - we wanted 3 tokens.  This occurs in
    Python 2.5.2 and 2.6.  It seems to be due to the text at the start of each
    token ("arg1=") i.e. because the quote isn't for the whole token.  If we
    were to remove the names of the args and the equal signs, it works:

    >>> x = "'str1', 'str, with, embedded, commas', 7"
    >>> rdr = csv.reader( (x,), dialect='excel', quotechar="'", skipinitialspace=True)
    >>> l = rdr.next(); print len(l), str(l)
    3 ['str1', 'str, with, embedded, commas', '7']

    But even this usage is delicate - when we turn off skipinitialspace, it
    fails:

    >>> x = "'str1', 'str, with, embedded, commas', 7"
    >>> rdr = csv.reader( (x,), dialect='excel', quotechar="'")
    >>> l = rdr.next(); print len(l), str(l)
    6 ['str1', " 'str", ' with', ' embedded', " commas'", ' 7']

    So, for now, we'll roll our own.
    """
    # Algorithm:  read chars left to right, go from delimiter to delimiter,
    # but as soon as a single/double/triple quote is hit, scan forward
    # (ignoring all else) until its matching end-quote is found.
    # For now, we will not specially handle escaped quotes.
    tokens = []
    ldl = len(delim)
    # idiom fix: was "line != None and len(line) > 0"
    keepOnRollin = line is not None and len(line) > 0
    while keepOnRollin:
        tok = _getCharsUntil(line, delim, True, allowEol=allowEol)
        # len of token should always be > 0 because it includes end delimiter
        # except on last token
        if len(tok) > 0:
            # append it, but without the delimiter
            if tok[-ldl:] == delim:
                tokens.append(tok[:-ldl])
            else:
                tokens.append(tok) # tok goes to EOL - has no delimiter
                keepOnRollin = False
            line = line[len(tok):]
        else:
            # This is the case of the empty end token
            tokens.append('')
            keepOnRollin = False
    return tokens

# We'll often need to search a string for 3 possible characters.  We could
# loop and check each one ourselves; we could do 3 separate find() calls;
# or we could do a compiled re.search().  For VERY long strings (hundreds
# of thousands of chars), it turns out that find() is so fast and that
# re (even compiled) has enough overhead, that 3 find's is the same or
# slightly faster than one re.search with three chars in the re expr.
# Of course, both methods are much faster than an explicit loop.
# Since these strings will be short, the fastest method is re.search()
_re_sq = re.compile(r"'")
_re_dq = re.compile(r'"')
_re_comma_sq_dq = re.compile('[,\'"]')

def _getCharsUntil(buf, stopChar, branchForQuotes, allowEol):
    """Return the prefix of buf up to and including the first stopChar,
    skipping over quoted substrings when branchForQuotes is set.
    Returns the whole buffer if stopChar is never found (or raises
    ValueError for an unterminated quote when allowEol is false)."""

    # Sanity checks
    if buf is None: return None
    if len(buf) <= 0: return ''

    # Search chars left-to-right looking for stopChar
    sought = (stopChar,)
    theRe = None
    if branchForQuotes:
        sought = (stopChar,"'",'"') # see later, we'll handle '"""' too
        if stopChar == ',': theRe = _re_comma_sq_dq # pre-compiled common case
    else:
        if stopChar == '"': theRe = _re_dq # pre-compiled common case
        if stopChar == "'": theRe = _re_sq # pre-compiled common case

    if theRe is None:  # idiom fix: was "== None"
        theRe = re.compile('['+''.join(sought)+']')

    mo = theRe.search(buf)

    # No match found; stop
    if mo is None:  # idiom fix: was "== None"
        if stopChar not in ('"', "'"):
            # this is a primary search, not a branch into quoted text
            return buf # searched until we hit the EOL, must be last token
        else:
            # this is a branch into a quoted string - do we allow EOL here?
            if allowEol:
                return buf
            else:
                raise ValueError('Unfound end-quote, buffer: '+buf)

    # The expected match was found. Stop.
    if mo.group() == stopChar:
        return buf[:1 + mo.start()] # return token plus stopChar at end

    # Should not get to this point unless in a branch-for-quotes situation.
    assert branchForQuotes,"Programming error! shouldnt be here w/out branching"

    # Quotes were found.
    # There are two kinds, but double quotes could be the start of
    # triple double-quotes. (""") So get the substring to create the token.
    #
    #    token = preQuote+quotedPart+postQuote (e.g.: "abc'-hi,ya-'xyz")
    #
    preQuote = buf[:mo.start()]
    if mo.group() == "'":
        quotedPart = "'"+_getCharsUntil(buf[1+mo.start():],"'",False,allowEol)
    else:
        # first double quote (are there 3 in a row?)
        idx = mo.start()
        if len(buf) > idx+2 and '"""' == buf[idx:idx+3]:
            # We ARE in a triple-quote sub-string
            end_t_q = buf[idx+3:].find('"""')
            if end_t_q < 0:
                # hit end of line before finding end quote
                if allowEol:
                    quotedPart = buf[idx:]
                else:
                    raise ValueError('Unfound triple end-quote, buffer: '+buf)
            else:
                quotedPart = buf[idx:idx+3+end_t_q+1]
        else:
            quotedPart = '"'+_getCharsUntil(buf[1+mo.start():],'"',False,allowEol)
    lenSoFar = len(preQuote)+len(quotedPart)
    if lenSoFar < len(buf):
        # now get back to looking for end delimiter
        postQuote = _getCharsUntil(buf[lenSoFar:], stopChar,
                                   branchForQuotes, allowEol)
        return preQuote+quotedPart+postQuote
    else:
        return buf # at end
-
def testCsvSplit(quiet=True):
    """Self-test for csvSplit; raises AssertionError on any failure,
    returns True when all cases pass."""
    # cases: (input string, expected length, expected repr of output)
    cases = (
        (None,                0, "[]"),
        ('',                  0, "[]"),
        (' ',                 1, "[' ']"),
        ('a',                 1, "['a']"),
        (',',                 2, "['', '']"),
        (',a',                2, "['', 'a']"),
        ('a,',                2, "['a', '']"),
        (',a,',               3, "['', 'a', '']"),
        ("abc'-hi,ya-'xyz",   1, """["abc'-hi,ya-'xyz"]"""),
        ('abc"double-quote,eg"xy,z',    2, """['abc"double-quote,eg"xy', 'z']"""),
        ('abc"""triple-quote,eg"""xyz', 1, '[\'abc"""triple-quote,eg"""xyz\']'),
        ("'s1', 'has, comma', z",       3, """["'s1'", " 'has, comma'", ' z']"""),
        ("a='s1', b='has,comma,s', c",  3, """["a='s1'", " b='has,comma,s'", ' c']"""),
    )
    for text, exp_len, exp_repr in cases:
        if not quiet:
            print("Testing: "+repr(text))
        result = csvSplit(text, ',', True)
        assert len(result) == exp_len and repr(result) == exp_repr, \
           "For case: "+repr(text)+" expected:\n"+exp_repr+"\nbut got:\n"+repr(result)
    return True
-
def rglob(root, pattern):
    """Same as glob.glob, but recursively checks subdirectories.
    Returns an empty list if root or pattern is None."""
    # Thanks to Alex Martelli for basics on Stack Overflow
    matches = []
    if root is None or pattern is None:
        return matches
    for dirpath, _subdirs, filenames in os.walk(root):
        matches.extend(os.path.join(dirpath, name)
                       for name in fnmatch.filter(filenames, pattern))
    return matches
-
def setWritePrivs(fname, makeWritable, ignoreErrors=False):
    """Make the file named fname writable (or not) by the user.

    When ignoreErrors is true, a failing chmod is silently ignored;
    otherwise the OSError propagates.  Nothing ground-breaking here, it
    just avoids having to repeat this little bit of code.
    """
    mode = os.stat(fname).st_mode
    if makeWritable:
        mode |= stat.S_IWUSR
    else:
        mode &= ~stat.S_IWUSR
    try:
        os.chmod(fname, mode)
    except OSError:
        if not ignoreErrors:
            raise  # just try, don't whine -- unless asked to
-
-
def removeEscapes(value, quoted=0):
    """Strip the backslash from escaped quotes (which IRAF seems to just
    stick in for fun sometimes) and remove backslash-newline pairs.

    If quoted is true, also remove all blanks/tabs following a
    backslash-newline (a nasty thing IRAF does for continuations inside
    quoted strings).
    XXX Should we remove double-backslashes too?
    """
    # drop the backslash from \" and \' pairs, scanning left to right
    for escaped in (r'\"', r"\'"):
        pos = value.find(escaped)
        while pos >= 0:
            value = value[:pos] + value[pos+1:]
            pos = value.find(escaped, pos+1)
    # delete backslash-newlines (plus trailing whitespace when quoted)
    pos = value.find("\\\n")
    while pos >= 0:
        cut = pos + 2
        if quoted:
            # ignore blanks and tabs following \-newline in quoted strings
            for ch in value[pos+2:]:
                if ch not in ' \t':
                    break
                cut += 1
        value = value[:pos] + value[cut:]
        pos = value.find("\\\n", pos+1)
    return value
-
-# Must modify Python keywords to make Python code legal.  I add 'PY' to
-# beginning of Python keywords (and some other illegal Python identifiers).
-# It will be stripped off where appropriate.
-
def translateName(s, dot=0):
    """Convert a CL parameter or variable name to a Python-acceptable name.

    Translate embedded dollar signs to 'DOLLAR'; add a 'PY' prefix to
    components that are Python reserved words or start with a digit.
    If dot is true, '.' separators are also replaced with 'DOT'.
    """
    s = s.replace('$', 'DOLLAR')
    parts = []
    for part in s.split('.'):
        if not part or part[0] in string.digits or keyword.iskeyword(part):
            part = 'PY' + part
        parts.append(part)
    sep = 'DOT' if dot else '.'
    return sep.join(parts)
-
def untranslateName(s):
    """Undo the Python-name conversion done by translateName."""
    s = s.replace('DOT', '.').replace('DOLLAR', '$')
    # drop the 'PY' prefix at the start of each name component
    if s.startswith('PY'):
        s = s[2:]
    return s.replace('.PY', '.')
-
-# procedures to read while still allowing Tk widget updates
-
def init_tk_default_root():
    """Ensure Tkinter's _default_root has been initialized.

    Safe to call ahead of time whenever _default_root may be needed; a
    no-op if a root window already exists.  Raises RuntimeError when
    graphics are unavailable.
    """
    if not capable.OF_GRAPHICS:
        raise RuntimeError("Cannot run this command without graphics")
    if not Tkinter._default_root:  # Tkinter imported above
        hidden_root = Tkinter.Tk()
        # keep the implicitly-created root window invisible
        hidden_root.withdraw()
-
def tkread(file, n=0):

    """Read n bytes from file (or socket) while running Tk mainloop.

    If n=0 then this runs the mainloop until some input is ready on
    the file.  (See tkreadline for an application of this.)  The
    file must have a fileno method.
    """

    # delegate to the _TkRead helper class defined below
    return _TkRead().read(file, n)
-
def tkreadline(file=None):

    """Read a line from file while running Tk mainloop.

    If the file is not line-buffered then the Tk mainloop will stop
    running after one character is typed.  The function will still work
    but Tk widgets will stop updating.  This should work OK for stdin and
    other line-buffered filehandles.  If file is omitted, reads from
    sys.stdin.

    The file must have a readline method.  If it does not have a fileno
    method (which can happen e.g. for the status line input on the
    graphics window) then the readline method is simply called directly.
    """

    if file is None:
        file = sys.stdin
    if not hasattr(file, "readline"):
        raise TypeError("file must be a filehandle with a readline method")

    # If we get in here for something not GUI-related (e.g. terminal-
    # focused code in a sometimes-GUI app) then skip tkread and simply call
    # readline on the input eg. stdin.  Otherwise we'd fail in _TkRead().read()
    try:
        fd = file.fileno()
    except Exception:
        # No usable fileno (e.g. io.StringIO raises UnsupportedOperation).
        # Was a bare "except:", which also swallowed KeyboardInterrupt.
        fd = None

    # BUG FIX: test "is not None" instead of truthiness -- stdin's file
    # descriptor is 0, which is falsy but perfectly valid
    if fd is not None and capable.OF_GRAPHICS:
        tkread(fd, 0)
        # if EOF was encountered on a tty, avoid reading again because
        # it actually requests more data
        if not select.select([fd], [], [], 0)[0]:
            return ''
    return file.readline()
-
class _TkRead:

    """Run Tk mainloop while waiting for a pending read operation"""

    def read(self, file, nbytes):
        """Read nbytes characters from file while running Tk mainloop.

        ``file`` may be an integer file descriptor or an object with a
        fileno() method.  Returns the data read, as a str.
        """
        if not capable.OF_GRAPHICS:
            raise RuntimeError("Cannot run this command without graphics")
        if isinstance(file, int):
            fd = file
        else:
            # Otherwise, assume we have Python file object
            try:
                fd = file.fileno()

            except:
                raise TypeError("file must be an integer or a filehandle/socket")
        init_tk_default_root() # harmless if already done
        self.widget = Tkinter._default_root
        if not self.widget:
            # no Tk widgets yet, so no need for mainloop
            # (shouldnt happen now with init_tk_default_root)
            s = []
            while nbytes>0:
                snew = os.read(fd, nbytes) # returns bytes in PY3K
                if snew:
                    if PY3K: snew = snew.decode('ascii')
                    s.append(snew)
                    nbytes -= len(snew)
                else:
                    # EOF -- just return what we have so far
                    break
            return "".join(s)
        else:
            # Register a Tk file handler so _read runs whenever data (or
            # an exception condition) is pending on fd, then block in the
            # mainloop until _read calls quit(); always unregister after.
            self.nbytes = nbytes
            self.value = []
            self.widget.tk.createfilehandler(fd,
                                    Tkinter.READABLE | Tkinter.EXCEPTION,
                                    self._read)
            try:
                self.widget.mainloop()
            finally:
                self.widget.tk.deletefilehandler(fd)
            return "".join(self.value)


    def _read(self, fd, mask):
        """Read waiting data and terminate Tk mainloop if done"""
        try:
            # if EOF was encountered on a tty, avoid reading again because
            # it actually requests more data
            if select.select([fd],[],[],0)[0]:
                snew = os.read(fd, self.nbytes) # returns bytes in PY3K
                if PY3K: snew = snew.decode('ascii')
                self.value.append(snew)
                self.nbytes -= len(snew)
            else:
                snew = ''
            # done when the byte count is satisfied or EOF was hit
            if (self.nbytes <= 0 or len(snew) == 0) and self.widget:
                # stop the mainloop
                self.widget.quit()
        except OSError:
            raise IOError("Error reading from %s" % (fd,))
-
def launchBrowser(url, brow_bin='mozilla', subj=None):
    """ Given a URL, try to pop it up in a browser on most platforms.
    brow_bin is only used on OS's where there is no "open" or "start" cmd.
    """

    if not subj:
        subj = url

    # Tries to use webbrowser module on most OSes, unless a system command
    # is needed.  (E.g. win, linux, sun, etc)
    # BUG FIX: the original wrote "not in ('os2warp, iphone')" -- a
    # parenthesized single string, not a 2-tuple -- so the intended
    # platform exclusions never matched.
    if sys.platform not in ('os2warp', 'iphone'):
        import webbrowser
        if not webbrowser.open(url):
            print("Error opening URL: "+url)
        else:
            print('Help on "'+subj+'" is now being displayed in a web browser')
        return

    # Go ahead and fork a subprocess to call the correct binary
    pid = os.fork()
    if pid == 0: # child
        if sys.platform == 'darwin':
            if 0 != os.system('open "'+url+'"'): # does not seem to keep '#.*'
                print("Error opening URL: "+url)
        os._exit(0)
    else: # parent
        if not subj:
            subj = url
        print('Help on "'+subj+'" is now being displayed in a browser')
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/iterfile.py b/required_pkgs/stsci.tools/lib/stsci/tools/iterfile.py
deleted file mode 100644
index f2a8427..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/iterfile.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-
-License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-"""
-from __future__ import division # confidence high
-
-from astropy.io import fits
-
-__version__ = '0.3 (01-July-2014)'
-
-
class IterFitsFile(object):
    """ This class defines an object which can be used to
        access the data from a FITS file without leaving
        the file-handle open between reads.

        Each access opens the file, reads what is needed and (unless
        ``inmemory`` is set) closes it again immediately.
    """
    def __init__(self, name):
        self.name = name        # original name, possibly with [ext] spec
        self.fname = None       # filename with any extension spec stripped
        self.extn = None        # parsed extension (list) or None
        self.handle = None      # open fits handle, or None when closed
        self.inmemory = False   # when True, keep the file open between reads
        self.compress = False   # set once a CompImageHDU is seen

        if not self.fname:
            self.fname, self.extn = parseFilename(name)

    def set_inmemory(self, val):
        """Sets inmemory attribute to either True or False """
        assert type(val) is bool, 'Please specify either True or False'
        self.inmemory = val

    def _shape(self):
        """ Returns the shape of the data array associated with this file."""
        hdu = self.open()
        _shape = hdu.shape
        if not self.inmemory:
            self.close()
            del hdu
        return _shape

    def _data(self):
        """ Returns the data array associated with this file/extension."""
        hdu = self.open()
        _data = hdu.data.copy()
        if not self.inmemory:
            self.close()
            del hdu
        return _data

    def type(self):
        """ Returns the dtype name of the data array for this file.
        (The original docstring incorrectly said 'shape'.)"""
        hdu = self.open()
        _type = hdu.data.dtype.name
        if not self.inmemory:
            self.close()
            del hdu
        return _type

    def open(self):
        """ Opens the file for subsequent access. """
        # idiom fix: was "self.handle == None"
        if self.handle is None:
            self.handle = fits.open(self.fname, mode='readonly')

        if self.extn:
            if len(self.extn) == 1:
                hdu = self.handle[self.extn[0]]
            else:
                hdu = self.handle[self.extn[0], self.extn[1]]
        else:
            hdu = self.handle[0]
        if isinstance(hdu, fits.hdu.compressed.CompImageHDU):
            self.compress = True
        return hdu


    def close(self):
        """ Closes file handle for this FITS object."""
        # idiom fix: was "self.handle != None"
        if self.handle is not None:
            self.handle.close()
        self.handle = None

    def __getitem__(self, i):
        """ Returns a section for the rows specified. """
        # All I/O must be done here, starting with open
        hdu = self.open()
        if self.inmemory or self.compress:
            # NOTE(review): .data is used instead of .section for
            # compressed HDUs -- presumably .section is unsupported there
            _data = hdu.data[i, :]
        else:
            _data = hdu.section[i, :]

        if not self.inmemory:
            self.close()
            del hdu

        return _data


    def __getattribute__(self, name):
        # route .data and .shape through the open-read-close helpers
        if name == 'data':
            return self._data()
        elif name == 'shape':
            return self._shape()
        else:
            return object.__getattribute__(self, name)
-
-
def parseFilename(filename):
    """
        Parse out filename from any specified extensions.
        Returns rootname and string version of extension name.

        Modified from 'pydrizzle.fileutil' to allow this
        module to be independent of PyDrizzle/MultiDrizzle.

    """
    bracket = filename.find('[')
    if bracket <= 0:
        # no extension spec present; default of 0 is used by callers
        return filename, None

    # Read extension name provided, e.g. 'sci,1', '2/4' or 'dq'
    rootname = filename[:bracket]
    extn = filename[bracket+1:-1]

    if repr(extn).find(',') > 1:
        # Two values given for extension: for example, 'sci,1' or 'dq,1'
        pieces = extn.split(',')
        extension = [pieces[0], int(pieces[1])]
    elif repr(extn).find('/') > 1:
        # We are working with GEIS group syntax
        group = str(extn[:extn.find('/')])
        extension = [int(group)]
    else:
        # Only one extension value specified: a bare number or an EXTNAME
        extension = [int(extn)] if extn.isdigit() else [extn]
    return rootname, extension
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/linefit.py b/required_pkgs/stsci.tools/lib/stsci/tools/linefit.py
deleted file mode 100644
index 052905e..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/linefit.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""
-Fit a line to a data set with optional weights.
-
-Returns the parameters of the model, bo, b1:
-Y = b0 + b1* X
-
-:author: Nadia Dencheva
-:version: '1.0 (2007-02-20)'
-
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-from . import numerixenv
-numerixenv.check()
-
-
-import numpy as N
-from numpy.core import around
-
-
-__version__ = '1.0'          #Release version number only
-__vdate__ = '2007-02-20'     #Date of this version
-
-
-
-def linefit(x,y,weights=None):
-
-    """
-    Parameters
-    ----------
-    y: 1D numpy array
-        The data to be fitted
-    x: 1D numpy array
-        The x values of the y array. x and y must
-        have the same shape.
-    weights:   1D numpy array, must have the same shape as x and y
-        weight values
-
-    Examples
-    --------
-    >>> x=N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5])
-    >>> y=N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18])
-    >>> around(linefit(x,y), decimals=5)
-    array([ 9.27273,  1.43636])
-    >>> x=N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7])
-    >>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9])
-    >>> around(linefit(x,y), decimals=5)
-    array([ 1.42564,  0.31579])
-    """
-    
-    if numerixenv.check_input(x) or numerixenv.check_input(y):
-        raise ValueError("Input is a NumArray array. This version of %s requires a Numpy array\n" % __name__)
-    
-    if len(x) != len(y):
-        print("Error: X and Y must have equal size\n")
-        return
-    n = len(x)
-    w = N.zeros((n,n)).astype(N.float)
-    if weights == None:
-        for i in N.arange(n):
-            w[i,i] = 1
-    else:
-        if len(weights) != n:
-            print("Error: Weights must have the same size as X and Y.\n")
-            return
-        for i in N.arange(n):
-            w[i,i] = weights[i]
-    x = x.astype(N.float)
-    y = y.astype(N.float)
-    # take the weighted avg for calculating the covariance
-    Xavg = N.sum(N.dot(w,x)) / N.sum(w.diagonal())
-    Yavg = N.sum(N.dot(w,y)) / N.sum(w.diagonal())
-
-    xm = x - Xavg
-    xmt = N.transpose(xm)
-    ym = y - Yavg
-
-    b1 = N.dot(xmt,N.dot(w,ym)) / N.dot(xmt ,N.dot(w,xm))
-    b0 = Yavg - b1 * Xavg
-
-    return b0, b1
-
-def test():
-    import doctest
-    import linefit
-    return doctest.testmod(linefit)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/listdlg.py b/required_pkgs/stsci.tools/lib/stsci/tools/listdlg.py
deleted file mode 100644
index 60abbbc..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/listdlg.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#
-# A home-grown list-selection convenience dialog.  As *soon* as Tkinter comes
-# with one of these, replace all uses of this one with that.  This currently
-# only allows single selection.
-#
-"""
-$Id: listdlg.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-import sys
-PY3K = sys.version_info[0] > 2
-
-from . import capable
-if capable.OF_GRAPHICS:
-    if PY3K:        
-        from tkinter import *
-        from tkinter.simpledialog import Dialog
-    else:
-        from Tkinter import *
-        from tkSimpleDialog import Dialog
-else:
-    Dialog = object
-
-class ListSingleSelectDialog(Dialog):
-
-    def __init__(self, title, prompt, choiceList, parent=None):
-
-        if not parent:
-            if PY3K:
-                import tkinter
-                parent = tkinter._default_root
-            else:
-                import Tkinter
-                parent = Tkinter._default_root
-            parent.withdraw()
-
-        self.__prompt = prompt
-        self.__choices = choiceList
-        self.__retval = None
-        self.__clickedOK = False
-        parent.update()
-        Dialog.__init__(self, parent, title) # enters main loop here
-
-
-    def get_current_index(self):
-        """ Return currently selected index (or -1) """
-
-        # Need to convert to int; currently API returns a tuple of string
-        curSel = self.__lb.curselection()
-        if curSel and len(curSel) > 0:
-            return int(curSel[0])
-        else:
-            return -1
-
-
-    def getresult(self): return self.__retval
-
-
-    def destroy(self):
-        # first save the selected index before it is destroyed
-        idx = self.get_current_index()
-        # in PyRAF, assume they meant the first one if they clicked nothing,
-        # since it is already active (underlined)
-        if idx < 0: idx = 0
-        # get the object at that index
-        if self.__clickedOK and idx >= 0: # otherwise is None
-            self.__retval = self.__choices[idx]
-        if self.__retval and type(self.__retval) == str:
-            self.__retval = self.__retval.strip()
-
-        # now destroy
-        self.__lb = None
-        Dialog.destroy(self)
-
-
-    def body(self, master):
-
-        label = Label(master, text=self.__prompt, justify=LEFT)
-#       label.grid(row=0, padx=8, sticky=W)
-        label.pack(side=TOP, fill=X, padx=10, pady=8)
-
-        frame = Frame(master)
-#       frame.grid(row=1, padx=8, sticky=W+E)
-        frame.pack(side=TOP, fill=X, padx=10, pady=8)
-
-        vscrollbar = Scrollbar(frame, orient=VERTICAL)
-        hscrollbar = Scrollbar(frame, orient=HORIZONTAL)
-        self.__lb = Listbox(frame,
-                            selectmode=BROWSE,
-                            xscrollcommand=hscrollbar.set,
-                            yscrollcommand=vscrollbar.set)
-#                           activestyle='none', # none = dont underline items
-        hscrollbar.config(command=self.__lb.xview)
-        hscrollbar.pack(side=BOTTOM, fill=X)
-        vscrollbar.config(command=self.__lb.yview)
-        vscrollbar.pack(side=RIGHT, fill=Y)
-        self.__lb.pack(side=LEFT, fill=BOTH, expand=1)
-
-        for itm in self.__choices:
-            self.__lb.insert(END, str(itm))
-
-        self.__lb.bind("<Double-Button-1>", self.ok) # dbl clk
-#       self.__lb.selection_set(0,0)
-        self.__lb.focus_set()
-
-        return self.__lb
-
-    def ok(self, val=None):
-        self.__clickedOK = True # save that this wasn't a cancel
-        Dialog.ok(self, val)
-
-    def validate(self): return 1
-
-
-if __name__ == "__main__":
-
-    root = Tk()
-    root.withdraw()
-    root.update()
-    x = ListSingleSelectDialog("Select Parameter File", \
-                               "Select which file you prefer for task/pkg:", \
-                               ['abc','def','ghi','jkl','1'], None)
-    print(str(x.getresult()))
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/logutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/logutil.py
deleted file mode 100644
index 11bc953..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/logutil.py
+++ /dev/null
@@ -1,679 +0,0 @@
-"""
-A collection of utilities for handling output to standard out/err as well as
-to file-based or other logging handlers through a single interface.
-"""
-
-
-import inspect
-import logging
-import os
-import sys
-import threading
-from stsci.tools.for2to3 import tostr
-
-PY3K = sys.version_info[0] > 2
-
-if PY3K:
-    from io import StringIO
-else:
-    from cStringIO import StringIO
-
-global_logging_started = False
-
-
-# The global_logging system replaces the raw_input builtin (input on Python 3)
-# for two reasons:
-#
-#  1) It's the easiest way to capture the raw_input prompt and subsequent user
-#     input to the log.
-#
-#  2) On Python 2.x raw_input() does not play nicely with GUI toolkits if
-#     sys.stdout has been replaced by a non-file object (as global_logging
-#     does).  The default raw_input() implementation first checks that
-#     sys.stdout and sys.stdin are connected to a terminal.  If so it uses the
-#     PyOS_Readline() implementation, which allows a GUI's event loop to run
-#     while waiting for user input via PyOS_InputHook().  However, if
-#     sys.stdout is not attached to a terminal, raw_input() uses
-#     PyFile_GetLine(), which blocks until a line is entered on sys.stdin,
-#     thus preventing the GUI from updating.  It doesn't matter if sys.stdin is
-#     still attached to the terminal even if sys.stdout isn't, nor does it
-#     automatically fall back on sys.__stdout__ and sys.__stdin__.
-#
-#     This replacement raw_input() reimplements most of the built in
-#     raw_input(), but is aware that sys.stdout may have been replaced and
-#     knows how to find the real stdout if so.
-#
-#     Note that this is a non-issue in Python 3 which has a new implementation
-#     in which it doesn't matter what sys.stdout points to--only that it has a
-#     fileno() method that returns the correct file descriptor for the
-#     console's stdout.
-if not PY3K:
-    import __builtin__ as builtins
-    from ctypes import pythonapi, py_object, c_void_p, c_char_p
-
-    # PyFile_AsFile returns a FILE * from a python file object.
-    # This is used later with pythonapi.PyOS_Readline to perform
-    # the readline.
-    pythonapi.PyFile_AsFile.argtypes = (py_object,)
-    pythonapi.PyFile_AsFile.restype = c_void_p
-    pythonapi.PyOS_Readline.argtypes = (c_void_p, c_void_p, c_char_p)
-    pythonapi.PyOS_Readline.restype = c_char_p
-
-    def global_logging_raw_input(prompt):
-        def get_stream(name):
-            if hasattr(sys, name):
-                stream = getattr(sys, name)
-                if isinstance(stream, file):
-                    return stream
-                elif isinstance(stream, StreamTeeLogger):
-                    return stream.stream
-            if hasattr(sys, '__%s__' % name):
-                stream = getattr(sys, '__%s__' % name)
-                if isinstance(stream, file):
-                    return stream
-            return None
-
-        def check_interactive(stream, name):
-            try:
-                fd = stream.fileno()
-            except:
-                # Could be an AttributeError, an OSError, and IOError, or who
-                # knows what else...
-                return False
-
-            realfd = {'stdin': 0, 'stdout': 1, 'stderr': 2}[name]
-
-            return fd == realfd and os.isatty(fd)
-
-
-        stdout = get_stream('stdout')
-        stdin = get_stream('stdin')
-        stderr = get_stream('stderr')
-
-        if stdout is None:
-            raise RuntimeError('raw_input(): lost sys.stdout')
-        if stdin is None:
-            raise RuntimeError('raw_input(): lost sys.stdin')
-        if stderr is None:
-            raise RuntimeError('raw_input(): lost sys.stderr')
-
-        if (not check_interactive(stdin, 'stdin') or
-            not check_interactive(stdout, 'stdout')):
-            # Use the built-in raw_input(); this will repeat some of the checks
-            # we just did, but will save us from having to reimplement
-            # raw_input() in its entirety
-            retval = builtins._original_raw_input(prompt)
-        else:
-            stdout.flush()
-            infd = pythonapi.PyFile_AsFile(stdin)
-            outfd = pythonapi.PyFile_AsFile(stdout)
-            retval = pythonapi.PyOS_Readline(infd, outfd, str(prompt))
-            retval = retval.rstrip('\n')
-
-        if isinstance(sys.stdout, StreamTeeLogger):
-            sys.stdout.log_orig(str(prompt) + retval, echo=False)
-
-        return retval
-else:
-    import builtins
-    def global_logging_raw_input(prompt):
-        retval = builtins._original_raw_input(prompt)        
-        if isinstance(sys.stdout, StreamTeeLogger):
-            sys.stdout.log_orig(str(prompt) + retval, echo=False)
-        return retval
-
-
-class StreamTeeLogger(logging.Logger):
-    """
-    A Logger implementation that is meant to replace an I/O stream such as
-    `sys.stdout`, `sys.stderr`, or any other stream-like object that supports a
-    `write()` method and a `flush()` method.
-
-    When `StreamTeeLogger.write` is called, the written strings are
-    line-buffered, and each line is logged through the normal Python logging
-    interface.  The `StreamTeeLogger` has two handlers:
-
-     * The LogTeeHandler redirects all log messages to a logger with the same
-       name as the module in which the `write()` method was called.  For
-       example, if this logger is used to replace `sys.stdout`, all `print`
-       statements in the module `foo.bar` will be logged to a logger called
-       ``foo.bar``.
-
-    * If the ``stream`` argument was provided, this logger also attaches a
-      `logging.StreamHandler` to itself for the given ``stream``.  For example,
-      if ``stream=sys.stdout`` then messages sent to this logger will be output
-      to `sys.stdout`.  However, only messages created through the `write()`
-      method call will re-output to the given stream.
-
-    Parameters
-    ----------
-    name : string
-        The name of this logger, as in `logging.Logger`
-
-    level : int (optional)
-        The minimum level at which to log messages sent to this logger; also
-        used as the default level for messages logged through the `write()`
-        interface (default: `logging.INFO`).
-
-    stream : stream-like object (optional)
-        The stream-like object (an object with `write()` and `flush()`
-        methods) to tee to; should be the same file object being replaced (i.e.
-        sys.stdout).  If `None` (the default) writes to this file will not be
-        sent to a stream logger.
-
-    See Also
-    --------
-    `EchoFilter` is a logger filter that can control which modules' output is
-    sent to the screen via the `StreamHandler` on this logger.
-    """
-
-    def __init__(self, name, level=logging.INFO, stream=None):
-        logging.Logger.__init__(self, name, level)
-        self.__thread_local_ctx = threading.local()
-        self.__thread_local_ctx.write_count = 0
-        self.propagate = False
-        self.buffer = ''
-
-        self.stream = None
-        self.set_stream(stream)
-
-        self.addHandler(_LogTeeHandler())
-        #self.errors = 'strict'
-        #self.encoding = 'utf8'
-
-    @property
-    def encoding(self):
-        if self.stream:
-            try:
-                return self.stream.encoding
-            except AttributeError:
-                pass
-
-        # Default value
-        return 'utf-8'
-
-    @property
-    def errors(self):
-        if self.stream:
-            try:
-                return self.stream.errors
-            except AttributeError:
-                pass
-
-        # Default value
-        return 'strict'
-
-    def set_stream(self, stream):
-        """
-        Set the stream that this logger is meant to replace.  Usually this will
-        be either `sys.stdout` or `sys.stderr`, but can be any object with
-        `write()` and `flush()` methods, as supported by
-        `logging.StreamHandler`.
-        """
-
-        for handler in self.handlers[:]:
-            if isinstance(handler, logging.StreamHandler):
-                self.handlers.remove(handler)
-
-        if stream is not None:
-            stream_handler = logging.StreamHandler(stream)
-            stream_handler.addFilter(_StreamHandlerEchoFilter())
-            stream_handler.setFormatter(logging.Formatter('%(message)s'))
-            self.addHandler(stream_handler)
-
-        self.stream = stream
-
-    def write(self, message):
-        """
-        Buffers each message until a newline is reached.  Each complete line is
-        then published to the logging system through ``self.log()``.
-        """
-
-        self.__thread_local_ctx.write_count += 1
-
-        try:
-            if self.__thread_local_ctx.write_count > 1:
-                return
-
-            # For each line in the buffer ending with \n, output that line to
-            # the logger
-            begin = 0
-            end = message.find('\n', begin) + 1
-            while end > begin:
-                if self.buffer:
-                    self.log_orig(self.buffer, echo=True)
-                    self.buffer = ''
-                self.log_orig(message[begin:end].rstrip(), echo=True)
-                begin = end
-                end = message.find('\n', begin) + 1
-            self.buffer = self.buffer + message[begin:]
-        finally:
-            self.__thread_local_ctx.write_count -= 1
-
-    def flush(self):
-        """
-        Flushes all handlers attached to this logger; this includes flushing
-        any attached stream-like object (e.g. `sys.stdout`).
-        """
-
-        for handler in self.handlers:
-            handler.flush()
-
-    def fileno(self):
-        fd = None
-        if self.stream:
-            try:
-                fd = self.stream.fileno()
-            except:
-                fd = None
-        if fd is None:
-            raise IOError('fileno() not defined for logger stream %r' %
-                          self.stream)
-        return fd
-
-    def log_orig(self, message, echo=True):
-        modname, path, lno, func = self.find_actual_caller()
-        self.log(self.level, message,
-                 extra={'orig_name': modname, 'orig_pathname': path,
-                        'orig_lineno': lno, 'orig_func': func, 'echo': echo})
-
-    def find_actual_caller(self):
-        """
-        Returns the full-qualified module name, full pathname, line number, and
-        function in which `StreamTeeLogger.write()` was called.  For example,
-        if this instance is used to replace `sys.stdout`, this will return the
-        location of any print statement.
-        """
-
-        # Gleaned from code in the logging module itself...
-        try:
-            f = sys._getframe(1)
-            ##f = inspect.currentframe(1)
-        except Exception:
-            f = None
-        # On some versions of IronPython, currentframe() returns None if
-         # IronPython isn't run with -X:Frames.
-        if f is not None:
-            f = f.f_back
-        rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
-        while hasattr(f, "f_code"):
-            co = f.f_code
-            filename = os.path.normcase(co.co_filename)
-            mod = inspect.getmodule(f)
-
-            if mod is None:
-                modname = '__main__'
-            else:
-                modname = mod.__name__
-
-            if modname == __name__:
-                # Crawl back until the first frame outside of this module
-                f = f.f_back
-                continue
-
-            rv = (modname, filename, f.f_lineno, co.co_name)
-            break
-        return rv
-
-
-class EchoFilter(object):
-    """
-    A logger filter primarily for use with `StreamTeeLogger`.  Adding an
-    `EchoFilter` to a `StreamTeeLogger` instances allows control over which
-    modules' print statements, for example, are output to stdout.
-
-    For example, to allow only output from the 'foo' module to be printed to
-    the console:
-
-    >>> stdout_logger = logging.getLogger('stsci.tools.logutil.stdout')
-    >>> stdout_logger.addFilter(EchoFilter(include=['foo']))
-
-    Now only print statements in the 'foo' module (or any sub-modules if 'foo'
-    is a package) are printed to stdout.   Any other print statements are just
-    sent to the appropriate logger.
-
-    Parameters
-    ----------
-    include : iterable
-        Packages or modules to include in stream output.  If set, then only the
-        modules listed here are output to the stream.
-
-    exclude : iterable
-        Packages or modules to be excluded from stream output.  If set then all
-        modules except for those listed here are output to the stream.  If both
-        ``include`` and ``exclude`` are provided, ``include`` takes precedence
-        and ``exclude`` is ignored.
-    """
-
-    def __init__(self, include=None, exclude=None):
-        self.include = set(include) if include is not None else include
-        self.exclude = set(exclude) if exclude is not None else exclude
-
-    def filter(self, record):
-        if ((self.include is None and self.exclude is None) or
-            not hasattr(record, 'orig_name')):
-            return True
-
-        record_name = record.orig_name.split('.')
-        while record_name:
-            if self.include is not None:
-                if '.'.join(record_name) in self.include:
-                    return True
-            elif self.exclude is not None:
-                if '.'.join(record_name) not in self.exclude:
-                    return True
-                else:
-                    break
-            record_name.pop()
-
-        record.echo = False
-        return True
-
-
-class LoggingExceptionHook(object):
-    def __init__(self, logger, level=logging.ERROR):
-        self._oldexcepthook = sys.excepthook
-        self.logger = logger
-        self.level = level
-        if not self.logger.handlers:
-            self.logger.addHandler(logging.NullHandler())
-
-    def __del__(self):
-        try:
-            try:
-                sys.excepthook = self._oldexcepthook
-            except AttributeError:
-                sys.excepthook = sys.__excepthook__
-        except AttributeError:
-            pass
-
-    def __call__(self, exc_type, exc_value, traceback):
-        self.logger.log(self.level, 'An unhandled exception ocurred:',
-                        exc_info=(exc_type, exc_value, traceback))
-        self._oldexcepthook(exc_type, exc_value, traceback)
-
-
-def setup_global_logging():
-    """
-    Initializes capture of stdout/stderr, Python warnings, and exceptions;
-    redirecting them to the loggers for the modules from which they originated.
-    """
-
-    global global_logging_started
-
-    if not PY3K:
-        sys.exc_clear()
-
-    if global_logging_started:
-        return
-
-    orig_logger_class = logging.getLoggerClass()
-    logging.setLoggerClass(StreamTeeLogger)
-    try:
-        stdout_logger = logging.getLogger(__name__ + '.stdout')
-        stderr_logger = logging.getLogger(__name__ + '.stderr')
-    finally:
-        logging.setLoggerClass(orig_logger_class)
-
-    stdout_logger.setLevel(logging.INFO)
-    stderr_logger.setLevel(logging.ERROR)
-    stdout_logger.set_stream(sys.stdout)
-    stderr_logger.set_stream(sys.stderr)
-    sys.stdout = stdout_logger
-    sys.stderr = stderr_logger
-
-    exception_logger = logging.getLogger(__name__ + '.exc')
-    sys.excepthook = LoggingExceptionHook(exception_logger)
-
-    logging.captureWarnings(True)
-
-    rawinput = 'input' if PY3K else 'raw_input'
-    builtins._original_raw_input = getattr(builtins, rawinput)
-    setattr(builtins, rawinput, global_logging_raw_input)
-
-    global_logging_started = True
-
-
-def teardown_global_logging():
-    """Disable global logging of stdio, warnings, and exceptions."""
-
-    global global_logging_started
-    if not global_logging_started:
-        return
-
-    stdout_logger = logging.getLogger(__name__ + '.stdout')
-    stderr_logger = logging.getLogger(__name__ + '.stderr')
-    if sys.stdout is stdout_logger:
-        sys.stdout = sys.stdout.stream
-    if sys.stderr is stderr_logger:
-        sys.stderr = sys.stderr.stream
-
-    # If we still have an unhandled exception go ahead and handle it with the
-    # replacement excepthook before deleting it
-    exc_type, exc_value, exc_traceback = sys.exc_info()
-    if exc_type is not None:
-        sys.excepthook(exc_type, exc_value, exc_traceback)
-    del exc_type
-    del exc_value
-    del exc_traceback
-    if not PY3K:
-        sys.exc_clear()
-
-    del sys.excepthook
-    logging.captureWarnings(False)
-
-    rawinput = 'input' if PY3K else 'raw_input'
-    if hasattr(builtins, '_original_raw_input'):
-        setattr(builtins, rawinput, builtins._original_raw_input)
-        del builtins._original_raw_input
-
-    global_logging_started = False
-
-
-# Cribbed, with a few tweaks from Tom Aldcroft at
-# http://www.astropython.org/snippet/2010/2/Easier-python-logging
-def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,
-                  stream=None, level=logging.INFO, filename=None, filemode='w',
-                  filelevel=None, propagate=True):
-    """
-    Do basic configuration for the logging system. Similar to
-    logging.basicConfig but the logger ``name`` is configurable and both a file
-    output and a stream output can be created. Returns a logger object.
-
-    The default behaviour is to create a logger called ``name`` with a null
-    handler, and to use the "%(levelname)s: %(message)s" format string, and add
-    the handler to the ``name`` logger.
-
-    A number of optional keyword arguments may be specified, which can alter
-    the default behaviour.
-
-    :param name: Logger name
-    :param format: handler format string
-    :param datefmt: handler date/time format specifier
-    :param stream: add a StreamHandler using ``stream``
-                    (None disables the stream, default=None)
-    :param level: logger level (default=INFO).
-    :param filename: add a FileHandler using ``filename`` (default=None)
-    :param filemode: open ``filename`` with specified filemode ('w' or 'a')
-    :param filelevel: logger level for file logger (default=``level``)
-    :param propagate: propagate message to parent (default=True)
-
-    :returns: logging.Logger object
-    """
-
-    # Get a logger for the specified name
-    logger = logging.getLogger(name)
-    logger.setLevel(level)
-    fmt = logging.Formatter(format, datefmt)
-    logger.propagate = propagate
-
-    # Remove existing handlers, otherwise multiple handlers can accrue
-    for hdlr in logger.handlers:
-        logger.removeHandler(hdlr)
-
-    # Add handlers. Add NullHandler if no file or stream output so that
-    # modules don't emit a warning about no handler.
-    if not (filename or stream):
-        logger.addHandler(logging.NullHandler())
-
-    if filename:
-        hdlr = logging.FileHandler(filename, filemode)
-        if filelevel is None:
-            filelevel = level
-        hdlr.setLevel(filelevel)
-        hdlr.setFormatter(fmt)
-        logger.addHandler(hdlr)
-
-    if stream:
-        hdlr = logging.StreamHandler(stream)
-        hdlr.setLevel(level)
-        hdlr.setFormatter(fmt)
-        logger.addHandler(hdlr)
-
-    return logger
-
-
-class _StreamHandlerEchoFilter(logging.Filter):
-    """
-    Filter used by the `logging.StreamHandler` internal to `StreamTeeLogger`;
-    any message logged through `StreamTeeLogger.write()` has an ``echo=True``
-    attribute attached to the `LogRecord`.  This ensures that the
-    `StreamHandler` only logs messages with this ``echo`` attribute set to
-    `True`.
-    """
-
-    def filter(self, record):
-        if hasattr(record, 'echo'):
-            return record.echo
-        return False
-
-
-class _LogTeeHandler(logging.Handler):
-    def __init__(self, level=logging.NOTSET):
-        logging.Handler.__init__(self, level)
-        self.__thread_local_ctx = threading.local()
-        self.__thread_local_ctx.logger_handle_counts = {}
-
-    def emit(self, record):
-        # Hand off to the global logger with the name same as the module of
-        # origin for this record
-        if not hasattr(record, 'orig_name'):
-            return
-
-        record = logging.LogRecord(record.orig_name, record.levelno,
-                                   record.orig_pathname, record.orig_lineno,
-                                   record.msg, record.args, record.exc_info,
-                                   record.orig_func)
-        record.origin = ""
-        logger = logging.getLogger(record.name)
-        if not logger.handlers:
-            logger.addHandler(logging.NullHandler())
-
-        counts = self.__thread_local_ctx.logger_handle_counts
-        if logger.name in counts:
-            counts[logger.name] += 1
-        else:
-            counts[logger.name] = 1
-            if self._search_stack():
-                return
-        try:
-            if counts[logger.name] > 1:
-                return
-            logger.handle(record)
-        finally:
-            counts[logger.name] -= 1
-
-    def _search_stack(self):
-        curr_frame = sys._getframe(3)
-        ##curr_frame = inspect.currentframe(3)
-        while curr_frame:
-            if 'self' in curr_frame.f_locals:
-                s = curr_frame.f_locals['self']
-                if (isinstance(s, logging.Logger) and not
-                    isinstance(s, StreamTeeLogger)):
-                    return True
-            curr_frame = curr_frame.f_back
-        return False
-
-
-if sys.version_info[:2] < (2, 7):
-    # We need to backport logging.captureWarnings
-    import warnings
-
-    PY26 = sys.version_info[:2] >= (2, 6)
-
-    logging._warnings_showwarning = None
-
-    class NullHandler(logging.Handler):
-        """
-        This handler does nothing. It's intended to be used to avoid the "No
-        handlers could be found for logger XXX" one-off warning. This is
-        important for library code, which may contain code to log events. If a
-        user of the library does not configure logging, the one-off warning
-        might be produced; to avoid this, the library developer simply needs to
-        instantiate a NullHandler and add it to the top-level logger of the
-        library module or package.
-        """
-
-        def handle(self, record):
-            pass
-
-        def emit(self, record):
-            pass
-
-        def createLock(self):
-            self.lock = None
-
-    logging.NullHandler = NullHandler
-
-
-    def _showwarning(message, category, filename, lineno, file=None,
-                     line=None):
-        """
-        Implementation of showwarnings which redirects to logging, which will
-        first check to see if the file parameter is None. If a file is
-        specified, it will delegate to the original warnings implementation of
-        showwarning. Otherwise, it will call warnings.formatwarning and will
-        log the resulting string to a warnings logger named "py.warnings" with
-        level logging.WARNING.
-        """
-
-        if file is not None:
-            if logging._warnings_showwarning is not None:
-                if PY26:
-                    _warnings_showwarning(message, category, filename, lineno,
-                                          file, line)
-                else:
-                    # Python 2.5 and below don't support the line argument
-                    _warnings_showwarning(message, category, filename, lineno,
-                                          file)
-        else:
-            if PY26:
-                s = warnings.formatwarning(message, category, filename, lineno,
-                                           line)
-            else:
-                s = warnings.formatwarning(message, category, filename, lineno)
-
-            logger = logging.getLogger("py.warnings")
-            if not logger.handlers:
-                logger.addHandler(NullHandler())
-            logger.warning("%s", s)
-    logging._showwarning = _showwarning
-    del _showwarning
-
-    def captureWarnings(capture):
-        """
-        If capture is true, redirect all warnings to the logging package.
-        If capture is False, ensure that warnings are not redirected to logging
-        but to their original destinations.
-        """
-        if capture:
-            if logging._warnings_showwarning is None:
-                logging._warnings_showwarning = warnings.showwarning
-                warnings.showwarning = logging._showwarning
-        else:
-            if logging._warnings_showwarning is not None:
-                warnings.showwarning = logging._warnings_showwarning
-                logging._warnings_showwarning = None
-    logging.captureWarnings = captureWarnings
-    del captureWarnings
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/minmatch.py b/required_pkgs/stsci.tools/lib/stsci/tools/minmatch.py
deleted file mode 100644
index 23add4e..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/minmatch.py
+++ /dev/null
@@ -1,317 +0,0 @@
-"""minmatch.py: Dictionary allowing minimum-match of string keys
-
-Entries can be retrieved using an abbreviated key as long as the key
-is unambiguous.  __getitem__ and get() raise an error if the key is
-ambiguous.
-
-A key is not consider ambiguous if it matches a full key, even if it
-also is an abbreviation for a longer key.  E.g., if there are keys
-'spam' and 'spameggs' in the dictionary, d['spam'] returns the value
-associated with 'spam', while d['spa'] is an error due to ambiguity.
-
-New key/value pairs must be inserted using the add() method to avoid
-ambiguities with when to overwrite and when to add a new key.  Assignments
-using setitem (e.g. d[key] = value) will raise an exception unless the
-key already exists and is unambiguous.
-
-The getall(key) method returns a list of all the matching values,
-containing a single entry for unambiguous matches and multiple entries
-for ambiguous matches.
-
-$Id: minmatch.py 38142 2015-03-06 13:42:21Z bsimon $
-
-R. White, 2000 January 28
-"""
-from __future__ import division, print_function # confidence high
-
-import sys, copy
-PY3K = sys.version_info[0] > 2
-if PY3K:
-    string_types = str
-else:
-    string_types = basestring
-
-# Need to import UserDict - 3.x has it in collections, 2.x has it in UserDict,
-# and the 2to3 tool doesn't fix this for us; the following should handle it all
-try:
-    from collections import UserDict # try for 3.x
-except ImportError:
-    from UserDict import UserDict # must be in 2.x
-
-class AmbiguousKeyError(KeyError):
-    pass
-
-class MinMatchDict(UserDict):
-
-    def __init__(self,indict=None,minkeylength=1):
-        self.data = {}
-        # use lazy instantiation for min-match dictionary
-        # it may never be created if full keys are always used
-        self.mmkeys = None
-        if minkeylength<1: minkeylength = 1
-        self.minkeylength = minkeylength
-        if indict is not None:
-            add = self.add
-            for key in indict.keys():
-                add(key, indict[key])
-
-    def __deepcopy__(self, memo=None):
-        """Deep copy of dictionary"""
-        # this is about twice as fast as the default implementation
-        return self.__class__(copy.deepcopy(self.data,memo), self.minkeylength)
-
-    def __getinitargs__(self):
-        """Return __init__ args for pickle"""
-        return (self.data, self.minkeylength)
-
-    def _mmInit(self):
-        """Create the minimum match dictionary of keys"""
-        # cache references to speed up loop a bit
-        mmkeys = {}
-        mmkeysGet = mmkeys.setdefault
-        minkeylength = self.minkeylength
-        for key in self.data.keys():
-            # add abbreviations as short as minkeylength
-            # always add at least one entry (even for key="")
-            lenkey = len(key)
-            start = min(minkeylength,lenkey)
-            for i in range(start,lenkey+1):
-                mmkeysGet(key[0:i],[]).append(key)
-        self.mmkeys = mmkeys
-
-    def getfullkey(self, key, new=0):
-        # check for exact match first
-        # ambiguous key is ok if there is exact match
-        if key in self.data: return key
-        if not isinstance(key, string_types):
-            raise KeyError("MinMatchDict keys must be strings")
-        # no exact match, so look for unique minimum match
-        if self.mmkeys is None: self._mmInit()
-        keylist = self.mmkeys.get(key)
-        if keylist is None:
-            # no such key -- ok only if new flag is set
-            if new: return key
-            raise KeyError("Key "+key+" not found")
-        elif len(keylist) == 1:
-            # unambiguous key
-            return keylist[0]
-        else:
-            return self.resolve(key,keylist)
-
-    def resolve(self, key, keylist):
-        """Hook to resolve ambiguities in selected keys"""
-        raise AmbiguousKeyError("Ambiguous key "+ repr(key) +
-                ", could be any of " + str(sorted(keylist)))
-
-    def add(self, key, item):
-        """Add a new key/item pair to the dictionary.  Resets an existing
-        key value only if this is an exact match to a known key."""
-        mmkeys = self.mmkeys
-        if mmkeys is not None and not (key in self.data):
-            # add abbreviations as short as minkeylength
-            # always add at least one entry (even for key="")
-            lenkey = len(key)
-            start = min(self.minkeylength,lenkey)
-            # cache references to speed up loop a bit
-            mmkeysGet = mmkeys.setdefault
-            for i in range(start,lenkey+1):
-                mmkeysGet(key[0:i],[]).append(key)
-        self.data[key] = item
-
-    def __setitem__(self, key, item):
-        """Set value of existing key/item in dictionary"""
-        try:
-            key = self.getfullkey(key)
-            self.data[key] = item
-        except KeyError as e:
-            raise e.__class__(str(e) + "\nUse add() method to add new items")
-
-    def __getitem__(self, key):
-        try:
-            # try the common case that the exact key is given first
-            return self.data[key]
-        except KeyError:
-            return self.data[self.getfullkey(key)]
-
-    def get(self, key, failobj=None, exact=0):
-        """Raises exception if key is ambiguous"""
-        if not exact:
-            key = self.getfullkey(key,new=1)
-        return self.data.get(key,failobj)
-
-    def get_exact_key(self, key, failobj=None):
-        """Returns failobj if key does not match exactly"""
-        return self.data.get(key,failobj)
-
-    def __delitem__(self, key):
-        key = self.getfullkey(key)
-        del self.data[key]
-        if self.mmkeys is not None:
-            start = min(self.minkeylength,len(key))
-            for i in range(start,len(key)+1):
-                s = key[0:i]
-                value = self.mmkeys.get(s)
-                value.remove(key)
-                if not value:
-                    # delete entry from mmkeys if that was last value
-                    del self.mmkeys[s]
-
-    def clear(self):
-        self.mmkeys = None
-        self.data.clear()
-
-    def __contains__(self, key):
-        """For the "in" operator. Raise an exception if key is ambiguous"""
-        return self._has(key)
-
-    def has_key(self, key, exact=0): return self._has(key, exact)
-
-    def _has(self, key, exact=0):
-        """Raises an exception if key is ambiguous"""
-        if not exact:
-            key = self.getfullkey(key,new=1)
-        return key in self.data
-
-    def has_exact_key(self, key):
-        """Returns true if there is an exact match for this key"""
-        return key in self.data
-
-    def update(self, other):
-        # check for missing attrs (needed in python 2.7)
-        if not hasattr(self, 'data'):
-            self.data = {}
-        if not hasattr(self, 'mmkeys'):
-            self.mmkeys = None
-        if not hasattr(self, 'minkeylength'):
-            self.minkeylength = other.minkeylength
-        # now do the update from 'other'
-        if type(other) is type(self.data):
-            for key in other.keys():
-                self.add(key,other[key])
-        else:
-            for key, value in other.items():
-                self.add(key,value)
-
-    def getall(self, key, failobj=None):
-        """Returns a list of all the matching values for key,
-        containing a single entry for unambiguous matches and
-        multiple entries for ambiguous matches."""
-        if self.mmkeys is None: self._mmInit()
-        k = self.mmkeys.get(key)
-        if not k: return failobj
-        return list(map(self.data.get, k))
-
-    def getallkeys(self, key, failobj=None):
-        """Returns a list of the full key names (not the items)
-        for all the matching values for key.  The list will
-        contain a single entry for unambiguous matches and
-        multiple entries for ambiguous matches."""
-        if self.mmkeys is None: self._mmInit()
-        return self.mmkeys.get(key, failobj)
-
-
-class QuietMinMatchDict(MinMatchDict):
-
-    """Minimum match dictionary that does not raise unexpected AmbiguousKeyError
-
-    Unlike MinMatchDict, if key is ambiguous then both get() and
-    has_key() methods return false (just as if there is no match).
-    For most uses this is probably not the preferred behavior (use
-    MinMatchDict instead), but for applications that rely on the
-    usual dictionary behavior where .get() and .has_key() do not
-    raise exceptions, this is useful.
-    """
-
-    def get(self, key, failobj=None, exact=0):
-
-        """Returns failobj if key is not found or is ambiguous"""
-
-        if not exact:
-            try:
-                key = self.getfullkey(key)
-            except KeyError:
-                return failobj
-        return self.data.get(key,failobj)
-
-
-    def _has(self, key, exact=0):
-
-        """Returns false if key is not found or is ambiguous"""
-
-        if not exact:
-            try:
-                key = self.getfullkey(key)
-                return 1
-            except KeyError:
-                return 0
-        else:
-            return key in self.data
-
-
-# some simple tests
-
-def test():
-    d = MinMatchDict()
-    print("a few d.add() calls")
-    d.add('test',1)
-    d.add('text',2)
-    d.add('ten',10)
-    print("d.items()", sorted(d.items()))
-    print("d['tex']=", d['tex'])
-    print("Changing d['tes'] to 3")
-    d['tes'] = 3
-    print("d.items()", sorted(d.items()))
-    try:
-        print("Ambiguous assignment to d['te'] - expecting exception")
-        d['te'] = 5
-    except AmbiguousKeyError as e:
-        print(str(e))
-        print('---')
-    print("d.get('tes')", d.get('tes'))
-    print("d.get('teq'), expect None: ", d.get('teq'))
-    print("d.getall('t')", sorted(d.getall('t')))
-    try:
-        print("d.get('t') - expecting exception")
-        d.get('t')
-    except AmbiguousKeyError as e:
-        print(str(e))
-        print('---')
-    print("d.add('tesseract',100)")
-    d.add('tesseract',100)
-    print("d.items()", sorted(d.items()))
-    try:
-        print("d.get('tes') - expecting exception")
-        d.get('tes')
-    except AmbiguousKeyError as e:
-        print(str(e))
-        print('---')
-    try:
-        print("del d['tes'] - expecting exception")
-        del d['tes']
-    except AmbiguousKeyError as e:
-        print(str(e))
-        print('---')
-    print("del d['tess']")
-    del d['tess']
-    print("d.items()", sorted(d.items()))
-    print("d.get('tes')", d.get('tes'))
-    print("d.has_key('tes'):", 'tes' in d)
-    print("d.has_key('tes', exact=True):", d.has_key('tes', exact=True))
-    print("'tes' in d:", 'tes' in d)
-    print("d.clear()")
-    d.clear()
-    print("d.items()", sorted(d.items()))
-    print("d.update({'ab': 0, 'cd': 1, 'ce': 2})")
-    d.update({'ab': 0, 'cd': 1, 'ce': 2})
-    print("d.items()", sorted(d.items()))
-    print("d['a']", d['a'])
-    try:
-        print("d['t'] - expecting exception")
-        d['t']
-    except KeyError as e:
-        print(str(e))
-        print('---')
-
-
-if __name__ == "__main__":
-    test()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/mputil.py b/required_pkgs/stsci.tools/lib/stsci/tools/mputil.py
deleted file mode 100755
index c9ba701..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/mputil.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-#
-from __future__ import print_function
-import math, time
-
-class WatchedProcess(object):
-    """ MINIMAL wrapper around multiprocessing.Process so we can more easily track/time them. """
-
-    def __init__(self, proc):
-        self.process = proc
-        self.state = 0 # 0=not-yet-started; 1=started; 2=finished-or-terminated
-        self._start_time = None
-
-    def start_process(self):
-        assert self.state == 0, "Already started: "+str(self.process)
-        self._start_time = time.time()
-        self.process.start()
-        self.state = 1
-
-    def join_process(self):
-        assert self.state >= 1, "Not started: "+str(self.process)
-        self.process.join()
-        self.state = 2
-
-    def time_since_started(self):
-        assert self.state > 0, "Not yet started: "+str(self.process)
-        return time.time() - self._start_time
-
-    def __repr__(self):
-        return "WatchedProcess for: "+str(self.process)+', state='+str(self.state)
-
-
-def launch_and_wait(mp_proc_list, pool_size):
-    """ Given a list of multiprocessing.Process objects which have not yet
-    been started, this function launches them and blocks until the last
-    finishes.  This makes sure that only <pool_size> processes are ever
-    working at any one time (this number does not include the main process
-    which called this function, since that will not tax the CPU).
-    The idea here is roughly analogous to multiprocessing.Pool
-    with the exceptions that:
-        1 - The caller will get to use the multiprocessing.Process model of
-            using shared memory (inheritance) to pass arg data to the child,
-        2 - maxtasksperchild is always 1,
-        3 - no function return value is kept/tranferred (not yet implemented)
-    """
-
-    # Sanity check
-    if len(mp_proc_list) < 1:
-        return
-
-    # Create or own list with easy state watching
-    procs = []
-    for p in mp_proc_list:
-        procs.append(WatchedProcess(p))
-    num_total = len(procs)
-
-    # Launch all of them, but only so pool_size are running at any time
-    keep_going = True
-    while (keep_going):
-        # Before we start any more, find out how many are running.  First go
-        # through the list of those started and see if alive.  Update state.
-        for p in procs:
-            if p.state == 1: # been started
-                if not p.process.is_alive():
-                    p.state = 2 # process has finished or been terminated
-                    assert p.process.exitcode != None, \
-                           "Process is not alive but has no exitcode? "+ \
-                           str(p.process)
-
-        # now figure num_running
-        num_running = len([p for p in procs if p.state == 1])
-
-        # Start some.  Only as many as pool_size should ever be running.
-        num_avail_cpus = pool_size - num_running
-        num_to_start = len([p for p in procs if p.state == 0])
-        if num_to_start < 1:
-            # all have been started, can finally leave loop and go wait
-            break
-        if num_avail_cpus > 0 and num_to_start > 0:
-            num_to_start_now = min(num_avail_cpus, num_to_start)
-            started_now = 0
-            for p in procs:
-                if started_now < num_to_start_now and p.state == 0:
-                    p.start_process()
-                    # debug "launch_and_wait: started: "+str(p.process)
-                    started_now += 1
-        # else: otherwise, all cpus are in use, just wait ...
-
-        # sleep to tame loop activity, but also must sleep a bit after each
-        # start call so that the call to is_alive() woorks correctly
-        time.sleep(1)
-
-    # Out of the launching loop, can now wait on all procs left.
-    for p in procs:
-        p.join_process()
-
-    # Check all exit codes before returning
-    for p in procs:
-        if 0 != p.process.exitcode:
-            raise RuntimeError("Problem during: "+str(p.process.name)+ \
-                  ', exitcode: '+str(p.process.exitcode)+'. Check log.')
-    # all is well, can return
-
-
-def takes_time(x):
-    """ Example function which takes some time to run - just here for testing. """
-
-    import numpy
-    START = time.time()
-    s = numpy.float64(1)
-    #s = numpy.float32(1)
-    #s = 1.0
-
-    assert x not in (3, 7,9), "Simulate some errors"
-
-    for i in range(10000000):
-        s = (s + x) * s % 2399232
-
-    elap = time.time() - START
-    print(('Done "takes_time" x='+str(x)+': s = '+str(s)+', elapsed time = %.2f s' % elap))
-
-
-def do_main():
-    """ Illustrate use of launch_and_wait """
-    # load em up
-    import multiprocessing
-    p = None
-    subprocs = []
-    for item in [2,3,4,5,6,7,8,9]:
-        print(("mputil: instantiating Process for x = "+str(item)))
-        p = multiprocessing.Process(target=takes_time, args=(item,),
-                                    name='takes_time()')
-        subprocs.append(p)
-
-    # launch em, pool-fashion
-    launch_and_wait(subprocs, 3)
-
-    # by now, all should be finished
-    print("All subprocs should be finished and joined.")
-
-
-def best_tile_layout(pool_size):
-    """ Determine and return the best layout of "tiles" for fastest
-    overall parallel processing of a rectangular image broken up into N
-    smaller equally-sized rectangular tiles, given as input the number
-    of processes/chunks which can be run/worked at the same time (pool_size).
-
-    This attempts to return a layout whose total number of tiles is as
-    close as possible to pool_size, without going over (and thus not
-    really taking advantage of pooling).  Since we can vary the
-    size of the rectangles, there is not much (any?) benefit to pooling.
-
-    Returns a tuple of ( <num tiles in X dir>, <num in Y direction> )
-
-    This assumes the image in question is relatively close to square, and
-    so the returned tuple attempts to give a layout which is as
-    squarishly-blocked as possible, except in cases where speed would be
-    sacrificed.
-
-    EXAMPLES:
-
-    For pool_size of 4, the best result is 2x2.
-
-    For pool_size of 6, the best result is 2x3.
-
-    For pool_size of 5, a result of 1x5 is better than a result of
-    2x2 (which would leave one core unused), and 1x5 is also better than
-    a result of 2x3 (which would require one core to work twice while all
-    others wait).
-
-    For higher, odd pool_size values (say 39), it is deemed best to
-    sacrifice a few unused cores to satisfy our other constraints, and thus
-    the result of 6x6 is best (giving 36 tiles and 3 unused cores).
-    """
-    # Easy answer sanity-checks
-    if pool_size < 2:
-        return (1, 1)
-
-    # Next, use a small mapping of hard-coded results.  While we agree
-    # that many of these are unlikely pool_size values, they are easy
-    # to accomodate.
-    mapping = { 0:(1,1), 1:(1,1), 2:(1,2), 3:(1,3), 4:(2,2), 5:(1,5),
-                6:(2,3), 7:(2,3), 8:(2,4), 9:(3,3), 10:(2,5), 11:(2,5),
-                14:(2,7), 18:(3,6), 19:(3,6), 28:(4,7), 29:(4,7),
-                32:(4,8), 33:(4,8), 34:(4,8), 40:(4,10), 41:(4,10) }
-    if pool_size in mapping:
-        return mapping[pool_size]
-
-    # Next, take a guess using the square root and (for the sake of
-    # simplicity), go with it.  We *could* get much fancier here...
-    # Use floor-rounding (not ceil) so that the total number of resulting
-    # tiles is <= pool_size.
-    xnum = int(math.sqrt(pool_size))
-    ynum = int((1.*pool_size)/xnum)
-    return (xnum, ynum)
-
-
-def test_best_tile_layout():
-    """ Loop though some numbers and make sure we get expected results. """
-    for i in range(257):
-        x,y = best_tile_layout(i)
-        assert (x*y <= i) or (i == 0), "Total num resulting tiles > pool_size"
-        unused_cores = i - (x*y)
-        print(i, (x,y), unused_cores)
-        if i < 10:
-            assert unused_cores <= 1, "Too many idle cores at i = "+str(i)
-        else:
-            percent_unused = 100.*((unused_cores*1.)/i)
-            assert percent_unused < 14., "Too many idles cores at i: "+str(i)
-
-
-if __name__=='__main__':
-    do_main()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/nimageiter.py b/required_pkgs/stsci.tools/lib/stsci/tools/nimageiter.py
deleted file mode 100644
index 79a1db2..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/nimageiter.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""
-
-License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-"""
-from __future__ import division # confidence medium
-from __future__ import generators
-
-import types
-import numpy as N
-
-
-BUFSIZE = 1024*1000   # 1Mb cache size
-
-__version__ = '0.7'
-__vdate__ = '25-July-2012'
-
-
-def ImageIter(imglist,bufsize=BUFSIZE,overlap=0,copy=0,updateSection = None):
-    """ Return image section for each image listed on input.
-        The inputs can either be a single image or a list of them,
-        with the return value matching the input type.
-        All images in a list MUST have the same shape, though,
-        in order for the iterator to scroll through them properly.
-
-        The size of section gets defined by 'bufsize', while
-        'copy' specifies whether to return an explicit copy
-        of each input section or simply return views.
-        The 'overlap' parameter provides a way of scrolling
-        through the image with this many rows of overlap, with
-        the default being no overlap at all.
-    """
-    if type(imglist) != list:
-        imgarr = imglist.data
-        imgarr = N.asarray(imgarr)
-        _imglen = 1
-        single = 1
-    else:
-        imgarr = imglist[0].data
-        imgarr = N.asarray(imgarr)
-        _imglen = len(imglist)
-        single = 0
-        _outlist = []
-    _numrows = imgarr.shape[0]
-
-    if len(imgarr.shape) == 1:
-        if copy:
-            if single:
-                yield imgarr.copy(),None
-            else:
-                for img in imglist: _outlist.append(img.copy())
-        else:
-            yield imglist,None
-
-    else:
-        nrows = computeBuffRows(imgarr,bufsize=bufsize)
-#       niter = int(imgarr.shape[0] / nrows) * nrows
-        nbuff,nrows = computeNumberBuff(imgarr.shape[0],nrows,overlap)
-        niter = nbuff*nrows
-
-        if copy:
-            # Create a cache that will contain a copy of the input
-                    # not just a view...
-            if single:
-                _cache = N.zeros((nrows,imgarr.shape[1]),dtype=imgarr.dtype)
-            else:
-                for img in imglist: _outlist.append(N.zeros((nrows,imgarr.shape[1]),dtype=imgarr.dtype))
-
-        for pix in range(0,niter+1,nrows):
-            # overlap needs to be computed here
-            # This allows the user to avoid edge effects when
-            # convolving the returned image sections, and insures
-            # that the last segment will always be returned with
-            # overlap+1 rows.
-
-            _prange = pix+nrows+overlap
-            if _prange > _numrows: _prange = _numrows
-            if pix == _prange: break
-
-            if copy:
-                if single:
-                    _cache = imgarr[pix:_prange].copy()
-                    yield _cache,(pix,_prange)
-                    N.multiply(_cache,0.,_cache)
-                else:
-                    for img in range(len(imglist)): _outlist[img] = imglist[img][pix:_prange].copy()
-                    yield _outlist,(pix,_prange)
-                    for img in range(len(imglist)): N.multiply(_outlist[img],0.,_outlist[img])
-            else:
-                if single:
-                    #yield imgarr.section[pix:_prange,:],(pix,_prange)
-                    yield imgarr[pix:_prange],(pix,_prange)
-                else:
-                    for hdu in imglist:
-                        #_outlist.append(imglist[img][pix:pix+nrows])
-                        _outlist.append(hdu.section[pix:_prange,:])
-                    yield _outlist,(pix,_prange)
-                    # This code is inserted to copy any values changed
-                    # in the image sections back into the original image.
-                    if (updateSection != None):
-                        #for _index in xrange(len(_outlist)):
-                        imglist[updateSection][pix:_prange] = _outlist[updateSection]
-                    del _outlist
-                    _outlist = []
-
-def computeBuffRows(imgarr,bufsize=BUFSIZE):
-    """ Function to compute the number of rows from the
-        input array that fits in the allocated memory given
-        by the bufsize.
-    """
-    imgarr = N.asarray(imgarr)
-    buffrows = int(bufsize / (imgarr.itemsize * imgarr.shape[1]))
-    return buffrows
-
-def computeNumberBuff(numrows, buffrows, overlap):
-    """ Function to compute the number of buffer sections
-        that will be used to read the input image given the
-        specified overlap.
-    """
-    nbuff = _computeNbuff(numrows, buffrows, overlap)
-    niter = 1 + int(nbuff)
-    totalrows = niter * buffrows
-    # We need to account for the case where the number of
-    # iterations ends up being greater than needed due to the
-    # overlap.
-    #if totalrows > numrows: niter -= 1
-    lastbuff = numrows - (niter*(buffrows-overlap))
-
-    if lastbuff < overlap+1 and nbuff > 1:
-        good = False
-        while not good:
-            if buffrows > overlap+1:
-                buffrows -= 1
-
-                nbuff = _computeNbuff(numrows, buffrows, overlap)
-                niter = 1 + int(nbuff)
-                totalrows = niter * (buffrows - overlap)
-                lastbuff = numrows - (niter*(buffrows-overlap))
-                if lastbuff > overlap + 1:
-                    good = True
-            else:
-                good = True
-    return niter,buffrows
-
-def _computeNbuff(numrows,buffrows,overlap):
-
-    if buffrows > numrows:
-        nbuff = 1
-    else:
-        overlaprows = buffrows - overlap
-        rowratio = (numrows - overlaprows)/(1.0*buffrows)
-        nbuff = (numrows - overlaprows+1)/(1.0*overlaprows)
-    return nbuff
-
-def FileIter(filelist,bufsize=BUFSIZE,overlap=0):
-    """ Return image section for each image listed on input, with
-        the object performing the file I/O upon each call to the
-        iterator.
-
-        The inputs can either be a single image or a list of them,
-        with the return value matching the input type.
-        All images in a list MUST have the same shape, though,
-        in order for the iterator to scroll through them properly.
-
-        The size of section gets defined by 'bufsize'.
-        The 'overlap' parameter provides a way of scrolling
-        through the image with this many rows of overlap, with
-        the default being no overlap at all.
-    """
-    if type(filelist) != list:
-        imgarr = filelist.data
-        imgarr = N.asarray(imgarr)
-        _imglen = 1
-        single = 1
-    else:
-        imgarr = filelist[0].data
-        imgarr = N.asarray(imgarr)
-        _imglen = len(filelist)
-        single = 0
-        _outlist = []
-    _numrows = imgarr.shape[0]
-
-    if len(imgarr.shape) == 1:
-        # This needs to be generalized to return pixel ranges
-        # based on bufsize, just like with 2-D arrays (images).
-        yield filelist,None
-
-    else:
-        nrows = computeBuffRows(imgarr,bufsize=bufsize)
-#       niter = int(imgarr.shape[0] / nrows) * nrows
-        nbuff,nrows = computeNumberBuff(imgarr.shape[0],nrows,overlap)
-        niter = nbuff * nrows
-
-        for pix in range(0,niter+1,nrows-overlap):
-            # overlap needs to be computed here
-            # This allows the user to avoid edge effects when
-            # convolving the returned image sections, and insures
-            # that the last segment will always be returned with
-            # overlap+1 rows.
-            _prange = pix+nrows
-            if _prange > _numrows: _prange = _numrows
-            if pix >= _prange: break
-            if single:
-                yield imgarr[pix:_prange],(pix,_prange)
-            else:
-                for hdu in filelist:
-                    _outlist.append(hdu[pix:_prange])
-                yield _outlist,(pix,_prange)
-                del _outlist
-                _outlist = []
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/nmpfit.py b/required_pkgs/stsci.tools/lib/stsci/tools/nmpfit.py
deleted file mode 100644
index b89d1e4..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/nmpfit.py
+++ /dev/null
@@ -1,2274 +0,0 @@
-"""
-Python/Numeric version of this module was called mpfit. This version was modified to use numpy.
-"""
-from __future__ import division, print_function # confidence medium
-__version__ = '0.2'
-
-"""
-Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
-
-                                                                                                AUTHORS
-        The original version of this software, called LMFIT, was written in FORTRAN
-        as part of the MINPACK-1 package by XXX.
-
-        Craig Markwardt converted the FORTRAN code to IDL.  The information for the
-        IDL version is:
-                Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
-                craigm at lheamail.gsfc.nasa.gov
-                UPDATED VERSIONs can be found on my WEB PAGE:
-                        http://cow.physics.wisc.edu/~craigm/idl/idl.html
-
-        Mark Rivers created this Python version from Craig's IDL version.
-                Mark Rivers, University of Chicago
-                Building 434A, Argonne National Laboratory
-                9700 South Cass Avenue, Argonne, IL 60439
-                rivers at cars.uchicago.edu
-                Updated versions can be found at http://cars.uchicago.edu/software
-
-
-                                                                                        DESCRIPTION
-
-        MPFIT uses the Levenberg-Marquardt technique to solve the
-        least-squares problem.  In its typical use, MPFIT will be used to
-        fit a user-supplied function (the "model") to user-supplied data
-        points (the "data") by adjusting a set of parameters.  MPFIT is
-        based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
-
-        For example, a researcher may think that a set of observed data
-        points is best modelled with a Gaussian curve.  A Gaussian curve is
-        parameterized by its mean, standard deviation and normalization.
-        MPFIT will, within certain constraints, find the set of parameters
-        which best fits the data.  The fit is "best" in the least-squares
-        sense; that is, the sum of the weighted squared differences between
-        the model and data is minimized.
-
-        The Levenberg-Marquardt technique is a particular strategy for
-        iteratively searching for the best fit.  This particular
-        implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
-        and more accurate than the version provided in the Scientific Python package
-        in Scientific.Functions.LeastSquares.
-        This version allows upper and lower bounding constraints to be placed on each
-        parameter, or the parameter can be held fixed.
-
-        The user-supplied Python function should return an array of weighted
-        deviations between model and data.  In a typical scientific problem
-        the residuals should be weighted so that each deviate has a
-        gaussian sigma of 1.0.  If X represents values of the independent
-        variable, Y represents a measurement for each value of X, and ERR
-        represents the error in the measurements, then the deviates could
-        be calculated as follows:
-
-        DEVIATES = (Y - F(X)) / ERR
-
-        where F is the analytical function representing the model.  You are
-        recommended to use the convenience functions MPFITFUN and
-        MPFITEXPR, which are driver functions that calculate the deviates
-        for you.  If ERR are the 1-sigma uncertainties in Y, then
-
-        TOTAL( DEVIATES^2 )
-
-        will be the total chi-squared value.  MPFIT will minimize the
-        chi-square value.  The values of X, Y and ERR are passed through
-        MPFIT to the user-supplied function via the FUNCTKW keyword.
-
-        Simple constraints can be placed on parameter values by using the
-        PARINFO keyword to MPFIT.  See below for a description of this
-        keyword.
-
-        MPFIT does not perform more general optimization tasks.  See TNMIN
-        instead.  MPFIT is customized, based on MINPACK-1, to the
-        least-squares minimization problem.
-
-
-                                                                                        USER FUNCTION
-
-        The user must define a function which returns the appropriate
-        values as specified above.  The function should return the weighted
-        deviations between the model and the data.  It should also return a status
-        flag and an optional partial derivative array.  For applications which
-        use finite-difference derivatives -- the default -- the user
-        function should be declared in the following way:
-
-        def myfunct(p, fjac=None, x=None, y=None, err=None)
-                # Parameter values are passed in "p"
-                # If fjac==None then partial derivatives should not
-                # computed.  It will always be None if MPFIT is called with default
-                # flag.
-                model = F(x, p)
-                # Non-negative status value means MPFIT should continue, negative means
-                # stop the
-                status = 0
-                return([status, (y-model)/err]
-
-        See below for applications with analytical derivatives.
-
-        The keyword parameters X, Y, and ERR in the example above are
-        suggestive but not required.  Any parameters can be passed to
-        MYFUNCT by using the functkw keyword to MPFIT.  Use MPFITFUN and
-        MPFITEXPR if you need ideas on how to do that.  The function *must*
-        accept a parameter list, P.
-
-        In general there are no restrictions on the number of dimensions in
-        X, Y or ERR.  However the deviates *must* be returned in a
-        one-dimensional Numeric array of type Float.
-
-        User functions may also indicate a fatal error condition using the
-        status return described above. If status is set to a number between
-        -15 and -1 then MPFIT will stop the calculation and return to the caller.
-
-
-                    ANALYTIC DERIVATIVES
-
-        In the search for the best-fit solution, MPFIT by default
-        calculates derivatives numerically via a finite difference
-        approximation.  The user-supplied function need not calculate the
-        derivatives explicitly.  However, if you desire to compute them
-        analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
-        As a practical matter, it is often sufficient and even faster to allow
-        MPFIT to calculate the derivatives numerically, and so
-        AUTODERIVATIVE=0 is not necessary.
-
-        If AUTODERIVATIVE=0 is used then the user function must check the parameter
-        FJAC, and if FJAC!=None then return the partial derivative array in the
-        return list.
-        def myfunct(p, fjac=None, x=None, y=None, err=None)
-                # Parameter values are passed in "p"
-                # If FJAC!=None then partial derivatives must be comptuer.
-                # FJAC contains an array of len(p), where each entry
-                # is 1 if that parameter is free and 0 if it is fixed.
-                model = F(x, p)
-                Non-negative status value means MPFIT should continue, negative means
-                # stop the calculation.
-                status = 0
-                if (dojac):
-                        pderiv = Numeric.zeros([len(x), len(p)], Numeric.Float)
-                        for j in range(len(p)):
-                        pderiv[:,j] = FGRAD(x, p, j)
-                else:
-                        pderiv = None
-                return([status, (y-model)/err, pderiv]
-
-        where FGRAD(x, p, i) is a user function which must compute the
-        derivative of the model with respect to parameter P[i] at X.  When
-        finite differencing is used for computing derivatives (ie, when
-        AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
-        derivatives the parameter FJAC=None.
-
-        Derivatives should be returned in the PDERIV array. PDERIV should be an m x
-        n array, where m is the number of data points and n is the number
-        of parameters.  dp[i,j] is the derivative at the ith point with
-        respect to the jth parameter.
-
-        The derivatives with respect to fixed parameters are ignored; zero
-        is an appropriate value to insert for those derivatives.  Upon
-        input to the user function, FJAC is set to a vector with the same
-        length as P, with a value of 1 for a parameter which is free, and a
-        value of zero for a parameter which is fixed (and hence no
-        derivative needs to be calculated).
-
-        If the data is higher than one dimensional, then the *last*
-        dimension should be the parameter dimension.  Example: fitting a
-        50x50 image, "dp" should be 50x50xNPAR.
-
-
-                                CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
-
-        The behavior of MPFIT can be modified with respect to each
-        parameter to be fitted.  A parameter value can be fixed; simple
-        boundary constraints can be imposed; limitations on the parameter
-        changes can be imposed; properties of the automatic derivative can
-        be modified; and parameters can be tied to one another.
-
-        These properties are governed by the PARINFO structure, which is
-        passed as a keyword parameter to MPFIT.
-
-        PARINFO should be a list of dictionaries, one list entry for each parameter.
-        Each parameter is associated with one element of the array, in
-        numerical order.  The dictionary can have the following keys
-        (none are required, keys are case insensitive):
-
-                'value' - the starting parameter value (but see the START_PARAMS
-                                        parameter for more information).
-
-                'fixed' - a boolean value, whether the parameter is to be held
-                                        fixed or not.  Fixed parameters are not varied by
-                                        MPFIT, but are passed on to MYFUNCT for evaluation.
-
-                'limited' - a two-element boolean array.  If the first/second
-                                        element is set, then the parameter is bounded on the
-                                        lower/upper side.  A parameter can be bounded on both
-                                        sides.  Both LIMITED and LIMITS must be given
-                                        together.
-
-                'limits' - a two-element float array.  Gives the
-                                        parameter limits on the lower and upper sides,
-                                        respectively.  Zero, one or two of these values can be
-                                        set, depending on the values of LIMITED.  Both LIMITED
-                                        and LIMITS must be given together.
-
-                'parname' - a string, giving the name of the parameter.  The
-                                        fitting code of MPFIT does not use this tag in any
-                                        way.  However, the default iterfunct will print the
-                                        parameter name if available.
-
-                'step' - the step size to be used in calculating the numerical
-                                derivatives.  If set to zero, then the step size is
-                                computed automatically.  Ignored when AUTODERIVATIVE=0.
-
-                'mpside' - the sidedness of the finite difference when computing
-                                        numerical derivatives.  This field can take four
-                                        values:
-
-                                                0 - one-sided derivative computed automatically
-                                                1 - one-sided derivative (f(x+h) - f(x)  )/h
-                                                -1 - one-sided derivative (f(x)   - f(x-h))/h
-                                                2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
-
-                                        Where H is the STEP parameter described above.  The
-                                        "automatic" one-sided derivative method will chose a
-                                        direction for the finite difference which does not
-                                        violate any constraints.  The other methods do not
-                                        perform this check.  The two-sided method is in
-                                        principle more precise, but requires twice as many
-                                        function evaluations.  Default: 0.
-
-                'mpmaxstep' - the maximum change to be made in the parameter
-                                                value.  During the fitting process, the parameter
-                                                will never be changed by more than this value in
-                                                one iteration.
-
-                                                A value of 0 indicates no maximum.  Default: 0.
-
-                'tied' - a string expression which "ties" the parameter to other
-                                free or fixed parameters.  Any expression involving
-                                constants and the parameter array P are permitted.
-                                Example: if parameter 2 is always to be twice parameter
-                                1 then use the following: parinfo(2).tied = '2 * p(1)'.
-                                Since they are totally constrained, tied parameters are
-                                considered to be fixed; no errors are computed for them.
-                                [ NOTE: the PARNAME can't be used in expressions. ]
-
-                'mpprint' - if set to 1, then the default iterfunct will print the
-                                        parameter value.  If set to 0, the parameter value
-                                        will not be printed.  This tag can be used to
-                                        selectively print only a few parameter values out of
-                                        many.  Default: 1 (all parameters printed)
-
-
-        Future modifications to the PARINFO structure, if any, will involve
-        adding dictionary tags beginning with the two letters "MP".
-        Therefore programmers are urged to avoid using tags starting with
-        the same letters; otherwise they are free to include their own
-        fields within the PARINFO structure, and they will be ignored.
-
-        PARINFO Example:
-        parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}]*5
-        parinfo[0]['fixed'] = 1
-        parinfo[4]['limited'][0] = 1
-        parinfo[4]['limits'][0]  = 50.
-        values = [5.7, 2.2, 500., 1.5, 2000.]
-        for i in range(5): parinfo[i]['value']=values[i]
-
-        A total of 5 parameters, with starting values of 5.7,
-        2.2, 500, 1.5, and 2000 are given.  The first parameter
-        is fixed at a value of 5.7, and the last parameter is
-        constrained to be above 50.
-
-
-                                                                                                EXAMPLE
-
-        import mpfit
-        import Numeric
-        x = Numeric.arange(100, Numeric.float)
-        p0 = [5.7, 2.2, 500., 1.5, 2000.]
-        y = ( p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*Numeric.sqrt(x) +
-                        p[4]*Numeric.log(x))
-        fa = {'x':x, 'y':y, 'err':err}
-        m = mpfit('myfunct', p0, functkw=fa)
-        print 'status = ', m.status
-        if (m.status <= 0): print 'error message = ', m.errmsg
-        print 'parameters = ', m.params
-
-        Minimizes sum of squares of MYFUNCT.  MYFUNCT is called with the X,
-        Y, and ERR keyword parameters that are given by FUNCTKW.  The
-        results can be obtained from the returned object m.
-
-
-                                                                                THEORY OF OPERATION
-
-        There are many specific strategies for function minimization.  One
-        very popular technique is to use function gradient information to
-        realize the local structure of the function.  Near a local minimum
-        the function value can be taylor expanded about x0 as follows:
-
-                f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
-                                        -----   ---------------   -------------------------------  (1)
-                Order    0th          1st                      2nd
-
-        Here f'(x) is the gradient vector of f at x, and f''(x) is the
-        Hessian matrix of second derivatives of f at x.  The vector x is
-        the set of function parameters, not the measured data vector.  One
-        can find the minimum of f, f(xm) using Newton's method, and
-        arrives at the following linear equation:
-
-                f''(x0) . (xm-x0) = - f'(x0)                            (2)
-
-        If an inverse can be found for f''(x0) then one can solve for
-        (xm-x0), the step vector from the current position x0 to the new
-        projected minimum.  Here the problem has been linearized (ie, the
-        gradient information is known to first order).  f''(x0) is
-        symmetric n x n matrix, and should be positive definite.
-
-        The Levenberg - Marquardt technique is a variation on this theme.
-        It adds an additional diagonal term to the equation which may aid the
-        convergence properties:
-
-                (f''(x0) + nu I) . (xm-x0) = -f'(x0)                  (2a)
-
-        where I is the identity matrix.  When nu is large, the overall
-        matrix is diagonally dominant, and the iterations follow steepest
-        descent.  When nu is small, the iterations are quadratically
-        convergent.
-
-        In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
-        determined.  However the Hessian matrix is often difficult or
-        impossible to compute.  The gradient f'(x0) may be easier to
-        compute, if even by finite difference techniques.  So-called
-        quasi-Newton techniques attempt to successively estimate f''(x0)
-        by building up gradient information as the iterations proceed.
-
-        In the least squares problem there are further simplifications
-        which assist in solving eqn (2).  The function to be minimized is
-        a sum of squares:
-
-                        f = Sum(hi^2)                                         (3)
-
-        where hi is the ith residual out of m residuals as described
-        above.  This can be substituted back into eqn (2) after computing
-        the derivatives:
-
-                        f'  = 2 Sum(hi  hi')
-                        f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'')                (4)
-
-        If one assumes that the parameters are already close enough to a
-        minimum, then one typically finds that the second term in f'' is
-        negligible [or, in any case, is too difficult to compute].  Thus,
-        equation (2) can be solved, at least approximately, using only
-        gradient information.
-
-        In matrix notation, the combination of eqns (2) and (4) becomes:
-
-                        hT' . h' . dx = - hT' . h                          (5)
-
-        Where h is the residual vector (length m), hT is its transpose, h'
-        is the Jacobian matrix (dimensions n x m), and dx is (xm-x0).  The
-        user function supplies the residual vector h, and in some cases h'
-        when it is not found by finite differences (see MPFIT_FDJAC2,
-        which finds h and hT').  Even if dx is not the best absolute step
-        to take, it does provide a good estimate of the best *direction*,
-        so often a line minimization will occur along the dx vector
-        direction.
-
-        The method of solution employed by MINPACK is to form the Q . R
-        factorization of h', where Q is an orthogonal matrix such that QT .
-        Q = I, and R is upper right triangular.  Using h' = Q . R and the
-        ortogonality of Q, eqn (5) becomes
-
-                        (RT . QT) . (Q . R) . dx = - (RT . QT) . h
-                                                        RT . R . dx = - RT . QT . h         (6)
-                                                                        R . dx = - QT . h
-
-        where the last statement follows because R is upper triangular.
-        Here, R, QT and h are known so this is a matter of solving for dx.
-        The routine MPFIT_QRFAC provides the QR factorization of h, with
-        pivoting, and MPFIT_QRSOLV provides the solution for dx.
-
-
-                                                                                        REFERENCES
-
-        MINPACK-1, Jorge More', available from netlib (www.netlib.org).
-        "Optimization Software Guide," Jorge More' and Stephen Wright,
-                SIAM, *Frontiers in Applied Mathematics*, Number 14.
-        More', Jorge J., "The Levenberg-Marquardt Algorithm:
-                Implementation and Theory," in *Numerical Analysis*, ed. Watson,
-                G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
-
-
-                                                                        MODIFICATION HISTORY
-
-        Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
-        Copyright (C) 1997-2002, Craig Markwardt
-        This software is provided as is without any warranty whatsoever.
-        Permission to use, copy, modify, and distribute modified or
-        unmodified copies is granted, provided this copyright and disclaimer
-        are included unchanged.
-
-        Translated from MPFIT (Craig Markwardt's IDL package) to Python,
-        August, 2002.  Mark Rivers
-"""
-from . import numerixenv
-numerixenv.check()
-
-import numpy
-import types
-
-
-#     Original FORTRAN documentation
-#     **********
-#
-#     subroutine lmdif
-#
-#     the purpose of lmdif is to minimize the sum of the squares of
-#     m nonlinear functions in n variables by a modification of
-#     the levenberg-marquardt algorithm. the user must provide a
-#     subroutine which calculates the functions. the jacobian is
-#     then calculated by a forward-difference approximation.
-#
-#     the subroutine statement is
-#
-#       subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
-#                        diag,mode,factor,nprint,info,nfev,fjac,
-#                        ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
-#
-#     where
-#
-#       fcn is the name of the user-supplied subroutine which
-#         calculates the functions. fcn must be declared
-#         in an external statement in the user calling
-#         program, and should be written as follows.
-#
-#         subroutine fcn(m,n,x,fvec,iflag)
-#         integer m,n,iflag
-#         double precision x(n),fvec(m)
-#         ----------
-#         calculate the functions at x and
-#         return this vector in fvec.
-#         ----------
-#         return
-#         end
-#
-#         the value of iflag should not be changed by fcn unless
-#         the user wants to terminate execution of lmdif.
-#         in this case set iflag to a negative integer.
-#
-#       m is a positive integer input variable set to the number
-#         of functions.
-#
-#       n is a positive integer input variable set to the number
-#         of variables. n must not exceed m.
-#
-#       x is an array of length n. on input x must contain
-#         an initial estimate of the solution vector. on output x
-#         contains the final estimate of the solution vector.
-#
-#       fvec is an output array of length m which contains
-#         the functions evaluated at the output x.
-#
-#       ftol is a nonnegative input variable. termination
-#         occurs when both the actual and predicted relative
-#         reductions in the sum of squares are at most ftol.
-#         therefore, ftol measures the relative error desired
-#         in the sum of squares.
-#
-#       xtol is a nonnegative input variable. termination
-#         occurs when the relative error between two consecutive
-#         iterates is at most xtol. therefore, xtol measures the
-#         relative error desired in the approximate solution.
-#
-#       gtol is a nonnegative input variable. termination
-#         occurs when the cosine of the angle between fvec and
-#         any column of the jacobian is at most gtol in absolute
-#         value. therefore, gtol measures the orthogonality
-#         desired between the function vector and the columns
-#         of the jacobian.
-#
-#       maxfev is a positive integer input variable. termination
-#         occurs when the number of calls to fcn is at least
-#         maxfev by the end of an iteration.
-#
-#       epsfcn is an input variable used in determining a suitable
-#         step length for the forward-difference approximation. this
-#         approximation assumes that the relative errors in the
-#         functions are of the order of epsfcn. if epsfcn is less
-#         than the machine precision, it is assumed that the relative
-#         errors in the functions are of the order of the machine
-#         precision.
-#
-#       diag is an array of length n. if mode = 1 (see
-#         below), diag is internally set. if mode = 2, diag
-#         must contain positive entries that serve as
-#         multiplicative scale factors for the variables.
-#
-#       mode is an integer input variable. if mode = 1, the
-#         variables will be scaled internally. if mode = 2,
-#         the scaling is specified by the input diag. other
-#         values of mode are equivalent to mode = 1.
-#
-#       factor is a positive input variable used in determining the
-#         initial step bound. this bound is set to the product of
-#         factor and the euclidean norm of diag*x if nonzero, or else
-#         to factor itself. in most cases factor should lie in the
-#         interval (.1,100.). 100. is a generally recommended value.
-#
-#       nprint is an integer input variable that enables controlled
-#         printing of iterates if it is positive. in this case,
-#         fcn is called with iflag = 0 at the beginning of the first
-#         iteration and every nprint iterations thereafter and
-#         immediately prior to return, with x and fvec available
-#         for printing. if nprint is not positive, no special calls
-#         of fcn with iflag = 0 are made.
-#
-#       info is an integer output variable. if the user has
-#         terminated execution, info is set to the (negative)
-#         value of iflag. see description of fcn. otherwise,
-#         info is set as follows.
-#
-#         info = 0  improper input parameters.
-#
-#         info = 1  both actual and predicted relative reductions
-#                   in the sum of squares are at most ftol.
-#
-#         info = 2  relative error between two consecutive iterates
-#                   is at most xtol.
-#
-#         info = 3  conditions for info = 1 and info = 2 both hold.
-#
-#         info = 4  the cosine of the angle between fvec and any
-#                   column of the jacobian is at most gtol in
-#                   absolute value.
-#
-#         info = 5  number of calls to fcn has reached or
-#                   exceeded maxfev.
-#
-#         info = 6  ftol is too small. no further reduction in
-#                   the sum of squares is possible.
-#
-#         info = 7  xtol is too small. no further improvement in
-#                   the approximate solution x is possible.
-#
-#         info = 8  gtol is too small. fvec is orthogonal to the
-#                   columns of the jacobian to machine precision.
-#
-#       nfev is an integer output variable set to the number of
-#         calls to fcn.
-#
-#       fjac is an output m by n array. the upper n by n submatrix
-#         of fjac contains an upper triangular matrix r with
-#         diagonal elements of nonincreasing magnitude such that
-#
-#                t     t           t
-#               p *(jac *jac)*p = r *r,
-#
-#         where p is a permutation matrix and jac is the final
-#         calculated jacobian. column j of p is column ipvt(j)
-#         (see below) of the identity matrix. the lower trapezoidal
-#         part of fjac contains information generated during
-#         the computation of r.
-#
-#       ldfjac is a positive integer input variable not less than m
-#         which specifies the leading dimension of the array fjac.
-#
-#       ipvt is an integer output array of length n. ipvt
-#         defines a permutation matrix p such that jac*p = q*r,
-#         where jac is the final calculated jacobian, q is
-#         orthogonal (not stored), and r is upper triangular
-#         with diagonal elements of nonincreasing magnitude.
-#         column j of p is column ipvt(j) of the identity matrix.
-#
-#       qtf is an output array of length n which contains
-#         the first n elements of the vector (q transpose)*fvec.
-#
-#       wa1, wa2, and wa3 are work arrays of length n.
-#
-#       wa4 is a work array of length m.
-#
-#     subprograms called
-#
-#       user-supplied ...... fcn
-#
-#       minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac
-#
-#       fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
-#
-#     argonne national laboratory. minpack project. march 1980.
-#     burton s. garbow, kenneth e. hillstrom, jorge j. more
-#
-#     **********
-
-class mpfit:
-    def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
-                                            ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
-                                            damp=0., maxiter=200, factor=100., nprint=1,
-                                            iterfunct='default', iterkw={}, nocovar=0,
-                                            fastnorm=0, rescale=0, autoderivative=1, quiet=0,
-                                            diag=None, epsfcn=None, debug=0):
-        """
-Inputs:
-fcn:
-        The function to be minimized.  The function should return the weighted
-        deviations between the model and the data, as described above.
-
-xall:
-        An array of starting values for each of the parameters of the model.
-        The number of parameters should be fewer than the number of measurements.
-
-        This parameter is optional if the parinfo keyword is used (but see
-        parinfo).  The parinfo keyword provides a mechanism to fix or constrain
-        individual parameters.
-
-Keywords:
-
-autoderivative:
-        If this is set, derivatives of the function will be computed
-        automatically via a finite differencing procedure.  If not set, then
-        fcn must provide the (analytical) derivatives.
-                Default: set (=1)
-                NOTE: to supply your own analytical derivatives,
-                                explicitly pass autoderivative=0
-
-fastnorm:
-        Set this keyword to select a faster algorithm to compute sum-of-square
-        values internally.  For systems with large numbers of data points, the
-        standard algorithm can become prohibitively slow because it cannot be
-        vectorized well.  By setting this keyword, MPFIT will run faster, but
-        it will be more prone to floating point overflows and underflows.  Thus, setting
-        this keyword may sacrifice some stability in the fitting process.
-                Default: clear (=0)
-
-ftol:
-        A nonnegative input variable. Termination occurs when both the actual
-        and predicted relative reductions in the sum of squares are at most
-        ftol (and status is accordingly set to 1 or 3).  Therefore, ftol
-        measures the relative error desired in the sum of squares.
-                Default: 1E-10
-
-functkw:
-        A dictionary which contains the parameters to be passed to the
-        user-supplied function specified by fcn via the standard Python
-        keyword dictionary mechanism.  This is the way you can pass additional
-        data to your user-supplied function without using global variables.
-
-        Consider the following example:
-                if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
-                                                        'errval':[1.,1.,1.] }
-        then the user supplied function should be declared like this:
-                def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
-
-        Default: {}   No extra parameters are passed to the user-supplied
-                                                function.
-
-gtol:
-        A nonnegative input variable. Termination occurs when the cosine of
-        the angle between fvec and any column of the jacobian is at most gtol
-        in absolute value (and status is accordingly set to 4). Therefore,
-        gtol measures the orthogonality desired between the function vector
-        and the columns of the jacobian.
-                Default: 1e-10
-
-iterkw:
-        The keyword arguments to be passed to iterfunct via the dictionary
-        keyword mechanism.  This should be a dictionary and is similar in
-        operation to FUNCTKW.
-                Default: {}  No arguments are passed.
-
-iterfunct:
-        The name of a function to be called upon each NPRINT iteration of the
-        MPFIT routine.  It should be declared in the following way:
-                def iterfunct(myfunct, p, iter, fnorm, functkw=None,
-                                                        parinfo=None, quiet=0, dof=None, [iterkw keywords here])
-                # perform custom iteration update
-
-        iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
-        and QUIET).
-
-        myfunct:  The user-supplied function to be minimized,
-        p:        The current set of model parameters
-        iter:     The iteration number
-        functkw:  The arguments to be passed to myfunct.
-        fnorm:    The chi-squared value.
-        quiet:    Set when no textual output should be printed.
-        dof:      The number of degrees of freedom, normally the number of points
-                                        less the number of free parameters.
-        See below for documentation of parinfo.
-
-        In implementation, iterfunct can perform updates to the terminal or
-        graphical user interface, to provide feedback while the fit proceeds.
-        If the fit is to be stopped for any reason, then iterfunct should return a
-        a status value between -15 and -1.  Otherwise it should return None
-        (e.g. no return statement) or 0.
-        In principle, iterfunct should probably not modify the parameter values,
-        because it may interfere with the algorithm's stability.  In practice it
-        is allowed.
-
-        Default: an internal routine is used to print the parameter values.
-
-        Set iterfunct=None if there is no user-defined routine and you don't
-        want the internal default routine be called.
-
-maxiter:
-        The maximum number of iterations to perform.  If the number is exceeded,
-        then the status value is set to 5 and MPFIT returns.
-        Default: 200 iterations
-
-nocovar:
-        Set this keyword to prevent the calculation of the covariance matrix
-        before returning (see COVAR)
-        Default: clear (=0)  The covariance matrix is returned
-
-nprint:
-        The frequency with which iterfunct is called.  A value of 1 indicates
-        that iterfunct is called with every iteration, while 2 indicates every
-        other iteration, etc.  Note that several Levenberg-Marquardt attempts
-        can be made in a single iteration.
-        Default value: 1
-
-parinfo
-        Provides a mechanism for more sophisticated constraints to be placed on
-        parameter values.  When parinfo is not passed, then it is assumed that
-        all parameters are free and unconstrained.  Values in parinfo are never
-        modified during a call to MPFIT.
-
-        See description above for the structure of PARINFO.
-
-        Default value: None  All parameters are free and unconstrained.
-
-quiet:
-        Set this keyword when no textual output should be printed by MPFIT
-
-damp:
-        A scalar number, indicating the cut-off value of residuals where
-        "damping" will occur.  Residuals with magnitudes greater than this
-        number will be replaced by their hyperbolic tangent.  This partially
-        mitigates the so-called large residual problem inherent in
-        least-squares solvers (as for the test problem CURVI,
-        http://www.maxthis.com/curviex.htm).
-        A value of 0 indicates no damping.
-                Default: 0
-
-        Note: DAMP doesn't work with autoderivative=0
-
-xtol:
-        A nonnegative input variable. Termination occurs when the relative error
-        between two consecutive iterates is at most xtol (and status is
-        accordingly set to 2 or 3).  Therefore, xtol measures the relative error
-        desired in the approximate solution.
-        Default: 1E-10
-
-Outputs:
-
-Returns an object of type mpfit.  The results are attributes of this class,
-e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar.
-
-.status
-        An integer status code is returned.  All values greater than zero can
-        represent success (however .status == 5 may indicate failure to
-        converge). It can have one of the following values:
-
-        -16
-                A parameter or function value has become infinite or an undefined
-                number.  This is usually a consequence of numerical overflow in the
-                user's model function, which must be avoided.
-
-        -15 to -1
-                These are error codes that either MYFUNCT or iterfunct may return to
-                terminate the fitting process.  Values from -15 to -1 are reserved
-                for the user functions and will not clash with MPFIT.
-
-        0  Improper input parameters.
-
-        1  Both actual and predicted relative reductions in the sum of squares
-                are at most ftol.
-
-        2  Relative error between two consecutive iterates is at most xtol
-
-        3  Conditions for status = 1 and status = 2 both hold.
-
-        4  The cosine of the angle between fvec and any column of the jacobian
-                is at most gtol in absolute value.
-
-        5  The maximum number of iterations has been reached.
-
-        6  ftol is too small. No further reduction in the sum of squares is
-                possible.
-
-        7  xtol is too small. No further improvement in the approximate solution
-                x is possible.
-
-        8  gtol is too small. fvec is orthogonal to the columns of the jacobian
-                to machine precision.
-
-.fnorm
-        The value of the summed squared residuals for the returned parameter
-        values.
-
-.covar
-        The covariance matrix for the set of parameters returned by MPFIT.
-        The matrix is NxN where N is the number of  parameters.  The square root
-        of the diagonal elements gives the formal 1-sigma statistical errors on
-        the parameters if errors were treated "properly" in fcn.
-        Parameter errors are also returned in .perror.
-
-        To compute the correlation matrix, pcor, use this example:
-                cov = mpfit.covar
-                pcor = cov * 0.
-                for i in range(n):
-                        for j in range(n):
-                                pcor[i,j] = cov[i,j]/Numeric.sqrt(cov[i,i]*cov[j,j])
-
-        If nocovar is set or MPFIT terminated abnormally, then .covar is set to
-        a scalar with value None.
-
-.errmsg
-        A string error or warning message is returned.
-
-.nfev
-        The number of calls to MYFUNCT performed.
-
-.niter
-        The number of iterations completed.
-
-.perror
-        The formal 1-sigma errors in each parameter, computed from the
-        covariance matrix.  If a parameter is held fixed, or if it touches a
-        boundary, then the error is reported as zero.
-
-        If the fit is unweighted (i.e. no errors were given, or the weights
-        were uniformly set to unity), then .perror will probably not represent
-        the true parameter uncertainties.
-
-        *If* you can assume that the true reduced chi-squared value is unity --
-        meaning that the fit is implicitly assumed to be of good quality --
-        then the estimated parameter uncertainties can be computed by scaling
-        .perror by the measured chi-squared value.
-
-                dof = len(x) - len(mpfit.params) # deg of freedom
-                # scaled uncertainties
-                pcerror = mpfit.perror * numpy.sqrt(mpfit.fnorm / dof)
-
-        """
-        self.niter = 0
-        self.params = None
-        self.covar = None
-        self.perror = None
-        self.status = 0  # Invalid input flag set while we check inputs
-        self.debug = debug
-        self.errmsg = ''
-        self.fastnorm = fastnorm
-        self.nfev = 0
-        self.damp = damp
-        self.machar = machar(double=1)
-        machep = self.machar.machep
-
-        if (fcn==None):
-            self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
-            return
-
-        if (iterfunct == 'default'): iterfunct = self.defiter
-
-        ## Parameter damping doesn't work when user is providing their own
-        ## gradients.
-        if (self.damp != 0) and (autoderivative == 0):
-            self.errmsg =  'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
-            return
-
-        ## Parameters can either be stored in parinfo, or x. x takes precedence if it exists
-        if (xall == None) and (parinfo == None):
-            self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
-            return
-
-        ## Be sure that PARINFO is of the right type
-        if (parinfo != None):
-            if (type(parinfo) != list):
-                self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
-                return
-            else:
-                if (type(parinfo[0]) != dict):
-                    self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
-                    return
-            if ((xall != None) and (len(xall) != len(parinfo))):
-                self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
-                return
-
-        ## If the parameters were not specified at the command line, then
-        ## extract them from PARINFO
-        if (xall == None):
-            xall = self.parinfo(parinfo, 'value')
-            if (xall == None):
-                self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
-                return
-
-
-        ## Make sure parameters are numpy arrays of type numpy.float
-        #print 'xall', xall, type(xall)
-        xall = numpy.asarray(xall, numpy.float)
-
-        npar = len(xall)
-        self.fnorm  = -1.
-        fnorm1 = -1.
-
-        ## TIED parameters?
-        ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
-        self.qanytied = 0
-        for i in range(npar):
-            ptied[i] = ptied[i].strip()
-            if (ptied[i] != ''): self.qanytied = 1
-        self.ptied = ptied
-
-        ## FIXED parameters ?
-        pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
-        pfixed = (pfixed == 1)
-        for i in range(npar):
-            pfixed[i] = pfixed[i] or (ptied[i] != '') ## Tied parameters are also effectively fixed
-
-        ## Finite differencing step, absolute and relative, and sidedness of deriv.
-        step = self.parinfo(parinfo, 'step', default=0., n=npar)
-        dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
-        dside = self.parinfo(parinfo, 'mpside',  default=0, n=npar)
-
-        ## Maximum and minimum steps allowed to be taken in one iteration
-        maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
-        minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
-
-        qmin = minstep * 0  ## Remove minstep for now!!
-        qmax = maxstep != 0
-
-        wh = numpy.nonzero(((qmin!=0.) & (qmax!=0.)) & (maxstep < minstep))
-
-        #check if it's 1d array?
-        if (len(wh[0]) > 0):
-            self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
-            return
-        wh = numpy.nonzero((qmin!=0.) & (qmax!=0.))
-        qminmax = len(wh[0] > 0)
-
-        ## Finish up the free parameters
-        ifree = (numpy.nonzero(pfixed != 1))[0]
-        nfree = len(ifree)
-        if nfree == 0:
-            self.errmsg = 'ERROR: no free parameters'
-            return
-
-        ## Compose only VARYING parameters
-        self.params = xall      ## self.params is the set of parameters to be returned
-        x = numpy.take(self.params, ifree)  ## x is the set of free parameters
-
-        ## LIMITED parameters ?
-        limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
-        limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
-
-        if (limited != None) and (limits != None):
-            ## Error checking on limits in parinfo
-            wh = numpy.nonzero((limited[:,0] & (xall < limits[:,0])) |
-                                                                    (limited[:,1] & (xall > limits[:,1])))
-            if (len(wh[0]) > 0):
-                self.errmsg = 'ERROR: parameters are not within PARINFO limits'
-                return
-            wh = numpy.nonzero((limited[:,0] & limited[:,1]) &
-                                                                    (limits[:,0] >= limits[:,1]) &
-                                                                    (pfixed == 0))
-            if (len(wh[0]) > 0):
-                self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
-                return
-
-            ## Transfer structure values to local variables
-            qulim = numpy.take(limited[:,1], ifree)
-            ulim  = numpy.take(limits [:,1], ifree)
-            qllim = numpy.take(limited[:,0], ifree)
-            llim  = numpy.take(limits [:,0], ifree)
-
-            wh = numpy.nonzero((qulim!=0.) | (qllim!=0.))
-            if (len(wh[0]) > 0): qanylim = 1
-            else: qanylim = 0
-        else:
-            ## Fill in local variables with dummy values
-            qulim = numpy.zeros(nfree, dtype=n.int8)
-            ulim  = x * 0.
-            qllim = qulim
-            llim  = x * 0.
-            qanylim = 0
-
-        n = len(x)
-        ## Check input parameters for errors
-        if ((n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0)
-                                        or (maxiter <= 0) or (factor <= 0)):
-            self.errmsg = 'ERROR: input keywords are inconsistent'
-            return
-
-        if (rescale != 0):
-            self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
-            if (len(diag) < n): return
-            wh = (numpy.nonzero(diag <= 0))[0]
-            if (len(wh) > 0): return
-            self.errmsg = ''
-
-        # Make sure x is a numpy array of type numpy.float
-        x = numpy.asarray(x, numpy.float64)
-
-        [self.status, fvec] = self.call(fcn, self.params, functkw)
-        if (self.status < 0):
-            self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
-            return
-
-        m = len(fvec)
-        if (m < n):
-            self.errmsg = 'ERROR: number of parameters must not exceed data'
-            return
-
-        self.fnorm = self.enorm(fvec)
-
-        ## Initialize Levelberg-Marquardt parameter and iteration counter
-
-        par = 0.
-        self.niter = 1
-        qtf = x * 0.
-        self.status = 0
-
-        ## Beginning of the outer loop
-
-        while(1):
-
-            ## If requested, call fcn to enable printing of iterates
-            numpy.put(self.params, ifree, x)
-            if (self.qanytied): self.params = self.tie(self.params, ptied)
-
-            if (nprint > 0) and (iterfunct != None):
-                if (((self.niter-1) % nprint) == 0):
-                    mperr = 0
-                    xnew0 = self.params.copy()
-
-                    dof = max(len(fvec) - len(x), 0)
-                    status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
-                            functkw=functkw, parinfo=parinfo, quiet=quiet,
-                            dof=dof, **iterkw)
-                    if (status != None): self.status = status
-
-                    ## Check for user termination
-                    if (self.status < 0):
-                        self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
-                        return
-
-                    ## If parameters were changed (grrr..) then re-tie
-                    if (max(abs(xnew0-self.params)) > 0):
-                        if (self.qanytied): self.params = self.tie(self.params, ptied)
-                        x = numpy.take(self.params, ifree)
-
-
-            ## Calculate the jacobian matrix
-            self.status = 2
-            catch_msg = 'calling MPFIT_FDJAC2'
-            fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
-                                                    epsfcn=epsfcn,
-                                                    autoderivative=autoderivative, dstep=dstep,
-                                                    functkw=functkw, ifree=ifree, xall=self.params)
-            if (fjac == None):
-                self.errmsg = 'WARNING: premature termination by FDJAC2'
-                return
-
-            ## Determine if any of the parameters are pegged at the limits
-            if (qanylim):
-                catch_msg = 'zeroing derivatives of pegged parameters'
-                whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
-                nlpeg = len(whlpeg)
-                whupeg = (numpy.nonzero(qulim & (x == ulim)) )[0]
-                nupeg = len(whupeg)
-                ## See if any "pegged" values should keep their derivatives
-                if (nlpeg > 0):
-                    ## Total derivative of sum wrt lower pegged parameters
-                    for i in range(nlpeg):
-                        sum = numpy.sum(fvec * fjac[:,whlpeg[i]])
-                        if (sum > 0): fjac[:,whlpeg[i]] = 0
-                if (nupeg > 0):
-                    ## Total derivative of sum wrt upper pegged parameters
-                    for i in range(nupeg):
-                        sum = numpy.sum(fvec * fjac[:,whupeg[i]])
-                        if (sum < 0): fjac[:,whupeg[i]] = 0
-
-            ## Compute the QR factorization of the jacobian
-            [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
-
-            ## On the first iteration if "diag" is unspecified, scale
-            ## according to the norms of the columns of the initial jacobian
-            catch_msg = 'rescaling diagonal elements'
-            if (self.niter == 1):
-                if ((rescale==0) or (len(diag) < n)):
-                    diag = wa2.copy()
-                    wh = (numpy.nonzero(diag == 0) )[0]
-                    numpy.put(diag, wh, 1.)
-
-                ## On the first iteration, calculate the norm of the scaled x
-                ## and initialize the step bound delta
-                wa3 = diag * x
-                xnorm = self.enorm(wa3)
-                delta = factor*xnorm
-                if (delta == 0.): delta = factor
-
-            ## Form (q transpose)*fvec and store the first n components in qtf
-            catch_msg = 'forming (q transpose)*fvec'
-            wa4 = fvec.copy()
-            for j in range(n):
-                lj = ipvt[j]
-                temp3 = fjac[j,lj]
-                if (temp3 != 0):
-                    fj = fjac[j:,lj]
-                    wj = wa4[j:]
-                    ## *** optimization wa4(j:*)
-                    wa4[j:] = wj - fj * numpy.sum(fj*wj) / temp3
-                fjac[j,lj] = wa1[j]
-                qtf[j] = wa4[j]
-            ## From this point on, only the square matrix, consisting of the
-            ## triangle of R, is needed.
-            fjac = fjac[0:n, 0:n]
-            fjac.shape = [n, n]
-            temp = fjac.copy()
-            for i in range(n):
-                temp[:,i] = fjac[:, ipvt[i]]
-            fjac = temp.copy()
-
-            ## Check for overflow.  This should be a cheap test here since FJAC
-            ## has been reduced to a (small) square matrix, and the test is
-            ## O(N^2).
-            #wh = where(finite(fjac) EQ 0, ct)
-            #if ct GT 0 then goto, FAIL_OVERFLOW
-
-            ## Compute the norm of the scaled gradient
-            catch_msg = 'computing the scaled gradient'
-            gnorm = 0.
-            if (self.fnorm != 0):
-                for j in range(n):
-                    l = ipvt[j]
-                    if (wa2[l] != 0):
-                        sum = numpy.sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
-                        gnorm = max([gnorm,abs(sum/wa2[l])])
-
-            ## Test for convergence of the gradient norm
-            if (gnorm <= gtol):
-                self.status = 4
-                return
-
-            ## Rescale if necessary
-            if (rescale == 0):
-                diag = numpy.choose(diag>wa2, (wa2, diag))
-
-            ## Beginning of the inner loop
-            while(1):
-
-                ## Determine the levenberg-marquardt parameter
-                catch_msg = 'calculating LM parameter (MPFIT_)'
-                [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
-                                                                                                                        delta, wa1, wa2, par=par)
-                ## Store the direction p and x+p. Calculate the norm of p
-                wa1 = -wa1
-
-                if (qanylim == 0) and (qminmax == 0):
-                    ## No parameter limits, so just move to new position WA2
-                    alpha = 1.
-                    wa2 = x + wa1
-
-                else:
-
-                    ## Respect the limits.  If a step were to go out of bounds, then
-                    ## we should take a step in the same direction but shorter distance.
-                    ## The step should take us right to the limit in that case.
-                    alpha = 1.
-
-                    if (qanylim):
-                        ## Do not allow any steps out of bounds
-                        catch_msg = 'checking for a step out of bounds'
-                        if (nlpeg > 0):
-                            numpy.put(wa1, whlpeg, numpy.clip(
-                                    numpy.take(wa1, whlpeg), 0., max(wa1)))
-                        if (nupeg > 0):
-                            numpy.put(wa1, whupeg, numpy.clip(
-                                    numpy.take(wa1, whupeg), min(wa1), 0.))
-
-                        dwa1 = abs(wa1) > machep
-                        whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)) )[0]
-
-                        if (len(whl) > 0):
-                            t = (((numpy.take(llim, whl) - numpy.take(x, whl)) /
-                                            numpy.take(wa1, whl)))
-
-                            alpha = min(alpha, min(t))
-                        whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)) )[0]
-                        if (len(whu) > 0):
-                            t = ((numpy.take(ulim, whu) - numpy.take(x, whu)) /
-                                            numpy.take(wa1, whu))
-                            alpha = min(alpha, min(t))
-
-                    ## Obey any max step values.
-                    if (qminmax):
-                        nwa1 = wa1 * alpha
-                        whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)) )[0]
-                        if (len(whmax) > 0):
-                            mrat = max(numpy.take(nwa1, whmax) /
-                                                            numpy.take(maxstep, whmax))
-                            if (mrat > 1): alpha = alpha / mrat
-
-                    ## Scale the resulting vector
-                    wa1 = wa1 * alpha
-                    wa2 = x + wa1
-
-                    ## Adjust the final output values.  If the step put us exactly
-                    ## on a boundary, make sure it is exact.
-                    wh = (numpy.nonzero((qulim!=0.) & (wa2 >= ulim*(1-machep))) )[0]
-                    if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(ulim, wh))
-                    wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim*(1+machep))) )[0]
-                    if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(llim, wh))
-                # endelse
-                wa3 = diag * wa1
-                pnorm = self.enorm(wa3)
-
-                ## On the first iteration, adjust the initial step bound
-                if (self.niter == 1): delta = min([delta,pnorm])
-
-                numpy.put(self.params, ifree, wa2)
-
-                ## Evaluate the function at x+p and calculate its norm
-                mperr = 0
-                catch_msg = 'calling '+str(fcn)
-                [self.status, wa4] = self.call(fcn, self.params, functkw)
-                if (self.status < 0):
-                    self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
-                    return
-                fnorm1 = self.enorm(wa4)
-
-                ## Compute the scaled actual reduction
-                catch_msg = 'computing convergence criteria'
-                actred = -1.
-                if ((0.1 * fnorm1) < self.fnorm): actred = - (fnorm1/self.fnorm)**2 + 1.
-
-                ## Compute the scaled predicted reduction and the scaled directional
-                ## derivative
-                for j in range(n):
-                    wa3[j] = 0
-                    wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
-
-                ## Remember, alpha is the fraction of the full LM step actually
-                ## taken
-                temp1 = self.enorm(alpha*wa3)/self.fnorm
-                temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
-                prered = temp1*temp1 + (temp2*temp2)/0.5
-                dirder = -(temp1*temp1 + temp2*temp2)
-
-                ## Compute the ratio of the actual to the predicted reduction.
-                ratio = 0.
-                if (prered != 0): ratio = actred/prered
-
-                ## Update the step bound
-                if (ratio <= 0.25):
-                    if (actred >= 0): temp = .5
-                    else: temp = .5*dirder/(dirder + .5*actred)
-                    if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1
-                    delta = temp*min([delta,pnorm/0.1])
-                    par = par/temp
-                else:
-                    if (par == 0) or (ratio >= 0.75):
-                        delta = pnorm/.5
-                        par = .5*par
-
-                ## Test for successful iteration
-                if (ratio >= 0.0001):
-                    ## Successful iteration.  Update x, fvec, and their norms
-                    x = wa2
-                    wa2 = diag * x
-                    fvec = wa4
-                    xnorm = self.enorm(wa2)
-                    self.fnorm = fnorm1
-                    self.niter = self.niter + 1
-
-                ## Tests for convergence
-                if ((abs(actred) <= ftol) and (prered <= ftol)
-                                and (0.5 * ratio <= 1)): self.status = 1
-                if delta <= xtol*xnorm: self.status = 2
-                if ((abs(actred) <= ftol) and (prered <= ftol)
-                                and (0.5 * ratio <= 1) and (self.status == 2)): self.status = 3
-                if (self.status != 0): break
-
-                ## Tests for termination and stringent tolerances
-                if (self.niter >= maxiter): self.status = 5
-                if ((abs(actred) <= machep) and (prered <= machep)
-                                and (0.5*ratio <= 1)): self.status = 6
-                if delta <= machep*xnorm: self.status = 7
-                if gnorm <= machep: self.status = 8
-                if (self.status != 0): break
-
-                ## End of inner loop. Repeat if iteration unsuccessful
-                if (ratio >= 0.0001): break
-
-            ## Check for over/underflow - SKIP FOR NOW
-            ##wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
-            ##if ct GT 0 OR finite(ratio) EQ 0 then begin
-            ##   errmsg = ('ERROR: parameter or function value(s) have become '+$
-            ##      'infinite# check model function for over- '+$
-            ##      'and underflow')
-            ##   self.status = -16
-            ##   break
-            if (self.status != 0): break;
-        ## End of outer loop.
-
-        catch_msg = 'in the termination phase'
-        ## Termination, either normal or user imposed.
-        if (len(self.params) == 0):
-            return
-        if (nfree == 0): self.params = xall.copy()
-        else: numpy.put(self.params, ifree, x)
-        if (nprint > 0) and (self.status > 0):
-            catch_msg = 'calling ' + str(fcn)
-            [status, fvec] = self.call(fcn, self.params, functkw)
-            catch_msg = 'in the termination phase'
-            self.fnorm = self.enorm(fvec)
-
-        if ((self.fnorm != None) and (fnorm1 != None)):
-            self.fnorm = max([self.fnorm, fnorm1])
-            self.fnorm = self.fnorm**2.
-
-        self.covar = None
-        self.perror = None
-        ## (very carefully) set the covariance matrix COVAR
-        if ((self.status > 0) and (nocovar==0) and (n != None)
-                                                and (fjac != None) and (ipvt != None)):
-            sz = numpy.shape(fjac)
-            if ((n > 0) and (sz[0] >= n) and (sz[1] >= n)
-                            and (len(ipvt) >= n)):
-                catch_msg = 'computing the covariance matrix'
-                cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
-                cv.shape = [n, n]
-                nn = len(xall)
-
-                ## Fill in actual covariance matrix, accounting for fixed
-                ## parameters.
-                self.covar = numpy.zeros([nn, nn], numpy.float)
-
-                for i in range(n):
-                    indices = ifree+ifree[i]*nn
-                    numpy.put(self.covar, indices, cv[:,i])
-                    #numpy.put(self.covar, i, cv[:,i])
-                ## Compute errors in parameters
-                catch_msg = 'computing parameter errors'
-                self.perror = numpy.zeros(nn, numpy.float)
-                d = numpy.diagonal(self.covar)
-                wh = (numpy.nonzero(d >= 0) )[0]
-                if len(wh) > 0:
-                    numpy.put(self.perror, wh, numpy.sqrt(numpy.take(d, wh)))
-        return
-
-
-    ## Default procedure to be called every iteration.  It simply prints
-    ## the parameter values.
-    def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
-                                                            quiet=0, iterstop=None, parinfo=None,
-                                                            format=None, pformat='%.10g', dof=1):
-
-        if (self.debug): print('Entering defiter...')
-        if (quiet): return
-        if (fnorm == None):
-            [status, fvec] = self.call(fcn, x, functkw)
-            fnorm = self.enorm(fvec)**2
-
-        ## Determine which parameters to print
-        nprint = len(x)
-        print("Iter ", ('%6i' % iter),"   CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
-        for i in range(nprint):
-            if (parinfo != None) and ('parname' in parinfo[i]):
-                p = '   ' + parinfo[i]['parname'] + ' = '
-            else:
-                p = '   P' + str(i) + ' = '
-            if (parinfo != None) and ('mpprint' in parinfo[i]):
-                iprint = parinfo[i]['mpprint']
-            else:
-                iprint = 1
-            if (iprint):
-                print(p + (pformat % x[i]) + '  ')
-        return(0)
-
-    ##  DO_ITERSTOP:
-    ##  if keyword_set(iterstop) then begin
-    ##      k = get_kbrd(0)
-    ##      if k EQ string(byte(7)) then begin
-    ##          message, 'WARNING: minimization not complete', /info
-    ##          print, 'Do you want to terminate this procedure? (y/n)', $
-    ##            format='(A,$)'
-    ##          k = ''
-    ##          read, k
-    ##          if strupcase(strmid(k,0,1)) EQ 'Y' then begin
-    ##              message, 'WARNING: Procedure is terminating.', /info
-    ##              mperr = -1
-    ##          endif
-    ##      endif
-    ##  endif
-
-
-    ## Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
-    def parinfo(self, parinfo=None, key='a', default=None, n=0):
-        if (self.debug): print('Entering parinfo...')
-        if (n == 0) and (parinfo != None): n = len(parinfo)
-        if (n == 0):
-            values = default
-            return(values)
-
-        values = []
-        for i in range(n):
-            if ((parinfo != None) and (key in parinfo[i])):
-                values.append(parinfo[i][key])
-            else:
-                values.append(default)
-
-        # Convert to numeric arrays if possible
-        test = default
-        if (type(default) == list): test=default[0]
-        if (type(test) == int):
-            values = numpy.asarray(values, dtype=numpy.int)
-        elif (type(test) == float):
-            values = numpy.asarray(values, dtype=numpy.float)
-        return(values)
-
-
-    ## Call user function or procedure, with _EXTRA or not, with
-    ## derivatives or not.
-    def call(self, fcn, x, functkw, fjac=None):
-        if (self.debug): print('Entering call...')
-        if (self.qanytied): x = self.tie(x, self.ptied)
-        self.nfev = self.nfev + 1
-        if (fjac == None):
-            [status, f] = fcn(x, fjac=fjac, **functkw)
-
-            if (self.damp > 0):
-                ## Apply the damping if requested.  This replaces the residuals
-                ## with their hyperbolic tangent.  Thus residuals larger than
-                ## DAMP are essentially clipped.
-                f = numpy.tanh(f/self.damp)
-            return([status, f])
-        else:
-            return(fcn(x, fjac=fjac, **functkw))
-
-
-    def enorm(self, vec):
-
-        if (self.debug): print('Entering enorm...')
-        ## NOTE: it turns out that, for systems that have a lot of data
-        ## points, this routine is a big computing bottleneck.  The extended
-        ## computations that need to be done cannot be effectively
-        ## vectorized.  The introduction of the FASTNORM configuration
-        ## parameter allows the user to select a faster routine, which is
-        ## based on TOTAL() alone.
-
-        # Very simple-minded sum-of-squares
-        if (self.fastnorm):
-            ans = numpy.sqrt(numpy.sum(vec*vec))
-        else:
-            agiant = self.machar.rgiant / len(vec)
-            adwarf = self.machar.rdwarf * len(vec)
-
-            ## This is hopefully a compromise between speed and robustness.
-            ## Need to do this because of the possibility of over- or underflow.
-            mx = max(vec)
-            mn = min(vec)
-            mx = max(abs(mx), abs(mn))
-            if mx == 0: return(vec[0]*0.)
-            if mx > agiant or mx < adwarf:
-                ans = mx * numpy.sqrt(numpy.sum((vec/mx)*(vec/mx)))
-            else:
-                ans = numpy.sqrt(numpy.sum(vec*vec))
-
-        return(ans)
-
-
-    def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
-                                    epsfcn=None, autoderivative=1,
-                                    functkw=None, xall=None, ifree=None, dstep=None):
-
-        if (self.debug): print('Entering fdjac2...')
-        machep = self.machar.machep
-        if epsfcn == None:  epsfcn = machep
-        if xall == None:    xall = x
-        if ifree == None:   ifree = numpy.arange(len(xall))
-        if step == None:    step = x * 0.
-        nall = len(xall)
-
-        eps = numpy.sqrt(max([epsfcn, machep]))
-        m = len(fvec)
-        n = len(x)
-
-        ## Compute analytical derivative if requested
-        if (autoderivative == 0):
-            mperr = 0
-            fjac = numpy.zeros(nall, numpy.float)
-            numpy.put(fjac, ifree, 1.0)  ## Specify which parameters need derivatives
-            [status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
-
-            if len(fjac) != m*nall:
-                print('ERROR: Derivative matrix was not computed properly.')
-                return(None)
-
-            ## This definition is c1onsistent with CURVEFIT
-            ## Sign error found (thanks Jesus Fernandez <fernande at irm.chu-caen.fr>)
-            fjac.shape = [m,nall]
-            fjac = -fjac
-
-            ## Select only the free parameters
-            if len(ifree) < nall:
-                fjac = fjac[:,ifree]
-                fjac.shape = [m, n]
-                return(fjac)
-
-        fjac = numpy.zeros([m, n], numpy.float)
-
-        h = eps * abs(x)
-
-        ## if STEP is given, use that
-        if step != None:
-            stepi = numpy.take(step, ifree)
-            wh = (numpy.nonzero(stepi > 0) )[0]
-            if (len(wh) > 0): numpy.put(h, wh, numpy.take(stepi, wh))
-
-        ## if relative step is given, use that
-        if (len(dstep) > 0):
-            dstepi = numpy.take(dstep, ifree)
-            wh = (numpy.nonzero(dstepi > 0) )[0]
-            if len(wh) > 0: numpy.put(h, wh, abs(numpy.take(dstepi,wh)*numpy.take(x,wh)))
-
-        ## In case any of the step values are zero
-        wh = (numpy.nonzero(h == 0) )[0]
-        if len(wh) > 0: numpy.put(h, wh, eps)
-
-        ## Reverse the sign of the step if we are up against the parameter
-        ## limit, or if the user requested it.
-        #mask = dside == -1
-        mask = numpy.take((dside == -1), ifree)
-
-        if len(ulimited) > 0 and len(ulimit) > 0:
-            #mask = mask or (ulimited and (x > ulimit-h))
-            mask = mask | (ulimited & (x > ulimit-h))
-            wh = (numpy.nonzero(mask))[0]
-
-            if len(wh) > 0: numpy.put(h, wh, -numpy.take(h, wh))
-        ## Loop through parameters, computing the derivative for each
-        for j in range(n):
-            xp = xall.copy()
-            xp[ifree[j]] = xp[ifree[j]] + h[j]
-            [status, fp] = self.call(fcn, xp, functkw)
-            if (status < 0): return(None)
-
-            if abs(dside[j]) <= 1:
-                ## COMPUTE THE ONE-SIDED DERIVATIVE
-                ## Note optimization fjac(0:*,j)
-                fjac[0:,j] = (fp-fvec)/h[j]
-
-            else:
-                ## COMPUTE THE TWO-SIDED DERIVATIVE
-                xp[ifree[j]] = xall[ifree[j]] - h[j]
-
-                mperr = 0
-                [status, fm] = self.call(fcn, xp, functkw)
-                if (status < 0): return(None)
-
-                ## Note optimization fjac(0:*,j)
-                fjac[0:,j] = (fp-fm)/(2*h[j])
-        return(fjac)
-
-
-
-    #     Original FORTRAN documentation
-    #     **********
-    #
-    #     subroutine qrfac
-    #
-    #     this subroutine uses householder transformations with column
-    #     pivoting (optional) to compute a qr factorization of the
-    #     m by n matrix a. that is, qrfac determines an orthogonal
-    #     matrix q, a permutation matrix p, and an upper trapezoidal
-    #     matrix r with diagonal elements of nonincreasing magnitude,
-    #     such that a*p = q*r. the householder transformation for
-    #     column k, k = 1,2,...,min(m,n), is of the form
-    #
-    #                        t
-    #        i - (1/u(k))*u*u
-    #
-    #     where u has zeros in the first k-1 positions. the form of
-    #     this transformation and the method of pivoting first
-    #     appeared in the corresponding linpack subroutine.
-    #
-    #     the subroutine statement is
-    #
-    #    subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
-    #
-    #     where
-    #
-    #    m is a positive integer input variable set to the number
-    #      of rows of a.
-    #
-    #    n is a positive integer input variable set to the number
-    #      of columns of a.
-    #
-    #    a is an m by n array. on input a contains the matrix for
-    #      which the qr factorization is to be computed. on output
-    #      the strict upper trapezoidal part of a contains the strict
-    #      upper trapezoidal part of r, and the lower trapezoidal
-    #      part of a contains a factored form of q (the non-trivial
-    #      elements of the u vectors described above).
-    #
-    #    lda is a positive integer input variable not less than m
-    #      which specifies the leading dimension of the array a.
-    #
-    #    pivot is a logical input variable. if pivot is set true,
-    #      then column pivoting is enforced. if pivot is set false,
-    #      then no column pivoting is done.
-    #
-    #    ipvt is an integer output array of length lipvt. ipvt
-    #      defines the permutation matrix p such that a*p = q*r.
-    #      column j of p is column ipvt(j) of the identity matrix.
-    #      if pivot is false, ipvt is not referenced.
-    #
-    #    lipvt is a positive integer input variable. if pivot is false,
-    #      then lipvt may be as small as 1. if pivot is true, then
-    #      lipvt must be at least n.
-    #
-    #    rdiag is an output array of length n which contains the
-    #      diagonal elements of r.
-    #
-    #    acnorm is an output array of length n which contains the
-    #      norms of the corresponding columns of the input matrix a.
-    #      if this information is not needed, then acnorm can coincide
-    #      with rdiag.
-    #
-    #    wa is a work array of length n. if pivot is false, then wa
-    #      can coincide with rdiag.
-    #
-    #     subprograms called
-    #
-    #    minpack-supplied ... dpmpar,enorm
-    #
-    #    fortran-supplied ... dmax1,dsqrt,min0
-    #
-    #     argonne national laboratory. minpack project. march 1980.
-    #     burton s. garbow, kenneth e. hillstrom, jorge j. more
-    #
-    #     **********
-
-    # NOTE: in IDL the factors appear slightly differently than described
-    # above.  The matrix A is still m x n where m >= n.
-    #
-    # The "upper" triangular matrix R is actually stored in the strict
-    # lower left triangle of A under the standard notation of IDL.
-    #
-    # The reflectors that generate Q are in the upper trapezoid of A upon
-    # output.
-    #
-    #  EXAMPLE:  decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
-    #    aa = [[9.,2.,6.],[4.,8.,7.]]
-    #    mpfit_qrfac, aa, aapvt, rdiag, aanorm
-    #     IDL> print, aa
-    #          1.81818*     0.181818*     0.545455*
-    #         -8.54545+      1.90160*     0.432573*
-    #     IDL> print, rdiag
-    #         -11.0000+     -7.48166+
-    #
-    # The components marked with a * are the components of the
-    # reflectors, and those marked with a + are components of R.
-    #
-    # To reconstruct Q and R we proceed as follows.  First R.
-    #    r = fltarr(m, n)
-    #    for i = 0, n-1 do r(0:i,i) = aa(0:i,i)  # fill in lower diag
-    #    r(lindgen(n)*(m+1)) = rdiag
-    #
-    # Next, Q, which are composed from the reflectors.  Each reflector v
-    # is taken from the upper trapezoid of aa, and converted to a matrix
-    # via (I - 2 vT . v / (v . vT)).
-    #
-    #   hh = ident                                    ## identity matrix
-    #   for i = 0, n-1 do begin
-    #    v = aa(*,i) & if i GT 0 then v(0:i-1) = 0    ## extract reflector
-    #    hh = hh ## (ident - 2*(v # v)/total(v * v))  ## generate matrix
-    #   endfor
-    #
-    # Test the result:
-    #    IDL> print, hh ## transpose(r)
-    #          9.00000      4.00000
-    #          2.00000      8.00000
-    #          6.00000      7.00000
-    #
-    # Note that it is usually never necessary to form the Q matrix
-    # explicitly, and MPFIT does not.
-
-
-    def qrfac(self, a, pivot=0):
-
-        if (self.debug): print('Entering qrfac...')
-        machep = self.machar.machep
-        sz = numpy.shape(a)
-        m = sz[0]
-        n = sz[1]
-
-        ## Compute the initial column norms and initialize arrays
-        acnorm = numpy.zeros(n, numpy.float)
-        for j in range(n):
-            acnorm[j] = self.enorm(a[:,j])
-        rdiag = acnorm.copy()
-        wa = rdiag.copy()
-        ipvt = numpy.arange(n)
-
-        ## Reduce a to r with householder transformations
-        minmn = min([m,n])
-        for j in range(minmn):
-            if (pivot != 0):
-                ## Bring the column of largest norm into the pivot position
-                rmax = max(rdiag[j:])
-                kmax = (numpy.nonzero(rdiag[j:] == rmax) )[0]
-                ct = len(kmax)
-                kmax = kmax + j
-                if ct > 0:
-                    kmax = kmax[0]
-
-                    ## Exchange rows via the pivot only.  Avoid actually exchanging
-                    ## the rows, in case there is lots of memory transfer.  The
-                    ## exchange occurs later, within the body of MPFIT, after the
-                    ## extraneous columns of the matrix have been shed.
-                    if kmax != j:
-                        temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
-                        rdiag[kmax] = rdiag[j]
-                        wa[kmax] = wa[j]
-
-            ## Compute the householder transformation to reduce the jth
-            ## column of A to a multiple of the jth unit vector
-            lj = ipvt[j]
-            ajj = a[j:,lj]
-            ajnorm = self.enorm(ajj)
-            if ajnorm == 0: break
-            if a[j,j] < 0: ajnorm = -ajnorm
-
-            ajj = ajj / ajnorm
-            ajj[0] = ajj[0] + 1
-            ## *** Note optimization a(j:*,j)
-            a[j:,lj] = ajj
-
-            ## Apply the transformation to the remaining columns
-            ## and update the norms
-
-            ## NOTE to SELF: tried to optimize this by removing the loop,
-            ## but it actually got slower.  Reverted to "for" loop to keep
-            ## it simple.
-            if (j+1 < n):
-                for k in range(j+1, n):
-                    lk = ipvt[k]
-                    ajk = a[j:,lk]
-                    ## *** Note optimization a(j:*,lk)
-                    ## (corrected 20 Jul 2000)
-                    if a[j,lj] != 0:
-                        a[j:,lk] = ajk - ajj * numpy.sum(ajk*ajj)/a[j,lj]
-                        if ((pivot != 0) and (rdiag[k] != 0)):
-                            temp = a[j,lk]/rdiag[k]
-                            rdiag[k] = rdiag[k] * numpy.sqrt(max((1.-temp**2), 0.))
-                            temp = rdiag[k]/wa[k]
-                            if ((0.05*temp*temp) <= machep):
-                                rdiag[k] = self.enorm(a[j+1:,lk])
-                                wa[k] = rdiag[k]
-            rdiag[j] = -ajnorm
-        return([a, ipvt, rdiag, acnorm])
-
-
-    #     Original FORTRAN documentation
-    #     **********
-    #
-    #     subroutine qrsolv
-    #
-    #     given an m by n matrix a, an n by n diagonal matrix d,
-    #     and an m-vector b, the problem is to determine an x which
-    #     solves the system
-    #
-    #           a*x = b ,     d*x = 0 ,
-    #
-    #     in the least squares sense.
-    #
-    #     this subroutine completes the solution of the problem
-    #     if it is provided with the necessary information from the
-    #     factorization, with column pivoting, of a. that is, if
-    #     a*p = q*r, where p is a permutation matrix, q has orthogonal
-    #     columns, and r is an upper triangular matrix with diagonal
-    #     elements of nonincreasing magnitude, then qrsolv expects
-    #     the full upper triangle of r, the permutation matrix p,
-    #     and the first n components of (q transpose)*b. the system
-    #     a*x = b, d*x = 0, is then equivalent to
-    #
-    #                  t       t
-    #           r*z = q *b ,  p *d*p*z = 0 ,
-    #
-    #     where x = p*z. if this system does not have full rank,
-    #     then a least squares solution is obtained. on output qrsolv
-    #     also provides an upper triangular matrix s such that
-    #
-    #            t   t               t
-    #           p *(a *a + d*d)*p = s *s .
-    #
-    #     s is computed within qrsolv and may be of separate interest.
-    #
-    #     the subroutine statement is
-    #
-    #       subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
-    #
-    #     where
-    #
-    #       n is a positive integer input variable set to the order of r.
-    #
-    #       r is an n by n array. on input the full upper triangle
-    #         must contain the full upper triangle of the matrix r.
-    #         on output the full upper triangle is unaltered, and the
-    #         strict lower triangle contains the strict upper triangle
-    #         (transposed) of the upper triangular matrix s.
-    #
-    #       ldr is a positive integer input variable not less than n
-    #         which specifies the leading dimension of the array r.
-    #
-    #       ipvt is an integer input array of length n which defines the
-    #         permutation matrix p such that a*p = q*r. column j of p
-    #         is column ipvt(j) of the identity matrix.
-    #
-    #       diag is an input array of length n which must contain the
-    #         diagonal elements of the matrix d.
-    #
-    #       qtb is an input array of length n which must contain the first
-    #         n elements of the vector (q transpose)*b.
-    #
-    #       x is an output array of length n which contains the least
-    #         squares solution of the system a*x = b, d*x = 0.
-    #
-    #       sdiag is an output array of length n which contains the
-    #         diagonal elements of the upper triangular matrix s.
-    #
-    #       wa is a work array of length n.
-    #
-    #     subprograms called
-    #
-    #       fortran-supplied ... dabs,dsqrt
-    #
-    #     argonne national laboratory. minpack project. march 1980.
-    #     burton s. garbow, kenneth e. hillstrom, jorge j. more
-    #
-
-    def qrsolv(self, r, ipvt, diag, qtb, sdiag):
-        if (self.debug): print('Entering qrsolv...')
-        sz = numpy.shape(r)
-        m = sz[0]
-        n = sz[1]
-
-        ## copy r and (q transpose)*b to preserve input and initialize s.
-        ## in particular, save the diagonal elements of r in x.
-
-        for j in range(n):
-            r[j:n,j] = r[j,j:n]
-        x = numpy.diagonal(r)
-        wa = qtb.copy()
-
-        ## Eliminate the diagonal matrix d using a givens rotation
-        for j in range(n):
-            l = ipvt[j]
-            if (diag[l] == 0): break
-            sdiag[j:] = 0
-            sdiag[j] = diag[l]
-
-            ## The transformations to eliminate the row of d modify only a
-            ## single element of (q transpose)*b beyond the first n, which
-            ## is initially zero.
-
-            qtbpj = 0.
-            for k in range(j,n):
-                if (sdiag[k] == 0): break
-                if (abs(r[k,k]) < abs(sdiag[k])):
-                    cotan  = r[k,k]/sdiag[k]
-                    sine   = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
-                    cosine = sine*cotan
-                else:
-                    tang   = sdiag[k]/r[k,k]
-                    cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
-                    sine   = cosine*tang
-
-                ## Compute the modified diagonal element of r and the
-                ## modified element of ((q transpose)*b,0).
-                r[k,k] = cosine*r[k,k] + sine*sdiag[k]
-                temp = cosine*wa[k] + sine*qtbpj
-                qtbpj = -sine*wa[k] + cosine*qtbpj
-                wa[k] = temp
-
-                ## Accumulate the transformation in the row of s
-                if (n > k+1):
-                    temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
-                    sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
-                    r[k+1:n,k] = temp
-            sdiag[j] = r[j,j]
-            r[j,j] = x[j]
-
-        ## Solve the triangular system for z.  If the system is singular
-        ## then obtain a least squares solution
-        nsing = n
-        wh = (numpy.nonzero(sdiag == 0) )[0]
-        if (len(wh) > 0):
-            nsing = wh[0]
-            wa[nsing:] = 0
-
-        if (nsing >= 1):
-            wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] ## Degenerate case
-            ## *** Reverse loop ***
-            for j in range(nsing-2,-1,-1):
-                sum = numpy.sum(r[j+1:nsing,j]*wa[j+1:nsing])
-                wa[j] = (wa[j]-sum)/sdiag[j]
-
-        ## Permute the components of z back to components of x
-        numpy.put(x, ipvt, wa)
-        return(r, x, sdiag)
-
-
-
-
-    #     Original FORTRAN documentation
-    #
-    #     subroutine lmpar
-    #
-    #     given an m by n matrix a, an n by n nonsingular diagonal
-    #     matrix d, an m-vector b, and a positive number delta,
-    #     the problem is to determine a value for the parameter
-    #     par such that if x solves the system
-    #
-    #        a*x = b ,     sqrt(par)*d*x = 0 ,
-    #
-    #     in the least squares sense, and dxnorm is the euclidean
-    #     norm of d*x, then either par is zero and
-    #
-    #        (dxnorm-delta) .le. 0.1*delta ,
-    #
-    #     or par is positive and
-    #
-    #        abs(dxnorm-delta) .le. 0.1*delta .
-    #
-    #     this subroutine completes the solution of the problem
-    #     if it is provided with the necessary information from the
-    #     qr factorization, with column pivoting, of a. that is, if
-    #     a*p = q*r, where p is a permutation matrix, q has orthogonal
-    #     columns, and r is an upper triangular matrix with diagonal
-    #     elements of nonincreasing magnitude, then lmpar expects
-    #     the full upper triangle of r, the permutation matrix p,
-    #     and the first n components of (q transpose)*b. on output
-    #     lmpar also provides an upper triangular matrix s such that
-    #
-    #         t   t                   t
-    #        p *(a *a + par*d*d)*p = s *s .
-    #
-    #     s is employed within lmpar and may be of separate interest.
-    #
-    #     only a few iterations are generally needed for convergence
-    #     of the algorithm. if, however, the limit of 10 iterations
-    #     is reached, then the output par will contain the best
-    #     value obtained so far.
-    #
-    #     the subroutine statement is
-    #
-    #    subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
-    #                     wa1,wa2)
-    #
-    #     where
-    #
-    #    n is a positive integer input variable set to the order of r.
-    #
-    #    r is an n by n array. on input the full upper triangle
-    #      must contain the full upper triangle of the matrix r.
-    #      on output the full upper triangle is unaltered, and the
-    #      strict lower triangle contains the strict upper triangle
-    #      (transposed) of the upper triangular matrix s.
-    #
-    #    ldr is a positive integer input variable not less than n
-    #      which specifies the leading dimension of the array r.
-    #
-    #    ipvt is an integer input array of length n which defines the
-    #      permutation matrix p such that a*p = q*r. column j of p
-    #      is column ipvt(j) of the identity matrix.
-    #
-    #    diag is an input array of length n which must contain the
-    #      diagonal elements of the matrix d.
-    #
-    #    qtb is an input array of length n which must contain the first
-    #      n elements of the vector (q transpose)*b.
-    #
-    #    delta is a positive input variable which specifies an upper
-    #      bound on the euclidean norm of d*x.
-    #
-    #    par is a nonnegative variable. on input par contains an
-    #      initial estimate of the levenberg-marquardt parameter.
-    #      on output par contains the final estimate.
-    #
-    #    x is an output array of length n which contains the least
-    #      squares solution of the system a*x = b, sqrt(par)*d*x = 0,
-    #      for the output par.
-    #
-    #    sdiag is an output array of length n which contains the
-    #      diagonal elements of the upper triangular matrix s.
-    #
-    #    wa1 and wa2 are work arrays of length n.
-    #
-    #     subprograms called
-    #
-    #    minpack-supplied ... dpmpar,enorm,qrsolv
-    #
-    #    fortran-supplied ... dabs,dmax1,dmin1,dsqrt
-    #
-    #     argonne national laboratory. minpack project. march 1980.
-    #     burton s. garbow, kenneth e. hillstrom, jorge j. more
-    #
-
-    def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
-
-        if (self.debug): print('Entering lmpar...')
-        dwarf = self.machar.minnum
-        sz = numpy.shape(r)
-        m = sz[0]
-        n = sz[1]
-
-        ## Compute and store in x the gauss-newton direction.  If the
-        ## jacobian is rank-deficient, obtain a least-squares solution
-        nsing = n
-        wa1 = qtb.copy()
-        wh = (numpy.nonzero(numpy.diagonal(r) == 0) )[0]
-        if len(wh) > 0:
-            nsing = wh[0]
-            wa1[wh[0]:] = 0
-        if nsing > 1:
-            ## *** Reverse loop ***
-            for j in range(nsing-1,-1,-1):
-                wa1[j] = wa1[j]/r[j,j]
-                if (j-1 >= 0):
-                    wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
-
-        ## Note: ipvt here is a permutation array
-        numpy.put(x, ipvt, wa1)
-
-        ## Initialize the iteration counter.  Evaluate the function at the
-        ## origin, and test for acceptance of the gauss-newton direction
-        iter = 0
-        wa2 = diag * x
-        dxnorm = self.enorm(wa2)
-        fp = dxnorm - delta
-        if (fp <= 0.1*delta):
-            return[r, 0., x, sdiag]
-
-        ## If the jacobian is not rank deficient, the newton step provides a
-        ## lower bound, parl, for the zero of the function.  Otherwise set
-        ## this bound to zero.
-
-        parl = 0.
-        if nsing >= n:
-            wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm
-            wa1[0] = wa1[0] / r[0,0] ## Degenerate case
-            for j in range(1,n):   ## Note "1" here, not zero
-                sum = numpy.sum(r[0:j,j]*wa1[0:j])
-                wa1[j] = (wa1[j] - sum)/r[j,j]
-
-            temp = self.enorm(wa1)
-            parl = ((fp/delta)/temp)/temp
-
-        ## Calculate an upper bound, paru, for the zero of the function
-        for j in range(n):
-            sum = numpy.sum(r[0:j+1,j]*qtb[0:j+1])
-            wa1[j] = sum/diag[ipvt[j]]
-        gnorm = self.enorm(wa1)
-        paru = gnorm/delta
-        if paru == 0: paru = dwarf/min([delta,0.1])
-
-        ## If the input par lies outside of the interval (parl,paru), set
-        ## par to the closer endpoint
-
-        par = max([par,parl])
-        par = min([par,paru])
-        if par == 0: par = gnorm/dxnorm
-
-        ## Beginning of an interation
-        while(1):
-            iter = iter + 1
-
-            ## Evaluate the function at the current value of par
-            if par == 0: par = max([dwarf, paru*0.001])
-            temp = numpy.sqrt(par)
-            wa1 = temp * diag
-            [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
-            wa2 = diag*x
-            dxnorm = self.enorm(wa2)
-            temp = fp
-            fp = dxnorm - delta
-
-            if ((abs(fp) <= 0.1*delta) or
-                    ((parl == 0) and (fp <= temp) and (temp < 0)) or
-                    (iter == 10)): break;
-
-            ## Compute the newton correction
-            wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm
-
-            for j in range(n-1):
-                wa1[j] = wa1[j]/sdiag[j]
-                wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
-            wa1[n-1] = wa1[n-1]/sdiag[n-1] ## Degenerate case
-
-            temp = self.enorm(wa1)
-            parc = ((fp/delta)/temp)/temp
-
-            ## Depending on the sign of the function, update parl or paru
-            if fp > 0: parl = max([parl,par])
-            if fp < 0: paru = min([paru,par])
-
-            ## Compute an improved estimate for par
-            par = max([parl, par+parc])
-
-            ## End of an iteration
-
-        ## Termination
-        return[r, par, x, sdiag]
-
-
-    ## Procedure to tie one parameter to another.
-    def tie(self, p, ptied=None):
-        if (self.debug): print('Entering tie...')
-        if (ptied == None): return
-        for i in range(len(ptied)):
-            if ptied[i] == '': continue
-            cmd = 'p[' + str(i) + '] = ' + ptied[i]
-            exec(cmd)
-        return(p)
-
-
-    #     Original FORTRAN documentation
-    #     **********
-    #
-    #     subroutine covar
-    #
-    #     given an m by n matrix a, the problem is to determine
-    #     the covariance matrix corresponding to a, defined as
-    #
-    #                    t
-    #           inverse(a *a) .
-    #
-    #     this subroutine completes the solution of the problem
-    #     if it is provided with the necessary information from the
-    #     qr factorization, with column pivoting, of a. that is, if
-    #     a*p = q*r, where p is a permutation matrix, q has orthogonal
-    #     columns, and r is an upper triangular matrix with diagonal
-    #     elements of nonincreasing magnitude, then covar expects
-    #     the full upper triangle of r and the permutation matrix p.
-    #     the covariance matrix is then computed as
-    #
-    #                      t     t
-    #           p*inverse(r *r)*p  .
-    #
-    #     if a is nearly rank deficient, it may be desirable to compute
-    #     the covariance matrix corresponding to the linearly independent
-    #     columns of a. to define the numerical rank of a, covar uses
-    #     the tolerance tol. if l is the largest integer such that
-    #
-    #           abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
-    #
-    #     then covar computes the covariance matrix corresponding to
-    #     the first l columns of r. for k greater than l, column
-    #     and row ipvt(k) of the covariance matrix are set to zero.
-    #
-    #     the subroutine statement is
-    #
-    #       subroutine covar(n,r,ldr,ipvt,tol,wa)
-    #
-    #     where
-    #
-    #       n is a positive integer input variable set to the order of r.
-    #
-    #       r is an n by n array. on input the full upper triangle must
-    #         contain the full upper triangle of the matrix r. on output
-    #         r contains the square symmetric covariance matrix.
-    #
-    #       ldr is a positive integer input variable not less than n
-    #         which specifies the leading dimension of the array r.
-    #
-    #       ipvt is an integer input array of length n which defines the
-    #         permutation matrix p such that a*p = q*r. column j of p
-    #         is column ipvt(j) of the identity matrix.
-    #
-    #       tol is a nonnegative input variable used to define the
-    #         numerical rank of a in the manner described above.
-    #
-    #       wa is a work array of length n.
-    #
-    #     subprograms called
-    #
-    #       fortran-supplied ... dabs
-    #
-    #     argonne national laboratory. minpack project. august 1980.
-    #     burton s. garbow, kenneth e. hillstrom, jorge j. more
-    #
-    #     **********
-
-    def calc_covar(self, rr, ipvt=None, tol=1.e-14):
-
-        if (self.debug): print('Entering calc_covar...')
-        if numpy.rank(rr) != 2:
-            print('ERROR: r must be a two-dimensional matrix')
-            return(-1)
-        s = numpy.shape(rr)
-        n = s[0]
-        if s[0] != s[1]:
-            print('ERROR: r must be a square matrix')
-            return(-1)
-
-        if (ipvt == None): ipvt = numpy.arange(n)
-        r = rr.copy()
-        r.shape = [n,n]
-
-        ## For the inverse of r in the full upper triangle of r
-        l = -1
-        tolr = tol * abs(r[0,0])
-        for k in range(n):
-            if (abs(r[k,k]) <= tolr): break
-            r[k,k] = 1./r[k,k]
-            for j in range(k):
-                temp = r[k,k] * r[j,k]
-                r[j,k] = 0.
-                r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
-            l = k
-
-        ## Form the full upper triangle of the inverse of (r transpose)*r
-        ## in the full upper triangle of r
-        if l >= 0:
-            for k in range(l+1):
-                for j in range(k):
-                    temp = r[j,k]
-                    r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
-                temp = r[k,k]
-                r[0:k+1,k] = temp * r[0:k+1,k]
-
-        ## For the full lower triangle of the covariance matrix
-        ## in the strict lower triangle or and in wa
-        wa = numpy.repeat([r[0,0]], n)
-        for j in range(n):
-            jj = ipvt[j]
-            sing = j > l
-            for i in range(j+1):
-                if sing: r[i,j] = 0.
-                ii = ipvt[i]
-                if ii > jj: r[ii,jj] = r[i,j]
-                if ii < jj: r[jj,ii] = r[i,j]
-            wa[jj] = r[j,j]
-
-        ## Symmetrize the covariance matrix in r
-        for j in range(n):
-            r[0:j+1,j] = r[j,0:j+1]
-            r[j,j] = wa[j]
-
-        return(r)
-
-class machar:
-    def __init__(self, double=1):
-        if (double == 0):
-            self.machep = 1.19209e-007
-            self.maxnum = 3.40282e+038
-            self.minnum = 1.17549e-038
-            self.maxgam = 171.624376956302725
-        else:
-            self.machep = 2.2204460e-016
-            self.maxnum = 1.7976931e+308
-            self.minnum = 2.2250739e-308
-            self.maxgam = 171.624376956302725
-
-        self.maxlog = numpy.log(self.maxnum)
-        self.minlog = numpy.log(self.minnum)
-        self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
-        self.rgiant = numpy.sqrt(self.maxnum) * 0.1
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/numerixenv.py b/required_pkgs/stsci.tools/lib/stsci/tools/numerixenv.py
deleted file mode 100644
index 83da73e..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/numerixenv.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from __future__ import division # confidence medium
-import os
-
-def check_input(xxx):
-    """Check if input is a Numarray Array."""
-    try:
-        import numarray
-        return isinstance(xxx,numarray.numarraycore.NumArray)    
-    except ImportError:
-        pass
-
-def check():
-    """Check for running numarray version of pyfits with numpy code."""
-    pass
-
-    
-
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/parseinput.py b/required_pkgs/stsci.tools/lib/stsci/tools/parseinput.py
deleted file mode 100644
index 5dba2e0..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/parseinput.py
+++ /dev/null
@@ -1,219 +0,0 @@
-
-#  Program: parseinput.py
-#  Author:  Christopher Hanley
-#
-#  License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-#
-#  History:
-#   Version 0.1,  11/02/2004: Initial Creation -- CJH
-#   Version 0.1.2 01/10/2005: Removed the appending of "_drz.fits" to extracted
-#       file names.  -- CJH
-#   Version 0.1.3 01/18/2005: Added the NICMOS '_asc.fits' to the list of
-#       valid association file names.
-#   Version 0.1.4 01/25/2005: Removed reliance on ASN dict keys for ordering 
-#                   the output filelist. WJH/CJH 
-#   Version 0.1.5 10/11/2005: Corrected typo in errorstr variable name discovered
-#                   by external user j.e.geach at durham.ac.uk.
-
-from __future__ import division # confidence high
-
-__version__ = '0.1.5 (10/11/2005)'
-__author__  = 'Christopher Hanley'
-
-# irafglob provides the ability to recursively parse user input that
-# is in the form of wildcards and '@' files.
-from . import irafglob
-from .irafglob import irafglob
-
-from . import fileutil
-from stsci.tools.asnutil import readASNTable
-
-def parseinput(inputlist,outputname=None, atfile=None):
-    """
-    Recursively parse user input based upon the irafglob
-    program and construct a list of files that need to be processed.
-    This program addresses the following deficiencies of the irafglob program::
-
-       parseinput can extract filenames from association tables
-    
-    Returns
-    -------
-    This program will return a list of input files that will need to
-    be processed in addition to the name of any outfiles specified in
-    an association table.
-
-    Parameters
-    ----------
-    inputlist - string
-        specification of input files using either wild-cards, @-file or 
-        comma-separated list of filenames
-
-    outputname - string
-        desired name for output product to be created from the input files
-
-    atfile - object
-        function to use in interpreting the @-file columns that gets passed to irafglob
-        
-    Returns
-    -------    
-    files - list of strings 
-        names of output files to be processed
-    newoutputname - string 
-        name of output file to be created.
-        
-    See Also
-    --------
-    stsci.tools.irafglob
-    
-    """
-
-    # Initalize some variables
-    files = [] # list used to store names of input files
-    newoutputname = outputname # Outputname returned to calling program.
-                               # The value of outputname is only changed
-                               # if it had a value of 'None' on input.    
-
-
-    # We can use irafglob to parse the input.  If the input wasn't
-    # an association table, it needs to be either a wildcard, '@' file,
-    # or comma seperated list.
-    files = irafglob(inputlist, atfile=atfile)
-    
-    # Now that we have expanded the inputlist into a python list
-    # containing the list of input files, it is necessary to examine
-    # each of the files to make sure none of them are association tables.
-    #
-    # If an association table is found, the entries should be read 
-    # Determine if the input is an association table
-    for file in files:
-        if (checkASN(file) == True):
-            # Create a list to store the files extracted from the
-            # association tiable
-            assoclist = []
-            
-            # The input is an association table
-            try:
-                # Open the association table
-                assocdict = readASNTable(file, None, prodonly=False)
-            except:
-                errorstr  = "###################################\n"
-                errorstr += "#                                 #\n"
-                errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
-                errorstr +=  str(file)+'\n'
-                errorstr += "# DURING FILE PARSING.            #\n"
-                errorstr += "#                                 #\n"
-                errorstr += "# Please determine if the file is #\n"
-                errorstr += "# in the current directory and    #\n"
-                errorstr += "# that it has been properly       #\n"
-                errorstr += "# formatted.                      #\n"
-                errorstr += "#                                 #\n"
-                errorstr += "# This error message is being     #\n"
-                errorstr += "# generated from within the       #\n"
-                errorstr += "# parseinput.py module.           #\n"
-                errorstr += "#                                 #\n"
-                errorstr += "###################################\n"
-                raise ValueError(errorstr)
-                
-            # Extract the output name from the association table if None
-            # was provided on input.
-            if outputname  == None:
-                    newoutputname = assocdict['output']
-
-            # Loop over the association dictionary to extract the input
-            # file names.
-            for f in assocdict['order']:
-                assoclist.append(fileutil.buildRootname(f))
-            
-            # Remove the name of the association table from the list of files
-            files.remove(file)
-            # Append the list of filenames generated from the association table
-            # to the master list of input files.
-            files.extend(assoclist)
-        
-    # Return the list of the input files and the output name if provided in an association.
-    return files,newoutputname
-
-
-def checkASN(filename):
-    """
-    Determine if the filename provided to the function belongs to
-    an association.
-
-    Parameters
-    ----------
-    filename: string
-    
-    Returns
-    -------
-    validASN  : boolean value
-     
-    """
-    # Extract the file extn type:
-    extnType = filename[filename.rfind('_')+1:filename.rfind('.')]
-    
-    # Determine if this extn name is valid for an assocation file
-    if isValidAssocExtn(extnType):
-        return True
-    else:
-        return False 
-    
-    
-def isValidAssocExtn(extname):
-    """
-    Determine if the extension name given as input could
-    represent a valid association file.
-
-    Parameters
-    ----------
-    extname : string
-    
-    Returns
-    -------
-    isValid  : boolean value
-    
-    """
-    # Define a list of valid extension types to define an association table.
-    validExtnNames = ['asn','asc']
-    
-    # Loop over the list of valid extension types and compare with the input 
-    # extension name.  If there is ever a match return True.
-    for validName in validExtnNames:
-        if (extname == validName):
-            return True
-    return False
-    
-def countinputs(inputlist):
-    """
-    Determine the number of inputfiles provided by the user and the
-    number of those files that are association tables
-    
-    Parameters
-    ----------
-    inputlist   : string 
-        the user input
-        
-    Returns
-    -------
-    numInputs: int
-        number of inputs provided by the user
-    numASNfiles: int
-        number of association files provided as input
-    """
-    
-    # Initialize return values
-    numInputs = 0
-    numASNfiles = 0
-    
-    # User irafglob to count the number of inputfiles
-    files = irafglob(inputlist, atfile=None)
-
-    # Use the "len" ufunc to count the number of entries in the list
-    numInputs = len(files)
-    
-    # Loop over the list and see if any of the entries are association files
-    for file in files:
-        if (checkASN(file) == True):
-            numASNfiles += 1
-    
-    return numInputs,numASNfiles
-    
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/readgeis.py b/required_pkgs/stsci.tools/lib/stsci/tools/readgeis.py
deleted file mode 100644
index 9470918..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/readgeis.py
+++ /dev/null
@@ -1,434 +0,0 @@
-#!/usr/bin/env python
-
-# $Id: readgeis.py 41177 2015-06-12 19:40:26Z bsimon $
-
-"""
-        readgeis: Read GEIS file and convert it to a FITS extension file.
-
-        License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-        Usage:
-
-                readgeis.py [options] GEISname FITSname
-
-                GEISname is the input GEIS file in GEIS format, and FITSname
-                is the output file in FITS format. GEISname can be a
-                directory name.  In this case, it will try to use all `*.??h`
-                files as input file names.
-
-                If FITSname is omitted or is a directory name, this task will
-                try to construct the output names from the input names, i.e.:
-
-                abc.xyh will have an output name of abc_xyf.fits
-
-        :Options:
-
-        -h     print the help (this text)
-
-        :Example:
-
-        If used in Pythons script, a user can, e. g.::
-
-            >>> import readgeis
-            >>> hdulist = readgeis.readgeis(GEISFileName)
-            (do whatever with hdulist)
-            >>> hdulist.writeto(FITSFileName)
-
-        The most basic usage from the command line::
-
-            readgeis.py test1.hhh test1.fits
-
-        This command will convert the input GEIS file test1.hhh to
-        a FITS file test1.fits.
-
-
-        From the command line::
-
-            readgeis.py .
-
-        this will convert all `*.??h` files in the current directory
-        to FITS files (of corresponding names) and write them in the
-        current directory.
-
-
-        Another example of usage from the command line::
-
-            readgeis.py "u*" "*"
-
-        this will convert all `u*.??h` files in the current directory
-        to FITS files (of corresponding names) and write them in the
-        current directory.  Note that when using wild cards, it is
-        necessary to put them in quotes.
-
-"""
-
-# Developed by Science Software Branch, STScI, USA.
-# This version needs pyfits 0.9.6.3 or later
-# and numpy version 1.0.4 or later
-
-from __future__ import division, print_function # confidence high
-
-__version__ = "2.2 (18 Feb, 2011), \xa9 AURA"
-
-import os, sys
-from astropy.io import fits
-import numpy
-from numpy import memmap
-from functools import reduce
-
-def stsci(hdulist):
-    """For STScI GEIS files, need to do extra steps."""
-
-    instrument = hdulist[0].header.get('INSTRUME', '')
-
-    # Update extension header keywords
-    if instrument in ("WFPC2", "FOC"):
-        rootname = hdulist[0].header.get('ROOTNAME', '')
-        filetype = hdulist[0].header.get('FILETYPE', '')
-        for i in range(1, len(hdulist)):
-            # Add name and extver attributes to match PyFITS data structure
-            hdulist[i].name = filetype
-            hdulist[i]._extver = i
-            # Add extension keywords for this chip to extension
-            hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier")
-            hdulist[i].header['EXTVER']= (i, "extension version number")
-            hdulist[i].header['EXTNAME'] = (filetype, "extension name")
-            hdulist[i].header['INHERIT'] = (True, "inherit the primary header")
-            hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set")
-
-
-def stsci2(hdulist, filename):
-    """For STScI GEIS files, need to do extra steps."""
-
-    # Write output file name to the primary header
-    instrument = hdulist[0].header.get('INSTRUME', '')
-    if instrument in ("WFPC2", "FOC"):
-        hdulist[0].header['FILENAME'] = filename
-
-
-def readgeis(input):
-
-    """Input GEIS files "input" will be read and a HDUList object will
-       be returned.
-
-       The user can use the writeto method to write the HDUList object to
-       a FITS file.
-    """
-
-    global dat
-    cardLen = fits.Card.length
-
-    # input file(s) must be of the form *.??h and *.??d
-    if input[-1] != 'h' or input[-4] != '.':
-        raise "Illegal input GEIS file name %s" % input
-
-    data_file = input[:-1]+'d'
-
-    _os = sys.platform
-    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
-        bytes_per_line = cardLen+1
-    else:
-        raise "Platform %s is not supported (yet)." % _os
-
-    geis_fmt = {'REAL':'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'}
-    end_card = 'END'+' '* (cardLen-3)
-
-    # open input file
-    im = open(input)
-
-    # Generate the primary HDU
-    cards = []
-    while 1:
-        line = im.read(bytes_per_line)[:cardLen]
-        line = line[:8].upper() + line[8:]
-        if line == end_card:
-            break
-        cards.append(fits.Card.fromstring(line))
-
-    phdr = fits.Header(cards)
-    im.close()
-
-    _naxis0 = phdr.get('NAXIS', 0)
-    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
-    _naxis.insert(0, _naxis0)
-    _bitpix = phdr['BITPIX']
-    _psize = phdr['PSIZE']
-    if phdr['DATATYPE'][:4] == 'REAL':
-        _bitpix = -_bitpix
-    if _naxis0 > 0:
-        size = reduce(lambda x,y:x*y, _naxis[1:])
-        data_size = abs(_bitpix) * size // 8
-    else:
-        data_size = 0
-    group_size = data_size + _psize // 8
-
-    # decode the group parameter definitions,
-    # group parameters will become extension header
-    groups = phdr['GROUPS']
-    gcount = phdr['GCOUNT']
-    pcount = phdr['PCOUNT']
-
-    formats = []
-    bools = []
-    floats = []
-    _range = range(1, pcount+1)
-    key = [phdr['PTYPE'+str(j)] for j in _range]
-    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
-
-    # delete group parameter definition header keywords
-    _list = ['PTYPE'+str(j) for j in _range] + \
-            ['PDTYPE'+str(j) for j in _range] + \
-            ['PSIZE'+str(j) for j in _range] + \
-            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
-
-    # Construct record array formats for the group parameters
-    # as interpreted from the Primary header file
-    for i in range(1, pcount+1):
-        ptype = key[i-1]
-        pdtype = phdr['PDTYPE'+str(i)]
-        star = pdtype.find('*')
-        _type = pdtype[:star]
-        _bytes = pdtype[star+1:]
-
-        # collect boolean keywords since they need special attention later
-
-        if _type == 'LOGICAL':
-            bools.append(i)
-        if pdtype == 'REAL*4':
-            floats.append(i)
-
-        fmt = geis_fmt[_type] + _bytes
-        formats.append((ptype,fmt))
-
-    _shape = _naxis[1:]
-    _shape.reverse()
-    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
-    _bscale = phdr.get('BSCALE', 1)
-    _bzero = phdr.get('BZERO', 0)
-    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
-        _uint16 = 1
-        _bzero = 32768
-    else:
-        _uint16 = 0
-
-    # delete from the end, so it will not conflict with previous delete
-    for i in range(len(phdr)-1, -1, -1):
-        if phdr.cards[i].keyword in _list:
-            del phdr[i]
-
-    # clean up other primary header keywords
-    phdr['SIMPLE'] = True
-    phdr['BITPIX'] = 16
-    phdr['GROUPS'] = False
-    _after = 'NAXIS'
-    if _naxis0 > 0:
-        _after += str(_naxis0)
-    phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after)
-    phdr.set('NEXTEND', value=gcount, comment="Number of standard extensions")
-
-    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=None)])
-
-    # Use copy-on-write for all data types since byteswap may be needed
-    # in some platforms.
-    f1 = open(data_file, mode='rb')
-    dat = f1.read()
-#    dat = memmap(data_file, mode='c')
-    hdulist.mmobject = dat
-
-    errormsg = ""
-
-    loc = 0
-    for k in range(gcount):
-        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
-        ext_dat = ext_dat.reshape(_shape)
-        if _uint16:
-            ext_dat += _bzero
-        # Check to see whether there are any NaN's or infs which might indicate
-        # a byte-swapping problem, such as being written out on little-endian
-        #   and being read in on big-endian or vice-versa.
-        if _code.find('float') >= 0 and \
-            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
-            errormsg += "===================================\n"
-            errormsg += "= WARNING:                        =\n"
-            errormsg += "=  Input image:                   =\n"
-            errormsg += input+"[%d]\n"%(k+1)
-            errormsg += "=  had floating point data values =\n"
-            errormsg += "=  of NaN and/or Inf.             =\n"
-            errormsg += "===================================\n"
-        elif _code.find('int') >= 0:
-            # Check INT data for max values
-            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
-            if ext_dat_exp.max() == int(_bitpix) - 1:
-                # Potential problems with byteswapping
-                errormsg += "===================================\n"
-                errormsg += "= WARNING:                        =\n"
-                errormsg += "=  Input image:                   =\n"
-                errormsg += input+"[%d]\n"%(k+1)
-                errormsg += "=  had integer data values        =\n"
-                errormsg += "=  with maximum bitvalues.        =\n"
-                errormsg += "===================================\n"
-
-        ext_hdu = fits.ImageHDU(data=ext_dat)
-
-        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
-
-        loc += group_size
-
-        # Create separate PyFITS Card objects for each entry in 'rec'
-        for i in range(1, pcount+1):
-            #val = rec.field(i-1)[0]
-            val = rec[0][i-1]
-            if val.dtype.kind == 'S':
-                val = val.decode('ascii')
-
-            if i in bools:
-                if val:
-                    val = True
-                else:
-                    val = False
-
-            if i in floats:
-                # use fromstring, format in Card is deprecated in pyfits 0.9
-                _str = '%-8s= %20.7G / %s' % (key[i-1], val, comm[i-1])
-                _card = fits.Card.fromstring(_str)
-            else:
-                _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
-
-            ext_hdu.header.append(_card)
-
-        # deal with bscale/bzero
-        if (_bscale != 1 or _bzero != 0):
-            ext_hdu.header['BSCALE'] = _bscale
-            ext_hdu.header['BZERO'] = _bzero
-
-        hdulist.append(ext_hdu)
-
-    if errormsg != "":
-        errormsg += "===================================\n"
-        errormsg += "=  This file may have been        =\n"
-        errormsg += "=  written out on a platform      =\n"
-        errormsg += "=  with a different byte-order.   =\n"
-        errormsg += "=                                 =\n"
-        errormsg += "=  Please verify that the values  =\n"
-        errormsg += "=  are correct or apply the       =\n"
-        errormsg += "=  '.byteswap()' method.          =\n"
-        errormsg += "===================================\n"
-        print(errormsg)
-
-    f1.close()
-    stsci(hdulist)
-    return hdulist
-
-def parse_path(f1, f2):
-
-    """Parse two input arguments and return two lists of file names"""
-
-    import glob
-
-    # if second argument is missing or is a wild card, point it
-    # to the current directory
-    f2 = f2.strip()
-    if f2 == '' or f2 == '*':
-        f2 = './'
-
-    # if the first argument is a directory, use all GEIS files
-    if os.path.isdir(f1):
-        f1 = os.path.join(f1, '*.??h')
-    list1 = glob.glob(f1)
-    list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.']
-
-    # if the second argument is a directory, use file names in the
-    # first argument to construct file names, i.e.
-    # abc.xyh will be converted to abc_xyf.fits
-    if os.path.isdir(f2):
-        list2 = []
-        for file in list1:
-            name = os.path.split(file)[-1]
-            fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits'
-            list2.append(os.path.join(f2, fitsname))
-    else:
-        list2 = [s.strip() for s in f2.split(",")]
-
-    if (list1 == [] or list2 == []):
-        str = ""
-        if (list1 == []): str += "Input files `%s` not usable/available. " % f1
-        if (list2 == []): str += "Input files `%s` not usable/available. " % f2
-        raise IOError(str)
-    else:
-        return list1, list2
-
-#-------------------------------------------------------------------------------
-# special initialization when this is the main program
-
-if __name__ == "__main__":
-
-    import getopt
-
-    try:
-        optlist, args = getopt.getopt(sys.argv[1:], 'h')
-    except getopt.error as e:
-        print(str(e))
-        print(__doc__)
-        print("\t", __version__)
-
-    # initialize default values
-    help = 0
-
-    # read options
-    for opt, value in optlist:
-        if opt == "-h":
-            help = 1
-
-    if (help):
-        print(__doc__)
-        print("\t", __version__)
-    else:
-        if len(args) == 1:
-            args.append('')
-        list1, list2 = parse_path (args[0], args[1])
-        npairs = min (len(list1), len(list2))
-        for i in range(npairs):
-            if os.path.exists(list2[i]):
-                print("Output file %s already exists, skip." % list2[i])
-                break
-            try:
-                hdulist = readgeis(list1[i])
-                stsci2(hdulist, list2[i])
-                hdulist.writeto(list2[i])
-                hdulist.close()
-                print("%s -> %s" % (list1[i], list2[i]))
-            except Exception as e :
-                print("Conversion fails for %s: %s" % (list1[i], str(e)))
-                break
-
-"""
-
-Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-"""
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/stash.py b/required_pkgs/stsci.tools/lib/stsci/tools/stash.py
deleted file mode 100644
index 720fc8a..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/stash.py
+++ /dev/null
@@ -1,30 +0,0 @@
-'''This is a tool for stashing certain information used by the
-continuous integration system at STScI.  It is not intended for,
-or even expected to work, in any other application.
-
---
-
-use this in shell scripts:
-    d=`python -m stsci.tools.stash`
-    cp file $d
-
-'''
-
-from __future__ import print_function
-import sys
-import os
-
-# use os.path.join because the file name may be used outside of
-# python and we need it to be right on Windows.
-stash_dir = os.path.join(os.path.dirname(__file__),'stash')
-
-try :
-    os.mkdir(stash_dir)
-except OSError :
-    pass
-
-if __name__ == '__main__' :
-    print(stash_dir)
-    if not os.path.exists(stash_dir) :
-        sys.exit(1)
-
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/stpyfits.py b/required_pkgs/stsci.tools/lib/stsci/tools/stpyfits.py
deleted file mode 100644
index 0aaff57..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/stpyfits.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# $Id: stpyfits.py 38142 2015-03-06 13:42:21Z bsimon $
-
-"""
-The stpyfits module is an extension to the `astropy.io.fits` module which offers
-additional features specific to STScI.  These features include the handling
-of Constant Data Value Arrays.
-
-"""
-from __future__ import division
-
-import functools
-import sys
-import numpy as np
-
-from astropy.io import fits
-# A few imports for backward compatibility; in the earlier stpyfits these were
-# overridden, but with fits's new extension system it's not necessary
-from astropy.io.fits.util import _is_int
-from astropy.utils import lazyproperty
-
-PY3K = sys.version_info[0] > 2
-
-STPYFITS_ENABLED = False # Not threadsafe TODO: (should it be?)
-
-# Register the extension classes; simply importing stpyfits does not
-# automatically enable it.  Instead, it can be enabled/disabled using these
-# functions.
-def enable_stpyfits():
-    global STPYFITS_ENABLED
-    if not STPYFITS_ENABLED:
-        fits.register_hdu(ConstantValuePrimaryHDU)
-        fits.register_hdu(ConstantValueImageHDU)
-        STPYFITS_ENABLED = True
-
-
-def disable_stpyfits():
-    global STPYFITS_ENABLED
-    if STPYFITS_ENABLED:
-        fits.unregister_hdu(ConstantValuePrimaryHDU)
-        fits.unregister_hdu(ConstantValueImageHDU)
-        STPYFITS_ENABLED = False
-
-
-def with_stpyfits(func):
-    @functools.wraps(func)
-    def wrapped_with_stpyfits(*args, **kwargs):
-        global STPYFITS_ENABLED
-        was_enabled = STPYFITS_ENABLED
-        enable_stpyfits()
-        try:
-            retval = func(*args, **kwargs)
-        finally:
-            # Only disable stpyfits if it wasn't already enabled
-            if not was_enabled:
-                disable_stpyfits()
-        return retval
-    return wrapped_with_stpyfits
-
-
-class _ConstantValueImageBaseHDU(fits.hdu.image._ImageBaseHDU):
-    """
-    A class that extends the `astropy.io.fits.hdu.base._BaseHDU` class to extend its
-    behavior to implement STScI specific extensions to `astropy.io.fits`.
-
-    The `astropy.io.fits.hdu.base._BaseHDU class` is:
-    """
-
-    __doc__ += fits.hdu.image._ImageBaseHDU.__doc__
-
-    def __init__(self, data=None, header=None, do_not_scale_image_data=False,
-                 uint=False, **kwargs):
-        if header and 'PIXVALUE' in header and header['NAXIS'] == 0:
-            header = header.copy()
-            # Add NAXISn keywords for each NPIXn keyword in the header and
-            # remove the NPIXn keywords
-            naxis = 0
-            for card in reversed(header['NPIX*'].cards):
-                try:
-                    idx = int(card.keyword[len('NPIX'):])
-                except ValueError:
-                    continue
-                hdrlen = len(header)
-                header.set('NAXIS' + str(idx), card.value,
-                           card.comment, after='NAXIS')
-                del header[card.keyword]
-                if len(header) < hdrlen:
-                    # A blank card was used when updating the header; add the
-                    # blank back in.
-                    # TODO: Fix header.set so that it has an option not to
-                    # use a blank card--this is a detail that we really
-                    # shouldn't have to worry about otherwise
-                    header.append()
-
-                # Presumably the NPIX keywords are in order of their axis, but
-                # just in case somehow they're not...
-                naxis = max(naxis, idx)
-
-            # Update the NAXIS keyword with the correct number of axes
-            header['NAXIS'] = naxis
-        elif header and 'PIXVALUE' in header:
-            pixval = header['PIXVALUE']
-            if header['BITPIX'] > 0:
-                if PY3K:
-                    pixval = int(pixval)
-                else:
-                    pixval = long(pixval)
-            arrayval = self._check_constant_value_data(data)
-            if arrayval is not None:
-                header = header.copy()
-                # Update the PIXVALUE keyword if necessary
-                if arrayval != pixval:
-                    header['PIXVALUE'] = arrayval
-            else:
-                header = header.copy()
-                # There is a PIXVALUE keyword but NAXIS is not 0 and the data
-                # does not match the PIXVALUE.
-                # Must remove the PIXVALUE and NPIXn keywords so we recognize
-                # that there is non-constant data in the file.
-                del header['PIXVALUE']
-                for card in header['NPIX*'].cards:
-                    try:
-                        idx = int(card.keyword[len('NPIX'):])
-                    except ValueError:
-                        continue
-                    del header[card.keyword]
-
-        # Make sure to pass any arguments other than data and header as
-        # keyword arguments, because PrimaryHDU and ImageHDU have stupidly
-        # different signatures for __init__
-        super(_ConstantValueImageBaseHDU, self).__init__(
-            data, header, do_not_scale_image_data=do_not_scale_image_data,
-            uint=uint)
-
-    @property
-    def size(self):
-        """
-        The HDU's size should always come up as zero so long as there's no
-        actual data in it other than the constant value array.
-        """
-
-        if 'PIXVALUE' in self._header:
-            return 0
-        else:
-            return super(_ConstantValueImageBaseHDU, self).size
-
-    @lazyproperty
-    def data(self):
-        if ('PIXVALUE' in self._header and 'NPIX1' not in self._header and
-               self._header['NAXIS'] > 0):
-            bitpix = self._header['BITPIX']
-            dims = self.shape
-
-            # Special case where the pixvalue can be present but all the NPIXn
-            # keywords are zero.
-            if sum(dims) == 0:
-                return None
-
-            code = self.NumCode[bitpix]
-            pixval = self._header['PIXVALUE']
-            if code in ['uint8', 'int16', 'int32', 'int64']:
-                if PY3K:
-                    pixval = int(pixval)
-                else:
-                    pixval = long(pixval)
-
-            raw_data = np.zeros(shape=dims, dtype=code) + pixval
-
-            if raw_data.dtype.str[0] != '>':
-                raw_data = raw_data.byteswap(True)
-
-            raw_data.dtype = raw_data.dtype.newbyteorder('>')
-
-            if self._bzero != 0 or self._bscale != 1:
-                if bitpix > 16:  # scale integers to Float64
-                    data = np.array(raw_data, dtype=np.float64)
-                elif bitpix > 0:  # scale integers to Float32
-                    data = np.array(raw_data, dtype=np.float32)
-                else:  # floating point cases
-                    data = raw_data
-
-                if self._bscale != 1:
-                    np.multiply(data, self._bscale, data)
-                if self._bzero != 0:
-                    data += self._bzero
-
-                # delete the keywords BSCALE and BZERO after scaling
-                del self._header['BSCALE']
-                del self._header['BZERO']
-                self._header['BITPIX'] = self.ImgCode[data.dtype.name]
-            else:
-                data = raw_data
-            return data
-        else:
-            return super(_ConstantValueImageBaseHDU, self).data
-
-    @data.setter
-    def data(self, data):
-        self.__dict__['data'] = data
-        self._modified = True
-        if self.data is not None and not isinstance(data, np.ndarray):
-            # Try to coerce the data into a numpy array--this will work, on
-            # some level, for most objects
-            try:
-                data = np.array(data)
-            except:
-                raise TypeError('data object %r could not be coerced into an '
-                                'ndarray' % data)
-
-        if isinstance(data, np.ndarray):
-            self._bitpix = self.ImgCode[data.dtype.name]
-            self._axes = list(data.shape)
-            self._axes.reverse()
-        elif self.data is None:
-            self._axes = []
-        else:
-            raise ValueError('not a valid data array')
-
-        self.update_header()
-
-    @classmethod
-    def match_header(cls, header):
-        """A constant value HDU will only be recognized as such if the header
-        contains a valid PIXVALUE and NAXIS == 0.
-        """
-
-        pixvalue = header.get('PIXVALUE')
-        naxis = header.get('NAXIS', 0)
-
-        return (super(_ConstantValueImageBaseHDU, cls).match_header(header) and
-                   (isinstance(pixvalue, float) or _is_int(pixvalue)) and
-                   naxis == 0)
-
-
-    def update_header(self):
-        if (not self._modified and not self._header._modified and
-            (self._has_data and self.shape == self.data.shape)):
-            # Not likely that anything needs updating
-            return
-
-        super(_ConstantValueImageBaseHDU, self).update_header()
-
-        if 'PIXVALUE' in self._header and self._header['NAXIS'] > 0:
-            # This is a Constant Value Data Array.  Verify that the data
-            # actually matches the PIXVALUE
-            pixval = self._header['PIXVALUE']
-            if self._header['BITPIX'] > 0:
-                if PY3K:
-                    pixval = int(pixval)
-                else:
-                    pixval = long(pixval)
-
-            if self.data is None or self.data.nbytes == 0:
-                # Empty data array; just keep the existing PIXVALUE
-                arrayval = self._header['PIXVALUE']
-            else:
-                arrayval = self._check_constant_value_data(self.data)
-            if arrayval is not None:
-                if arrayval != pixval:
-                    self._header['PIXVALUE'] = arrayval
-
-                naxis = self._header['NAXIS']
-                self._header['NAXIS'] = 0
-                for idx in range(naxis, 0, -1):
-                    axisval = self._header['NAXIS%d' % idx]
-                    self._header.set('NPIX%d' % idx, axisval,
-                                     'length of constant array axis %d' % idx,
-                                     after='PIXVALUE')
-                    del self._header['NAXIS%d' % idx]
-            else:
-                # No longer a constant value array; remove any remaining
-                # NPIX or PIXVALUE keywords
-                try:
-                    del self._header['PIXVALUE']
-                except KeyError:
-                    pass
-
-                try:
-                    del self._header['NPIX*']
-                except KeyError:
-                    pass
-
-    def _summary(self):
-        summ = super(_ConstantValueImageBaseHDU, self)._summary()
-        return (summ[0], summ[1].replace('ConstantValue', '')) + summ[2:]
-
-    def _writedata_internal(self, fileobj):
-        if 'PIXVALUE' in self._header:
-            # This is a Constant Value Data Array, so no data is written
-            return 0
-        else:
-            return super(_ConstantValueImageBaseHDU, self).\
-                    _writedata_internal(fileobj)
-
-    def _check_constant_value_data(self, data):
-        """Verify that the HDU's data is a constant value array."""
-
-        arrayval = data.flat[0]
-        if np.all(data == arrayval):
-            return arrayval
-        return None
-
-
-
-class ConstantValuePrimaryHDU(_ConstantValueImageBaseHDU,
-                              fits.hdu.PrimaryHDU):
-    """Primary HDUs with constant value arrays."""
-
-
-class ConstantValueImageHDU(_ConstantValueImageBaseHDU, fits.hdu.ImageHDU):
-    """Image extension HDUs with constant value arrays."""
-
-
-# Import the rest of the astropy.io.fits module
-from astropy.io.fits import *
-
-# For backward-compatibility with older code that thinks PrimaryHDU and
-# ImageHDU should support the ConstantValue features
-PrimaryHDU = ConstantValuePrimaryHDU
-ImageHDU = ConstantValueImageHDU
-
-
-# Override the other "convenience" functions to use stpyfits
-open = fitsopen = with_stpyfits(fits.open)
-info = with_stpyfits(fits.info)
-append = with_stpyfits(fits.append)
-writeto = with_stpyfits(fits.writeto)
-update = with_stpyfits(fits.update)
-getheader = with_stpyfits(fits.getheader)
-getdata = with_stpyfits(fits.getdata)
-getval = with_stpyfits(fits.getval)
-setval = with_stpyfits(fits.setval)
-delval = with_stpyfits(fits.delval)
-
-
-__all__ = fits.__all__ + ['enable_stpyfits', 'disable_stpyfits',
-                          'with_stpyfits', 'ConstantValuePrimaryHDU',
-                          'ConstantValueImageHDU']
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/stsci_distutils_hack.py b/required_pkgs/stsci.tools/lib/stsci/tools/stsci_distutils_hack.py
deleted file mode 100644
index 3289dab..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/stsci_distutils_hack.py
+++ /dev/null
@@ -1,372 +0,0 @@
-#
-# $HeadURL: https://svn.stsci.edu/svn/ssb/stsci_python/stsci.tools/trunk/lib/stsci/tools/stsci_distutils_hack.py $
-# $Rev: 38142 $
-#
-# Implements setup.py code common to many of our packages.
-#
-# The new standard stsci module setup.py is just
-#
-#   import stsci.tools.stsci_distutils_hack
-#   stsci.tools.stsci_distutils_hack.run( pytools_version = "XX" )
-#
-# where XX is the version of stsci.tools you expect for the install to work
-#
-
-from __future__ import division, print_function # confidence high
-
-"""
-Special handling for stsci_python package installation.
-
-stsci_python is distributed as a single package, but it contains
-packages that are also distributed separately.  When we use this
-module to install our package, we can use the exact same definition
-file to control the setup.py of the individual package _and_ the
-setup.py of stsci_python.
-
-This module also preserves revision control data in the installed
-or distributed files.
-
-If you are not a developer at STScI, this module is probably not of
-much interest to you.
-
-"""
-
-__docformat__ = 'restructuredtext'
-
-
-######## ######## ######## ######## ######## ######## ######## ########
-#
-# actually perform the install
-#
-# NOTE: This is not used to install stsci.tools itself!
-
-import sys
-
-def run( pytools_version = None ) :
-    """
-    Perform a stsci_python install based on the information in defsetup.py
-
-    * gather our subversion revision number and the install time
-
-    * perform the install
-
-    usage:
-
-        import stsci.tools.stsci_distutils_hack
-        stsci.tools.stsci_distutils_hack.run(pytools_version = "3.1")
-
-    """
-
-    if not hasattr(sys, 'version_info') or sys.version_info < (2,3,0,'alpha',0):
-        raise SystemExit("Python 2.3 or later required.")
-
-    if pytools_version :
-        # Only try to import stsci.tools if we are asked to check for a version.
-        #
-        # ( We may have been extracted from stsci.tools and bundled with a package.
-        # In that case, we do not want to risk finding some _other_ stsci.tools
-        # and comparing that version. )
-        import stsci.tools
-
-        # bug: should use distutils version comparator to perform ">" comparisons
-        if ( stsci.tools.__version__ != pytools_version ) :
-            print("wrong version of stsci.tools!")
-            print("have "+str(stsci.tools.__version__))
-            print("want "+str(pytools_version))
-            sys.exit(1)
-
-    # look for include files that common linux distributions leave out
-    check_requirements()
-
-    from distutils.core import setup
-    from defsetup import setupargs, pkg
-
-    if "version" in sys.argv :
-        sys.exit(0)
-
-    # If they have multiple packages, we have to allow them to give a list.
-    # That is the unusual case, so we let them give a string if they have a single
-    # package.
-    if isinstance(pkg,str) :
-        pkg = [ pkg ]
-
-    # If they have multiple packages, they have to specify package_dir.  Otherwise,
-    # we can create one for them.
-    if not 'package_dir' in setupargs :
-        setupargs['package_dir'] = { pkg[0] : 'lib' }
-
-
-    for x in setupargs['package_dir'] :
-        x = setupargs['package_dir'][x]
-        # collect our subversion information
-        __set_svn_version__( x )
-
-        # save the date when we last ran setup.py
-        __set_setup_date__( x )
-
-    if "version" in sys.argv :
-        sys.exit(0)
-
-    return setup(
-        name =              pkg[0],
-        packages =          pkg,
-        **setupargs
-        )
-
-
-
-######## ######## ######## ######## ######## ######## ######## ########
-#
-# This part fixes install_data to put data files in the same directory
-# with the python library files, which is where our packages want
-# them.
-#
-# This is essentially "smart_install_data" as used in the old
-# setup.py files, except that it also understands wildcards
-# and os-specific paths.  This means the module author can
-# ask for data files with
-#       "data/generic/*"
-# instead of
-#       glob.glob(os.path.join('data', 'generic', '*'))
-
-
-import os
-import glob
-
-import distutils.util
-
-import distutils.command.install_data
-
-o =  distutils.command.install_data.install_data
-
-# same trick as smart_install_data used: save the old run() method and
-# insert our own run method ahead of it
-
-o.old_run = o.run
-
-def new_run ( self ) :
-        """
-        Hack for distutils to cause install_data to be in the same directory
-        as the python library files.  Our packages expect this.
-        """
-
-        # We want our data files in the directory with the library files
-        install_cmd = self.get_finalized_command('install')
-        self.install_dir = getattr(install_cmd, 'install_lib')
-
-
-        # self.data_files is a list of
-        #       ( destination_directory, [ source_file, source_file, source_file ] )
-        #
-        # We want to do wildcard expansion on all the file names.
-        #
-        l = [ ]
-        for f in self.data_files :
-            ( dest_dir, files ) = f
-            fl = [ ]
-            for ff in files :
-                ff = distutils.util.convert_path(ff)
-                ff = glob.glob(ff)
-                fl.extend(ff)
-            dest_dir = distutils.util.convert_path(dest_dir)
-            l.append( ( dest_dir, fl ) )
-        self.data_files = l
-
-        # now use the original run() function to finish
-        return distutils.command.install_data.install_data.old_run(self)
-
-o.run = new_run
-
-
-######## ######## ######## ######## ######## ######## ######## ########
-#
-# Function to collect svn version information - used to be stsci_python/version.py
-# with multiple copies in the system.
-#
-import os.path
-import re
-
-#
-# This is the entry point.  All you need to do is call this function from your
-# setup.py according to the example above.  It will create a file called
-# lib/svn_version.py ;  After that, you can
-#
-#   # find out what subversion information applies to yourpackage
-#   import yourpackage.svn_version
-#   print yourpackage.svn_version.__svn_version__
-#   print yourpackage.svn_version.__full_svn_info__
-#
-
-def __set_svn_version__(directory="./", fname='svn_version.py' ) :
-    #
-    # directory is both the directory where the version information will be stored
-    # (in the file fname) and the directory that we will run svn info on to
-    # get a version number.
-    #
-    # I think the default of ./ is probably useless at this point.
-    #
-    # fname is the name of the file to store the version information in.  Never change
-    # this.
-    #
-
-    info = None
-    rev = __get_svn_rev__(directory)
-    version_file = os.path.join(directory,fname)
-
-    # if we are unable to determine the revision, we default to leaving the
-    # revision file unchanged.  Otherwise, we fill it in with whatever
-    # we have
-
-    if rev is None:
-        if os.path.exists(version_file) :
-            return
-        revision = 'Unable to determine SVN revision'
-    else:
-        if ( rev == 'exported' or rev == 'unknown' ) and os.path.exists(version_file) :
-            return
-        revision = str(rev)
-
-    info = __get_full_info__(directory)
-
-    # now we can write the version information
-
-    f = open(version_file,'w')
-    f.write("__svn_version__ = %s\n" % repr(revision))
-
-    # info will be a multi-line string.  We are not using repr(info)
-    # for readability; the output of "svn info" can not contain '''
-    # unless you are doing something bad.
-    f.write("\n__full_svn_info__ = '''\n%s'''\n\n" % info)
-    f.close()
-
-
-def __get_svn_rev__(path):
-    m = None
-    try:
-        # with popen3,  stderr goes into a pipe where we ignore it,
-        # This means the user does not see errors.
-        cmd = 'svnversion '+path
-        (sin, sout, serr) = os.popen3(cmd)
-
-        # pick up the first line of output
-        m=sout.read().strip()
-
-        # if it looks like valid svnversion output, return it
-        if m == 'exported' :
-            return m
-        if re.match('^[0-9][0-9:]*[A-Z]*$',m) :
-            return m
-
-        # if we get here, it was not valid - that probably means
-        # an error of some kind.
-    except:
-        pass
-
-    return None
-
-def __get_full_info__(path):
-    info = None
-    try:
-        # with popen3,  stderr goes into a pipe where we ignore it,
-        # This means the user does not see errors.
-        (sin, sout, serr) = os.popen3('svn info %s' % path)
-
-        # pick up all the lines of output
-        info = [l.strip() for l in sout.readlines()]
-
-        # if no output, there was an error and we don't know anything
-        if len(info) == 0 :
-            return "unknown"
-
-        # there was output, so join it all together
-        return '\n'.join(info)
-
-    except:
-        pass
-
-    return "unknown"
-
-######## ######## ######## ######## ######## ######## ######## ########
-#
-# note when we last ran setup.py -- what we really want is when the
-# software was installed, but we can use the time we ran setup.py as
-# a proxy for that.
-#
-
-def __set_setup_date__( path="./", fname='svn_version.py') :
-    import datetime
-    file = os.path.join(path,fname)
-    d = datetime.datetime.now()
-    l = [ ]
-    try :
-        # we don't expect this to fail ever, but it might
-        f = open(file,"r")
-        for line in f :
-            if line.find("# setupdate") < 0 :
-                l.append(line)
-        f.close()
-    except IOError :
-        pass
-    f=open(file,"w")
-    for line in l :
-        f.write(line)
-
-    f.write("%s # setupdate\n" % "import datetime")
-    f.write("%s # setupdate\n" % ("setupdate = "+repr(d)))
-    f.close()
-
-
-######## ######## ######## ######## ######## ######## ######## ########
-#
-#
-
-def check_requirements() :
-
-    import distutils.sysconfig
-
-    dev_pkg_missing =0
-    numpy_missing = 0
-
-    d = distutils.sysconfig.get_python_inc( plat_specific=0 )
-    if not os.path.exists( d + '/Python.h') :
-        print("ERROR: Python development files are missing from "+d)
-        dev_pkg_missing=1
-
-    d = distutils.sysconfig.get_python_inc( plat_specific=1 )
-    if not os.path.exists( d + '/pyconfig.h') :
-        print("ERROR: Python development files are missing from "+d)
-        dev_pkg_missing=1
-
-    try :
-        import numpy
-    except ImportError:
-        numpy_missing = 1
-
-    if not numpy_missing :
-        d = numpy.get_include()
-        if not os.path.exists( d + '/numpy/arrayobject.h') :
-            print("ERROR: Numpy development files are missing from "+d)
-            dev_pkg_missing=1
-
-    # print explanations for whatever problems there are
-    if numpy_missing:
-        print("""
-This installation requires the numpy package.  You may find it in
-your operating system distribution, or you may find it at
-http://numpy.scipy.org
-""")
-
-    if dev_pkg_missing :
-        print("""
-Many OS distributions separate Python and Numpy into user and
-developer packages.  You need both packages to complete this install,
-but this machine appears to be missing one of the developer packages.
-The package names are different on different systems, but usually
-the necessary package is named somethng like 'python-dev' or
-'python-devel' (or 'numpy-dev' or 'numpy-devel', for numpy).
-
-""")
-
-    if numpy_missing or dev_pkg_missing :
-        import sys
-        sys.exit(0)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/swapgeis.py b/required_pkgs/stsci.tools/lib/stsci/tools/swapgeis.py
deleted file mode 100644
index 46a80f7..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/swapgeis.py
+++ /dev/null
@@ -1,624 +0,0 @@
-#!/usr/bin/env python
-
-# $Id: readgeis.py 10520 2010-10-11 16:39:49Z hack $
-
-"""
-        swapgeis: Read GEIS file, byteswap it and write out to a new GEIS file.
-
-        License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-
-        Usage:
-
-                swapgeis.py [options] GEISname newGEISname
-
-                GEISname is the input GEIS file in GEIS format, and FITSname
-                is the output file in FITS format. GEISname can be a
-                directory name.  In this case, it will try to use all `*.??h`
-                files as input file names.
-
-                If newGEISname is omitted or is a directory name, this task will
-                try to construct the output names from the input names, i.e.:
-
-                abc.xyh will have an output name of abc_swap.xyh
-
-        :Options:
-
-        -h     print the help (this text)
-
-        -n     do NOT clobber pre-existing output files
-
-        :Example:
-
-        If used in Pythons script, a user can, e. g.::
-
-            >>> import swapgeis
-            >>> swapgeis.byteswap(GEISFileName)
-
-        The most basic usage from the command line::
-
-            swapgeis.py test1.hhh test1_swap.hhh
-
-        This command will convert the input GEIS file test1.hhh written
-        out on one platform (Solaris?) to a byteswapped version test1_linux.hhh.
-
-
-        From the command line::
-
-            swapgeis.py .
-
-        this will byteswap all `*.??h` files in the current directory
-        to GEIS files (of corresponding names) and write them in the
-        current directory.
-
-
-        Another example of usage from the command line::
-
-            swapgeis.py "u*"
-
-        this will convert all `u*.??h` files in the current directory
-        to byteswapped files (of corresponding names) and write them in the
-        current directory.  Note that when using wild cards, it is
-        necessary to put them in quotes.
-
-"""
-
-# Developed by Science Software Branch, STScI, USA.
-# This version needs pyfits 0.9.6.3 or later
-# and numpy version 1.0.4 or later
-
-from __future__ import division, print_function # confidence high
-
-__version__ = "1.0 (25 Feb, 2011), \xa9 AURA"
-
-import os, sys, string, shutil
-from astropy.io import fits
-import numpy
-from functools import reduce
-dat = None
-
-dat = None
-
-# definitions used to convert GEIS record into numpy objects
-geis_fmt = {'REAL':'f', 'DOUBLE': 'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'}
-# definitions used to convert data into numpy array for use in `astropy.io.fits.Column`
-cols_fmt = {'REAL':'float', 'DOUBLE':'float', 'INTEGER':'int', 'LOGICAL':'S', 'CHARACTER': 'S'}
-# definitions used to define print format for `astropy.io.fits.Column`
-cols_pfmt = {'REAL':'E', 'DOUBLE': 'D', 'INTEGER': 'J', 'LOGICAL':'A', 'CHARACTER': 'A'}
-
-# Keywords which require special unit conversion
-# keywords which are output as long-floats without using exponential formatting
-kw_DOUBLE = ['CRVAL1','CRVAL2','FPKTTIME','LPKTTIME']
-
-def byteswap(input,output=None,clobber=True):
-
-    """Input GEIS files "input" will be read and converted to a new GEIS file
-    whose byte-order has been swapped from its original state.
-
-    Parameters
-    ----------
-    input - str
-        Full filename with path of input GEIS image header file
-
-    output - str
-        Full filename with path of output GEIS image header file
-        If None, a default name will be created as input_swap.??h
-
-    clobber - bool
-        Overwrite any pre-existing output file? [Default: True]
-
-    Notes
-    -----
-    This function will automatically read and write out the data file using the
-    GEIS image naming conventions.
-
-    """
-
-    global dat
-    cardLen = fits.Card.length
-
-    # input file(s) must be of the form *.??h and *.??d
-    if input[-1] != 'h' or input[-4] != '.':
-        raise "Illegal input GEIS file name %s" % input
-
-    data_file = input[:-1]+'d'
-
-    # Create default output name if no output name was specified by the user
-    if output is None:
-        output = input.replace('.','_swap.')
-
-    out_data = output[:-1]+'d'
-    if os.path.exists(output) and not clobber:
-        errstr = 'Output file already exists! Please remove or rename and start again...'
-        raise IOError(errstr)
-
-    _os = sys.platform
-    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
-        bytes_per_line = cardLen+1
-    else:
-        raise "Platform %s is not supported (yet)." % _os
-
-    end_card = 'END'+' '* (cardLen-3)
-
-    # open input file
-    im = open(input)
-
-    # Generate the primary HDU so we can have access to keywords which describe
-    # the number of groups and shape of each group's array
-    #
-    cards = []
-    while 1:
-        line = im.read(bytes_per_line)[:cardLen]
-        line = line[:8].upper() + line[8:]
-        if line == end_card:
-            break
-        cards.append(fits.Card.fromstring(line))
-
-    phdr = fits.Header(cards)
-    im.close()
-
-    _naxis0 = phdr.get('NAXIS', 0)
-    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
-    _naxis.insert(0, _naxis0)
-    _bitpix = phdr['BITPIX']
-    _psize = phdr['PSIZE']
-    if phdr['DATATYPE'][:4] == 'REAL':
-        _bitpix = -_bitpix
-    if _naxis0 > 0:
-        size = reduce(lambda x,y:x*y, _naxis[1:])
-        data_size = abs(_bitpix) * size // 8
-    else:
-        data_size = 0
-    group_size = data_size + _psize // 8
-
-    # decode the group parameter definitions,
-    # group parameters will become extension header
-    groups = phdr['GROUPS']
-    gcount = phdr['GCOUNT']
-    pcount = phdr['PCOUNT']
-
-    formats = []
-    bools = []
-    floats = []
-    _range = list(range(1, pcount+1))
-    key = [phdr['PTYPE'+str(j)] for j in _range]
-    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
-
-    # delete group parameter definition header keywords
-    _list = ['PTYPE'+str(j) for j in _range] + \
-            ['PDTYPE'+str(j) for j in _range] + \
-            ['PSIZE'+str(j) for j in _range] + \
-            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
-
-    # Construct record array formats for the group parameters
-    # as interpreted from the Primary header file
-    for i in range(1, pcount+1):
-        ptype = key[i-1]
-        pdtype = phdr['PDTYPE'+str(i)]
-        star = pdtype.find('*')
-        _type = pdtype[:star]
-        _bytes = pdtype[star+1:]
-
-        # collect boolean keywords since they need special attention later
-
-        if _type == 'LOGICAL':
-            bools.append(i)
-        if pdtype == 'REAL*4':
-            floats.append(i)
-
-        fmt = geis_fmt[_type] + _bytes
-        formats.append((ptype,fmt))
-
-    _shape = _naxis[1:]
-    _shape.reverse()
-    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
-    _bscale = phdr.get('BSCALE', 1)
-    _bzero = phdr.get('BZERO', 0)
-    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
-        _uint16 = 1
-        _bzero = 32768
-    else:
-        _uint16 = 0
-
-
-    # Use copy-on-write for all data types since byteswap may be needed
-    # in some platforms.
-    f1 = open(data_file, mode='rb')
-    dat = f1.read()
-    f1.close()
-
-    errormsg = ""
-
-    loc = 0
-    outdat = b''
-    for k in range(gcount):
-        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
-        ext_dat = ext_dat.reshape(_shape).byteswap()
-        outdat += ext_dat.tostring()
-
-        ext_hdu = fits.hdu.ImageHDU(data=ext_dat)
-
-        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats).byteswap()
-        outdat += rec.tostring()
-
-        loc += group_size
-
-    if os.path.exists(output):
-        os.remove(output)
-    if os.path.exists(out_data):
-        os.remove(out_data)
-
-    shutil.copy(input,output)
-    outfile = open(out_data,mode='wb')
-    outfile.write(outdat)
-    outfile.close()
-    print('Finished byte-swapping ',input,' to ',output)
-
-#-------------------------------------------------------------------------------
-
-
-    """Input GEIS files "input" will be read and a HDUList object will
-       be returned that matches the waiver-FITS format written out by 'stwfits' in IRAF.
-
-       The user can use the writeto method to write the HDUList object to
-       a FITS file.
-    """
-
-#   global dat  # !!! (looks like this is a function missing its head)
-    cardLen = fits.Card.length
-
-    # input file(s) must be of the form *.??h and *.??d
-    if input[-1] != 'h' or input[-4] != '.':
-        raise "Illegal input GEIS file name %s" % input
-
-    data_file = input[:-1]+'d'
-
-    _os = sys.platform
-    if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin':
-        bytes_per_line = cardLen+1
-    else:
-        raise "Platform %s is not supported (yet)." % _os
-
-    end_card = 'END'+' '* (cardLen-3)
-
-    # open input file
-    im = open(input)
-
-    # Generate the primary HDU
-    cards = []
-    while 1:
-        line = im.read(bytes_per_line)[:cardLen]
-        line = line[:8].upper() + line[8:]
-        if line == end_card:
-            break
-        cards.append(fits.Card.fromstring(line))
-
-    phdr = fits.Header(cards)
-    im.close()
-
-    phdr.set('FILENAME', value=input, after='DATE')
-
-    # Determine starting point for adding Group Parameter Block keywords to Primary header
-    phdr_indx = phdr.index('PSIZE')
-
-
-    _naxis0 = phdr.get('NAXIS', 0)
-    _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)]
-    _naxis.insert(0, _naxis0)
-    _bitpix = phdr['BITPIX']
-    _psize = phdr['PSIZE']
-    if phdr['DATATYPE'][:4] == 'REAL':
-        _bitpix = -_bitpix
-    if _naxis0 > 0:
-        size = reduce(lambda x,y:x*y, _naxis[1:])
-        data_size = abs(_bitpix) * size // 8
-    else:
-        data_size = 0
-    group_size = data_size + _psize // 8
-
-    # decode the group parameter definitions,
-    # group parameters will become extension table
-    groups = phdr['GROUPS']
-    gcount = phdr['GCOUNT']
-    pcount = phdr['PCOUNT']
-
-    formats = []
-    bools = []
-    floats = []
-    cols = [] # column definitions used for extension table
-    cols_dict = {} # provides name access to Column defs
-    _range = list(range(1, pcount+1))
-    key = [phdr['PTYPE'+str(j)] for j in _range]
-    comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range]
-
-    # delete group parameter definition header keywords
-    _list = ['PTYPE'+str(j) for j in _range] + \
-            ['PDTYPE'+str(j) for j in _range] + \
-            ['PSIZE'+str(j) for j in _range] + \
-            ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO']
-
-    # Construct record array formats for the group parameters
-    # as interpreted from the Primary header file
-    for i in range(1, pcount+1):
-        ptype = key[i-1]
-        pdtype = phdr['PDTYPE'+str(i)]
-        star = pdtype.find('*')
-        _type = pdtype[:star]
-        _bytes = pdtype[star+1:]
-
-        # collect boolean keywords since they need special attention later
-
-        if _type == 'LOGICAL':
-            bools.append(i)
-        if pdtype == 'REAL*4':
-            floats.append(i)
-
-        # identify keywords which require conversion to special units
-        if ptype in kw_DOUBLE:
-            _type = 'DOUBLE'
-
-        fmt = geis_fmt[_type] + _bytes
-        formats.append((ptype,fmt))
-
-        # Set up definitions for use in creating the group-parameter block table
-        nrpt = ''
-        nbits = str(int(_bytes)*8)
-        if 'CHAR' in _type:
-            nrpt = _bytes
-            nbits = _bytes
-
-        afmt = cols_fmt[_type]+ nbits
-        if 'LOGICAL' in _type:
-            afmt = cols_fmt[_type]
-        cfmt = cols_pfmt[_type]+nrpt
-
-        #print 'Column format for ',ptype,': ',cfmt,' with dtype of ',afmt
-        cols_dict[ptype] = fits.Column(name=ptype,format=cfmt,array=numpy.zeros(gcount,dtype=afmt))
-        cols.append(cols_dict[ptype]) # This keeps the columns in order
-
-    _shape = _naxis[1:]
-    _shape.reverse()
-    _code = fits.hdu.ImageHDU.NumCode[_bitpix]
-    _bscale = phdr.get('BSCALE', 1)
-    _bzero = phdr.get('BZERO', 0)
-    if phdr['DATATYPE'][:10] == 'UNSIGNED*2':
-        _uint16 = 1
-        _bzero = 32768
-    else:
-        _uint16 = 0
-
-    # delete from the end, so it will not conflict with previous delete
-    for i in range(len(phdr)-1, -1, -1):
-        if phdr.cards[i].keyword in _list:
-            del phdr[i]
-
-    # clean up other primary header keywords
-    phdr['SIMPLE'] = True
-    phdr['GROUPS'] = False
-    _after = 'NAXIS'
-    if _naxis0 > 0:
-        _after += str(_naxis0)
-    phdr.set('EXTEND', value=True,
-             comment="FITS dataset may contain extensions",
-             after=_after)
-
-    # Use copy-on-write for all data types since byteswap may be needed
-    # in some platforms.
-    f1 = open(data_file, mode='rb')
-    dat = f1.read()
-    errormsg = ""
-
-    # Define data array for all groups
-    arr_shape = _naxis[:]
-    arr_shape[0] = gcount
-    arr_stack = numpy.zeros(arr_shape,dtype=_code)
-
-    loc = 0
-    for k in range(gcount):
-        ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code)
-        ext_dat = ext_dat.reshape(_shape)
-        if _uint16:
-            ext_dat += _bzero
-        # Check to see whether there are any NaN's or infs which might indicate
-        # a byte-swapping problem, such as being written out on little-endian
-        #   and being read in on big-endian or vice-versa.
-        if _code.find('float') >= 0 and \
-            (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))):
-            errormsg += "===================================\n"
-            errormsg += "= WARNING:                        =\n"
-            errormsg += "=  Input image:                   =\n"
-            errormsg += input+"[%d]\n"%(k+1)
-            errormsg += "=  had floating point data values =\n"
-            errormsg += "=  of NaN and/or Inf.             =\n"
-            errormsg += "===================================\n"
-        elif _code.find('int') >= 0:
-            # Check INT data for max values
-            ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat)
-            if ext_dat_exp.max() == int(_bitpix) - 1:
-                # Potential problems with byteswapping
-                errormsg += "===================================\n"
-                errormsg += "= WARNING:                        =\n"
-                errormsg += "=  Input image:                   =\n"
-                errormsg += input+"[%d]\n"%(k+1)
-                errormsg += "=  had integer data values        =\n"
-                errormsg += "=  with maximum bitvalues.        =\n"
-                errormsg += "===================================\n"
-
-        arr_stack[k] = ext_dat
-
-        rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats)
-
-        loc += group_size
-
-        # Add data from this GPB to table
-        for i in range(1, pcount+1):
-            val = rec[0][i-1]
-            if i in bools:
-                if val:
-                    val = 'T'
-                else:
-                    val = 'F'
-            cols[i-1].array[k] = val
-
-        # Based on the first group, add GPB keywords to PRIMARY header
-        if k == 0:
-            # Create separate PyFITS Card objects for each entry in 'rec'
-            # and update Primary HDU with these keywords after PSIZE
-            for i in range(1, pcount+1):
-                #val = rec.field(i-1)[0]
-                val = rec[0][i-1]
-                if val.dtype.kind == 'S':
-                    val = val.decode('ascii')
-
-                if i in bools:
-                    if val:
-                        val = True
-                    else:
-                        val = False
-                if i in floats:
-                    # use fromstring, format in Card is deprecated in pyfits 0.9
-                    _str = '%-8s= %20.13G / %s' % (key[i-1], val, comm[i-1])
-                    _card = fits.Card.fromstring(_str)
-                else:
-                    _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1])
-                phdr.insert(phdr_indx+i, _card)
-
-            # deal with bscale/bzero
-            if (_bscale != 1 or _bzero != 0):
-                phdr['BSCALE'] = _bscale
-                phdr['BZERO'] = _bzero
-
-        #hdulist.append(ext_hdu)
-    # Define new table based on Column definitions
-    ext_table = fits.new_table(cols, tbtype='TableHDU')
-    ext_table.header.set('EXTNAME', value=input+'.tab', after='TFIELDS')
-    # Add column descriptions to header of table extension to match stwfits output
-    for i in range(len(key)):
-        ext_table.header.append(fits.Card(keyword=key[i], value=comm[i]))
-
-    if errormsg != "":
-        errormsg += "===================================\n"
-        errormsg += "=  This file may have been        =\n"
-        errormsg += "=  written out on a platform      =\n"
-        errormsg += "=  with a different byte-order.   =\n"
-        errormsg += "=                                 =\n"
-        errormsg += "=  Please verify that the values  =\n"
-        errormsg += "=  are correct or apply the       =\n"
-        errormsg += "=  '.byteswap()' method.          =\n"
-        errormsg += "===================================\n"
-        print(errormsg)
-
-    f1.close()
-
-    hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=arr_stack)])
-    hdulist.append(ext_table)
-
-    return hdulist
-
-def parse_path(f1, f2):
-
-    """Parse two input arguments and return two lists of file names"""
-
-    import glob
-
-    # if second argument is missing or is a wild card, point it
-    # to the current directory
-    f2 = f2.strip()
-    if f2 == '' or f2 == '*':
-        f2 = './'
-
-    # if the first argument is a directory, use all GEIS files
-    if os.path.isdir(f1):
-        f1 = os.path.join(f1, '*.??h')
-    list1 = glob.glob(f1)
-    list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.']
-
-    # if the second argument is a directory, use file names in the
-    # first argument to construct file names, i.e.
-    # abc.xyh will be converted to abc_xyf.fits
-    if os.path.isdir(f2):
-        list2 = []
-        for file in list1:
-            name = os.path.split(file)[-1]
-            fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits'
-            list2.append(os.path.join(f2, fitsname))
-    else:
-        list2 = [s.strip() for s in f2.split(",")]
-
-    if (list1 == [] or list2 == []):
-        str = ""
-        if (list1 == []): str += "Input files `%s` not usable/available. " % f1
-        if (list2 == []): str += "Input files `%s` not usable/available. " % f2
-        raise IOError(str)
-    else:
-        return list1, list2
-
-#-------------------------------------------------------------------------------
-# special initialization when this is the main program
-
-if __name__ == "__main__":
-
-    import getopt
-
-    try:
-        optlist, args = getopt.getopt(sys.argv[1:], 'hn')
-    except getopt.error as e:
-        print(str(e))
-        print(__doc__)
-        print("\t", __version__)
-
-    # initialize default values
-    help = 0
-    clobber = True
-    # read options
-    for opt, value in optlist:
-        if opt == "-h":
-            help = 1
-        if opt == '-n':
-            clobber = False
-    if (help):
-        print(__doc__)
-        print("\t", __version__)
-    else:
-        if len(args) == 1:
-            args.append('')
-        list1, list2 = parse_path (args[0], args[1])
-        npairs = min (len(list1), len(list2))
-        for i in range(npairs):
-            try:
-                byteswap(list1[i],list2[i],clobber=clobber)
-                print("%s -> %s" % (list1[i], list2[i]))
-            except Exception as e:
-                print("Conversion fails for %s: %s" % (list1[i], str(e)))
-                break
-
-"""
-
-Copyright (C) 2003 Association of Universities for Research in Astronomy (AURA)
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-    1. Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-
-    2. Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-
-    3. The name of AURA and its representatives may not be used to
-      endorse or promote products derived from this software without
-      specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-"""
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/taskpars.py b/required_pkgs/stsci.tools/lib/stsci/tools/taskpars.py
deleted file mode 100644
index bbbec99..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/taskpars.py
+++ /dev/null
@@ -1,64 +0,0 @@
-""" Contains the TaskPars class and any related functions.
-
-$Id: taskpars.py 16939 2012-05-23 13:36:09Z sontag $
-"""
-from __future__ import division # confidence high
-
-class NoExecError(Exception): pass
-
-class TaskPars:
-    """ This represents a task's collection of configurable parameters.
-    This class is meant to be mostly abstract, though there is some
-    functionality included which could be common to most derived classes.
-    This also serves to document the interface which must be met for EPAR.
-    """
-
-    def getName(self, *args, **kw):
-        """ Returns the string name of the task. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def getPkgname(self, *args, **kw):
-        """ Returns the string name of the package, if applicable. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def getParList(self, *args, **kw):
-        """ Returns a list of parameter objects. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def getDefaultParList(self, *args, **kw):
-        """ Returns a list of parameter objects with default values set. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def setParam(self, *args, **kw):
-        """ Allows one to set the value of a single parameter.
-            Initial signature is setParam(name, value, scope='', check=1) """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def getFilename(self, *args, **kw):
-        """ Returns the string name of any associated config/parameter file. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def saveParList(self, *args, **kw):
-        """ Allows one to save the entire set to a file. """
-        raise NotImplementedError("class TaskPars is not to be used directly")
-
-    def run(self, *args, **kw):
-        """ Runs the task with the known parameters. """
-        raise NoExecError("Bug: class TaskPars is not to be used directly")
-
-    def canPerformValidation(self):
-        """ Returns bool.  If True, expect tryValue() to be called next. """
-        return False
-
-    def knowAsNative(self):
-        """ Returns bool.  Return true if the class prefers in-memory objects
-        to keep (know) their parameter values in native format instead of as
-        strings. """
-        return False
-
-    def getHelpAsString(self):
-        """ Meant to be overridden - return a task specific help string. """
-        return 'No help string available for task "'+self.getName()+'".\n '+ \
-               'Implement getHelpAsString() in your TaskPars sub-class.'
-
-    # also, eparam, lParam, tParam, dParam, tryValue ?
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/teal.py b/required_pkgs/stsci.tools/lib/stsci/tools/teal.py
deleted file mode 100644
index 4f3dc58..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/teal.py
+++ /dev/null
@@ -1,1161 +0,0 @@
-""" Main module for the ConfigObj version of the parameter task editor: TEAL.
-$Id: teal.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-import os, sys, traceback
-from . import configobj, cfgpars, editpar, vtor_checks
-from .cfgpars import APP_NAME
-from .irafutils import printColsAuto, rglob, setWritePrivs
-from . import capable
-
-PY3K = sys.version_info[0] > 2
-
-if capable.OF_GRAPHICS:
-    if PY3K:
-        from tkinter.filedialog import askopenfilename
-        from tkinter.messagebox import showerror, showwarning
-    else:
-        from tkFileDialog import askopenfilename
-        from tkMessageBox import showerror, showwarning
-
-# tool help
-tealHelpString = """\
-The TEAL (Task Editor And Launcher) GUI is used to edit task parameters in a
-parameter-dependent way.  After editing, it allows the user to launch
-(execute) the task.  It also allows the user to view task help in a separate
-window that remains accessible while the parameters are being edited.
-
-
-Editing Parameters
---------------------
-
-Parameter values are modified using various GUI widgets that depend on the
-parameter properties.  It is possible to edit parameters using either the mouse
-or the keyboard.  Most parameters have a context-dependent menu accessible via
-right-clicking that enables resetting the parameter (restoring its value to
-the task default), clearing the value, or even activating a file browser that
-allows a filename to be selected and entered into the parameter field.  Some
-items on the right-click pop-up menu may be disabled depending on the parameter
-type (e.g. the file browser cannot be used for numeric parameters.)
-
-The mouse-editing behavior should be intuitive, so the notes below focus on
-keyboard-editing.  When the editor starts, the first parameter is selected.  To
-select another parameter, use the Tab key (Shift-Tab to go backwards) or Return
-to move the focus from item to item. The Up and Down arrow keys also move
-between fields.  The toolbar buttons can also be selected with Tab.  Use the
-space bar to "push" buttons or activate menus.
-
-Enumerated Parameters
-        Parameters that have a list of choices use a drop-down menu.  The space
-        bar causes the menu to appear; once it is present, the up/down arrow
-        keys can be used to select different items.  Items in the list have
-        accelerators (underlined, generally the first letter) that can be typed
-        to jump directly to that item.  When editing is complete, hit Return or
-        Tab to accept the changes, or type Escape to close the menu without
-        changing the current parameter value.
-
-Boolean Parameters
-        Boolean parameters appear as Yes/No radio buttons.  Hitting the space
-        bar toggles the setting, while 'y' and 'n' can be typed to select the
-        desired value.
-
-Text Entry Fields
-        Strings, integers, floats, etc. appear as text-entry fields.  Values
-        are verified to to be legal before being stored in the parameter. If an
-        an attempt is made to set a parameter to an illegal value, the program
-        beeps and a warning message appears in the status bar at the bottom of
-        the window.
-
-        To see the value of a string that is longer than the entry widget,
-        either use the left mouse button to do a slow "scroll" through the
-        entry or use the middle mouse button to "pull" the value in the entry
-        back and forth quickly.  In either case, just click in the entry widget
-        with the mouse and then drag to the left or right.  If there is a
-        selection highlighted, the middle mouse button may paste it in when
-        clicked.  It may be necessary to click once with the left mouse
-        button to undo the selection before using the middle button.
-
-        You can also use the left and right arrow keys to scroll through the
-        selection.  Control-A jumps to the beginning of the entry, and
-        Control-E jumps to the end of the entry.
-
-
-The Menu Bar
---------------
-
-File menu:
-    Execute
-             Start the task running with the currently edited parameter values.
-             If the Option "Save and Close on Execute" is set, this will save
-             all the parameters and close the editor window.
-    Save
-             Save the parameters to the file named in the title bar.  This
-             does not close the editor window, nor does it execute the task.
-             If however, this button appears as "Save & Quit", then it will
-             in fact close the editor window after saving.
-    Save As...
-             Save the parameters to a user-specified file.  This does not
-             close the editor window, nor does it execute the task.
-    Defaults
-             Reset all parameters to the system default values for this
-             task.  Note that individual parameters can be reset using the
-             menu shown by right-clicking on the parameter entry.
-    Close
-             Close the parameter editor.  If there are unsaved changes, the
-             user is prompted to save them.  Either way, this action returns
-             to the calling routine a Python dict of the currently selected
-             parameter values.
-    Cancel
-             Cancel the editing session by exiting the parameter editor.  All
-             recent changes that were made to the parameters are lost (going
-             back until the last Save or Save As).  This action returns
-             a Python None to the calling routine.
-
-Open... menu:
-     Load and edit parameters from any applicable file found for the current
-     task.  This changes the current file being edited (see the name listed
-     in the title bar) to the one selected to be opened.  If no such files
-     are found, this menu is not shown.
-
-Options menu:
-    Display Task Help in a Window
-             Help on the task is available through the Help menu.  If this
-             option is selected, the help text is displayed in a pop-up window.
-             This is the default behavior.
-    Display Task Help in a Browser
-             If this option is selected, instead of a pop-up window, help is
-             displayed in the user's web browser.  This requires access to
-             the internet and is a somewhat experimental feature.  Any HTML
-             version of the task's help need to be provided by the task.
-    Save and Close on Execute
-             If this option is selected, the parameter editing window will be
-             closed right before task execution as if the Close button had
-             been clicked.  This is the default behavior.  For short-running
-             tasks, it may be interesting to leave TEAL open and continue to
-             execute while tweaking certain parameter values.
-
-Help menu:
-    Task Help
-             Display help on the task whose parameters are being edited.
-             By default the help pops up in a new window, but the help can also
-             be displayed in a web browser by modifying the Options.
-    TEAL Help
-             Display this help.
-    Show Log
-             Display the historical log of all the status messages that so
-             far have been displayed in the status area at the very bottom
-             of the user interface.
-
-
-Toolbar Buttons
------------------
-
-The Toolbar contains a set of buttons that provide shortcuts for the most
-common menu bar actions.  Their names are the same as the menu items given
-above: Execute, Save (or Save & Quit), Close, Cancel, and Defaults.
-
-Note that the toolbar buttons are accessible from the keyboard using the Tab
-and Shift-Tab keys.  They are located in sequence before the first parameter.
-If the first parameter is selected, Shift-Tab backs up to the "Task Help"
-button, and if the last parameter is selected then Tab wraps around and selects
-the "Execute" button.
-"""
-
-
-# Starts a GUI session, or simply loads a file
-def teal(theTask, parent=None, loadOnly=False, returnAs="dict",
-         canExecute=True, strict=False, errorsToTerm=False,
-         autoClose=True, defaults=False):
-#        overrides=None):
-    """ Start the GUI session, or simply load a task's ConfigObj. """
-    if loadOnly: # this forces returnAs="dict"
-        obj = None
-        try:
-            obj = cfgpars.getObjectFromTaskArg(theTask, strict, defaults)
-#           obj.strictUpdate(overrides) # ! would need to re-verify after this !
-        except Exception as re: # catches RuntimeError and KeyError and ...
-            # Since we are loadOnly, don't pop up the GUI for this
-            if strict:
-                raise
-            else:
-                print(re.message.replace('\n\n','\n'))
-        return obj
-    else:
-        assert returnAs in ("dict", "status", None), \
-               "Invalid value for returnAs arg: "+str(returnAs)
-        dlg = None
-        try:
-            # if setting to all defaults, go ahead and load it here, pre-GUI
-            if defaults:
-                theTask = cfgpars.getObjectFromTaskArg(theTask, strict, True)
-            # now create/run the dialog
-            dlg = ConfigObjEparDialog(theTask, parent=parent,
-                                      autoClose=autoClose,
-                                      strict=strict,
-                                      canExecute=canExecute)
-#                                     overrides=overrides)
-        except cfgpars.NoCfgFileError as ncf:
-            log_last_error()
-            if errorsToTerm:
-                print(str(ncf).replace('\n\n','\n'))
-            else:
-                popUpErr(parent=parent,message=str(ncf),title="Unfound Task")
-        except Exception as re: # catches RuntimeError and KeyError and ...
-            log_last_error()
-            if errorsToTerm:
-                print(re.message.replace('\n\n','\n'))
-            else:
-                popUpErr(parent=parent, message=re.message,
-                         title="Bad Parameters")
-
-        # Return, depending on the mode in which we are operating
-        if returnAs == None:
-            return
-
-        if returnAs == "dict":
-            if dlg is None or dlg.canceled():
-                return None
-            else:
-                return dlg.getTaskParsObj()
-
-        # else, returnAs == "status"
-        if dlg is None or dlg.canceled():
-            return -1
-        if dlg.executed():
-            return 1
-        return 0 # save/closed
-        # Note that you should be careful not to use "status" and
-        # autoClose=False, because the user can Save then Cancel
-
-
-def load(theTask, canExecute=True, strict=True, defaults=False):
-    """ Shortcut to load TEAL .cfg files for non-GUI access where
-    loadOnly=True. """
-    return teal(theTask, parent=None, loadOnly=True, returnAs="dict",
-                canExecute=canExecute, strict=strict, errorsToTerm=True,
-                defaults=defaults)
-
-
-def log_last_error():
-    import time
-    f = open(cfgpars.getAppDir()+os.sep+'last_error.txt','w')
-    f.write(time.asctime()+'\n\n')
-    f.write(traceback.format_exc()+'\n')
-    f.close()
-
-
-def unlearn(taskPkgName, deleteAll=False):
-    """ Find the task named taskPkgName, and delete any/all user-owned
-    .cfg files in the user's resource directory which apply to that task.
-    Like a unix utility, this returns 0 on success (no files found or only
-    1 found but deleted).  For multiple files found, this uses deleteAll,
-    returning the file-name-list if deleteAll is False (to indicate the
-    problem) and without deleting any files.  MUST check return value.
-    This does not prompt the user or print to the screen. """
-
-    # this WILL throw an exception if the taskPkgName isn't found
-    flist = cfgpars.getUsrCfgFilesForPyPkg(taskPkgName) # can raise
-    if flist == None or len(flist) == 0:
-        return 0
-    if len(flist) == 1:
-        os.remove(flist[0])
-        return 0
-    # at this point, we know more than one matching file was found
-    if deleteAll:
-        for f in flist:
-            os.remove(f)
-        return 0
-    else:
-        return flist # let the caller know this is an issue
-
-
-def diffFromDefaults(theTask, report=False):
-    """ Load the given file (or existing object), and return a dict
-    of its values which are different from the default values.  If report
-    is set, print to stdout the differences. """
-    # get the 2 dicts (trees: dicts of dicts)
-    defaultTree = load(theTask, canExecute=False, strict=True, defaults=True)
-    thisTree    = load(theTask, canExecute=False, strict=True, defaults=False)
-    # they must be flattenable
-    defaultFlat = cfgpars.flattenDictTree(defaultTree)
-    thisFlat    = cfgpars.flattenDictTree(thisTree)
-    # use the "set" operations till there is a dict.diff()
-    # thanks to:  http://stackoverflow.com/questions/715234
-    diffFlat = dict( set(thisFlat.items()) - \
-                     set(defaultFlat.items()) )
-    if report:
-        defaults_of_diffs_only = {}
-#       { k:defaultFlat[k] for k in diffFlat.keys() }
-        for k in diffFlat:
-            defaults_of_diffs_only[k] = defaultFlat[k]
-        msg = 'Non-default values of "'+str(theTask)+'":\n'+ \
-              _flat2str(diffFlat)+ \
-              '\n\nDefault values:\n'+ \
-              _flat2str(defaults_of_diffs_only)
-        print(msg)
-    return diffFlat
-
-def _flat2str(fd): # waiting for a nice pretty-print
-    rv = '{\n'
-    for k in fd.keys(): rv += repr(k)+': '+repr(fd[k])+'\n'
-    return rv+'}'
-
-def _isInstalled(fullFname):
-    """ Return True if the given file name is located in an
-    installed area (versus a user-owned file) """
-    if not fullFname: return False
-    if not os.path.exists(fullFname): return False
-    instAreas = []
-    try:
-        import site
-        instAreas = site.getsitepackages()
-    except:
-        pass # python 2.6 and lower don't have site.getsitepackages()
-    if len(instAreas) < 1:
-        instAreas = [ os.path.dirname(os.__file__) ]
-    for ia in instAreas:
-        if fullFname.find(ia) >= 0:
-            return True
-    return False
-
-def popUpErr(parent=None, message="", title="Error"):
-    # withdraw root, could standardize w/ EditParDialog.__init__()
-    if parent == None:
-        if PY3K:
-            import tkinter
-            root = tkinter.Tk()
-        else:
-            import Tkinter
-            root = Tkinter.Tk()
-#       root.lift()
-        root.after_idle(root.withdraw)
-    showerror(message=message, title=title, parent=parent)
-
-# We'd love to somehow force the dialog to the front here in popUpErr (on OSX)
-# but cannot since the Python process started from the Terminal is not an
-# Aqua app (unless it became so within PyRAF).  This thread
-#    http://objectmix.com/python/350288-tkinter-osx-lift.html
-# describes it well.
-
-
-
-def execEmbCode(SCOPE, NAME, VAL, TEAL, codeStr):
-    """ .cfgspc embedded code execution is done here, in a relatively confined
-        space.  The variables available to the code to be executed are:
-              SCOPE, NAME, VAL, PARENT, TEAL
-        The code string itself is expected to set a var named OUT
-    """
-    # This was all we needed in Python 2.x
-#   OUT = None
-#   exec codeStr
-#   return OUT
-
-    # In Python 3 (& 2.x) be more explicit:  http://bugs.python.org/issue4831
-    PARENT = None
-    if TEAL:
-        PARENT = TEAL.top
-    OUT = None
-    ldict = locals() # will have OUT in it
-    exec(codeStr, globals(), ldict)
-    return ldict['OUT']
-
-
-
-def print_tasknames(pkgName, aDir, term_width=80, always=False,
-                    hidden=None):
-    """ Print a message listing TEAL-enabled tasks available under a
-        given installation directory (where pkgName resides).
-        If always is True, this will always print when tasks are
-        found; otherwise it will only print found tasks when in interactive
-        mode.
-        The parameter 'hidden' supports a list of input tasknames that should
-        not be reported even though they still exist.
-    """
-    # See if we can bail out early
-    if not always:
-        # We can't use the sys.ps1 check if in PyRAF since it changes sys
-        if 'pyraf' not in sys.modules:
-           # sys.ps1 is only defined in interactive mode
-           if not hasattr(sys, 'ps1'):
-               return # leave here, we're in someone's script
-
-    # Check for tasks
-    taskDict = cfgpars.findAllCfgTasksUnderDir(aDir)
-    tasks = [x for x in taskDict.values() if len(x) > 0]
-    if hidden: # could even account for a single taskname as input here if needed
-        for x in hidden:
-            if x in tasks: tasks.remove(x)
-    # only be verbose if there something found
-    if len(tasks) > 0:
-        sortedUniqTasks = sorted(set(tasks))
-        if len(sortedUniqTasks) == 1:
-            tlines = 'The following task in the '+pkgName+\
-                     ' package can be run with TEAL:\n'
-        else:
-            tlines = 'The following tasks in the '+pkgName+\
-                     ' package can be run with TEAL:\n'
-        tlines += printColsAuto(sortedUniqTasks, term_width=term_width,
-                                min_pad=2)
-        print(tlines)
-
-def getHelpFileAsString(taskname,taskpath):
-    """
-    This functions will return useful help as a string read from a file
-    in the task's installed directory called "<module>.help".
-
-    If no such file can be found, it will simply return an empty string.
-
-    Notes
-    -----
-    The location of the actual help file will be found under the task's
-    installed directory using 'irafutils.rglob' to search all sub-dirs to
-    find the file. This allows the help file to be either in the tasks
-    installed directory or in any sub-directory, such as a "help/" directory.
-
-    Parameters
-    ----------
-    taskname: string
-        Value of `__taskname__` for a module/task
-
-    taskpath: string
-        Value of `__file__` for an installed module which defines the task
-
-    Returns
-    -------
-    helpString: string
-        multi-line string read from the file '<taskname>.help'
-
-    """
-    #get the local library directory where the code is stored
-    pathsplit=os.path.split(taskpath) # taskpath should be task's __file__
-    if taskname.find('.') > -1: # if taskname is given as package.taskname...
-        helpname=taskname.split(".")[1]    # taskname should be __taskname__ from task's module
-    else:
-        helpname = taskname
-    localdir = pathsplit[0]
-    if localdir == '':
-       localdir = '.'
-    helpfile=rglob(localdir,helpname+".help")[0]
-
-    if os.access(helpfile,os.R_OK):
-        fh=open(helpfile,'r')
-        ss=fh.readlines()
-        fh.close()
-        helpString=""
-        for line in ss:
-            helpString+=line
-    else:
-        helpString= ''
-
-    return helpString
-
-
-def cfgGetBool(theObj, name, dflt):
-    """ Get a stringified val from a ConfigObj obj and return it as bool """
-    strval = theObj.get(name, None)
-    if strval is None:
-        return dflt
-    return strval.lower().strip() == 'true'
-
-
-# Main class
-class ConfigObjEparDialog(editpar.EditParDialog): # i.e. TEAL
-    """ The TEAL GUI. """
-
-    FALSEVALS = (None, False, '', 0, 0.0, '0', '0.0', 'OFF', 'Off', 'off',
-                 'NO', 'No', 'no', 'N', 'n', 'FALSE', 'False', 'false')
-
-    def __init__(self, theTask, parent=None, title=APP_NAME,
-                 isChild=0, childList=None, autoClose=False,
-                 strict=False, canExecute=True):
-#                overrides=None,
-        self._do_usac = autoClose
-
-        # Keep track of any passed-in args before creating the _taskParsObj
-#       self._overrides = overrides
-        self._canExecute = canExecute
-        self._strict = strict
-
-        # Init base - calls _setTaskParsObj(), sets self.taskName, etc
-        # Note that this calls _overrideMasterSettings()
-        editpar.EditParDialog.__init__(self, theTask, parent, isChild,
-                                       title, childList,
-                                       resourceDir=cfgpars.getAppDir())
-        # We don't return from this until the GUI is closed
-
-
-    def _overrideMasterSettings(self):
-        """ Override so that we can run in a different mode. """
-        # config-obj dict of defaults
-        cod = self._getGuiSettings()
-
-        # our own GUI setup
-        self._appName              = APP_NAME
-        self._appHelpString        = tealHelpString
-        self._useSimpleAutoClose   = self._do_usac
-        self._showExtraHelpButton  = False
-        self._saveAndCloseOnExec   = cfgGetBool(cod, 'saveAndCloseOnExec', True)
-        self._showHelpInBrowser    = cfgGetBool(cod, 'showHelpInBrowser', False)
-        self._writeProtectOnSaveAs = cfgGetBool(cod, 'writeProtectOnSaveAsOpt', True)
-        self._flagNonDefaultVals   = cfgGetBool(cod, 'flagNonDefaultVals', None)
-        self._optFile              = APP_NAME.lower()+".optionDB"
-
-        # our own colors
-        # prmdrss teal: #00ffaa, pure cyan (teal) #00ffff (darker) #008080
-        # "#aaaaee" is a darker but good blue, but "#bbbbff" pops
-        ltblu = "#ccccff" # light blue
-        drktl = "#008888" # darkish teal
-        self._frmeColor = cod.get('frameColor', drktl)
-        self._taskColor = cod.get('taskBoxColor', ltblu)
-        self._bboxColor = cod.get('buttonBoxColor', ltblu)
-        self._entsColor = cod.get('entriesColor', ltblu)
-        self._flagColor = cod.get('flaggedColor', 'brown')
-
-        # double check _canExecute, but only if it is still set to the default
-        if self._canExecute and self._taskParsObj: # default _canExecute=True
-            self._canExecute = self._taskParsObj.canExecute()
-        self._showExecuteButton = self._canExecute
-
-        # check on the help string - just to see if it is HTML
-        # (could use HTMLParser here if need be, be quick and simple tho)
-        hhh = self.getHelpString(self.pkgName+'.'+self.taskName)
-        if hhh:
-            hhh = hhh.lower()
-            if hhh.find('<html') >= 0 or hhh.find('</html>') > 0:
-                self._knowTaskHelpIsHtml = True
-            elif hhh.startswith('http:') or hhh.startswith('https:'):
-                self._knowTaskHelpIsHtml = True
-            elif hhh.startswith('file:') and \
-                 (hhh.endswith('.htm') or hhh.endswith('.html')):
-                self._knowTaskHelpIsHtml = True
-
-
-    def _preMainLoop(self):
-        """ Override so that we can do some things right before activating. """
-        # Put the fname in the title. EditParDialog doesn't do this by default
-        self.updateTitle(self._taskParsObj.filename)
-
-
-    def _doActualSave(self, fname, comment, set_ro=False, overwriteRO=False):
-        """ Override this so we can handle case of file not writable, as
-            well as to make our _lastSavedState copy. """
-        self.debug('Saving, file name given: '+str(fname)+', set_ro: '+\
-                   str(set_ro)+', overwriteRO: '+str(overwriteRO))
-        cantWrite = False
-        inInstArea = False
-        if fname in (None, ''): fname = self._taskParsObj.getFilename()
-        # now do some final checks then save
-        try:
-            if _isInstalled(fname): # check: may be installed but not read-only
-                inInstArea = cantWrite = True
-            else:
-                # in case of save-as, allow overwrite of read-only file
-                if overwriteRO and os.path.exists(fname):
-                    setWritePrivs(fname, True, True) # try make writable
-                # do the save
-                rv=self._taskParsObj.saveParList(filename=fname,comment=comment)
-        except IOError:
-            cantWrite = True
-
-        # User does not have privs to write to this file. Get name of local
-        # choice and try to use that.
-        if cantWrite:
-            fname = self._taskParsObj.getDefaultSaveFilename()
-            # Tell them the context is changing, and where we are saving
-            msg = 'Read-only config file for task "'
-            if inInstArea:
-                msg = 'Installed config file for task "'
-            msg += self._taskParsObj.getName()+'" is not to be overwritten.'+\
-                  '  Values will be saved to: \n\n\t"'+fname+'".'
-            showwarning(message=msg, title="Will not overwrite!")
-            # Try saving to their local copy
-            rv=self._taskParsObj.saveParList(filename=fname, comment=comment)
-
-        # Treat like a save-as (update title for ALL save ops)
-        self._saveAsPostSave_Hook(fname)
-
-        # Limit write privs if requested (only if not in the rc dir)
-        if set_ro and os.path.dirname(os.path.abspath(fname)) != \
-                                      os.path.abspath(self._rcDir):
-            cfgpars.checkSetReadOnly(fname)
-
-        # Before returning, make a copy so we know what was last saved.
-        # The dict() method returns a deep-copy dict of the keyvals.
-        self._lastSavedState = self._taskParsObj.dict()
-        return rv
-
-
-    def _saveAsPostSave_Hook(self, fnameToBeUsed_UNUSED):
-        """ Override this so we can update the title bar. """
-        self.updateTitle(self._taskParsObj.filename) # _taskParsObj is correct
-
-
-    def hasUnsavedChanges(self):
-        """ Determine if there are any edits in the GUI that have not yet been
-        saved (e.g. to a file). """
-
-        # Sanity check - this case shouldn't occur
-        assert self._lastSavedState != None, \
-               "BUG: Please report this as it should never occur."
-
-        # Force the current GUI values into our model in memory, but don't
-        # change anything.  Don't save to file, don't even convert bad
-        # values to their previous state in the gui.  Note that this can
-        # leave the GUI in a half-saved state, but since we are about to exit
-        # this is OK.  We only want prompting to occur if they decide to save.
-        badList = self.checkSetSaveEntries(doSave=False, fleeOnBadVals=True,
-                                           allowGuiChanges=False)
-        if badList:
-            return True
-
-        # Then compare our data to the last known saved state.  MAKE SURE
-        # the LHS is the actual dict (and not 'self') to invoke the dict
-        # comparison only.
-        return self._lastSavedState != self._taskParsObj
-
-
-    # Employ an edited callback for a given item?
-    def _defineEditedCallbackObjectFor(self, parScope, parName):
-        """ Override to allow us to use an edited callback. """
-
-        # We know that the _taskParsObj is a ConfigObjPars
-        triggerStrs = self._taskParsObj.getTriggerStrings(parScope, parName)
-
-        # Some items will have a trigger, but likely most won't
-        if triggerStrs and len(triggerStrs) > 0:
-            return self
-        else:
-            return None
-
-
-    def _nonStandardEparOptionFor(self, paramTypeStr):
-        """ Override to allow use of TealActionParButton.
-        Return None or a class which derives from EparOption. """
-
-        if paramTypeStr == 'z':
-            from . import teal_bttn
-            return teal_bttn.TealActionParButton
-        else:
-            return None
-
-
-    def updateTitle(self, atitle):
-        """ Override so we can append read-only status. """
-        if atitle and os.path.exists(atitle):
-            if _isInstalled(atitle):
-                atitle += '  [installed]'
-            elif not os.access(atitle, os.W_OK):
-                atitle += '  [read only]'
-        super(ConfigObjEparDialog, self).updateTitle(atitle)
-
-
-    def edited(self, scope, name, lastSavedVal, newVal, action):
-        """ This is the callback function invoked when an item is edited.
-            This is only called for those items which were previously
-            specified to use this mechanism.  We do not turn this on for
-            all items because the performance might be prohibitive.
-            This kicks off any previously registered triggers. """
-
-        # Get name(s) of any triggers that this par triggers
-        triggerNamesTup = self._taskParsObj.getTriggerStrings(scope, name)
-        assert triggerNamesTup != None and len(triggerNamesTup) > 0, \
-               'Empty trigger name for: "'+name+'", consult the .cfgspc file.'
-
-        # Loop through all trigger names - each one is a trigger to kick off -
-        # in the order that they appear in the tuple we got.  Most cases will
-        # probably only have a single trigger in the tuple.
-        for triggerName in triggerNamesTup:
-            # First handle the known/canned trigger names
-#           print (scope, name, newVal, action, triggerName) # DBG: debug line
-
-            # _section_switch_
-            if triggerName == '_section_switch_':
-                # Try to uniformly handle all possible par types here, not
-                # just boolean (e.g. str, int, float, etc.)
-                # Also, see logic in _BooleanMixin._coerceOneValue()
-                state = newVal not in self.FALSEVALS
-                self._toggleSectionActiveState(scope, state, (name,))
-                continue
-
-            # _2_section_switch_ (see notes above in _section_switch_)
-            if triggerName == '_2_section_switch_':
-                state = newVal not in self.FALSEVALS
-                # toggle most of 1st section (as usual) and ALL of next section
-                self._toggleSectionActiveState(scope, state, (name,))
-                # get first par of next section (fpons) - is a tuple
-                fpons = self.findNextSection(scope, name)
-                nextSectScope = fpons[0]
-                if nextSectScope:
-                    self._toggleSectionActiveState(nextSectScope, state, None)
-                continue
-
-            # Now handle rules with embedded code (eg. triggerName=='_rule1_')
-            if '_RULES_' in self._taskParsObj and \
-               triggerName in self._taskParsObj['_RULES_'].configspec:
-                # Get codeStr to execute it, but before we do so, check 'when' -
-                # make sure this is an action that is allowed to cause a trigger
-                ruleSig = self._taskParsObj['_RULES_'].configspec[triggerName]
-                chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
-                codeStr = chkArgsDict.get('code') # or None if didn't specify
-                when2run = chkArgsDict.get('when') # or None if didn't specify
-
-                greenlight = False # do we have a green light to eval the rule?
-                if when2run is None:
-                    greenlight = True # means run rule for any possible action
-                else: # 'when' was set to something so we need to check action
-                    # check value of action (poor man's enum)
-                    assert action in editpar.GROUP_ACTIONS, \
-                        "Unknown action: "+str(action)+', expected one of: '+ \
-                        str(editpar.GROUP_ACTIONS)
-                    # check value of 'when' (allow them to use comma-sep'd str)
-                    # (readers be aware that values must be those possible for
-                    #  'action', and 'always' is also allowed)
-                    whenlist = when2run.split(',')
-                    # warn for invalid values
-                    for w in whenlist:
-                        if not w in editpar.GROUP_ACTIONS and w != 'always':
-                           print('WARNING: skipping bad value for when kwd: "'+\
-                                  w+'" in trigger/rule: '+triggerName)
-                    # finally, do the correlation
-                    greenlight = 'always' in whenlist or action in whenlist
-
-                # SECURITY NOTE: because this part executes arbitrary code, that
-                # code string must always be found only in the configspec file,
-                # which is intended to only ever be root-installed w/ the pkg.
-                if codeStr:
-                    if not greenlight:
-                        continue # not an error, just skip this one
-                    self.showStatus("Evaluating "+triggerName+' ...') #dont keep
-                    self.top.update_idletasks() #allow msg to draw prior to exec
-                    # execute it and retrieve the outcome
-                    try:
-                        outval = execEmbCode(scope, name, newVal, self, codeStr)
-                    except Exception as ex:
-                        outval = 'ERROR in '+triggerName+': '+str(ex)
-                        print(outval)
-                        msg = outval+':\n'+('-'*99)+'\n'+traceback.format_exc()
-                        msg += 'CODE:  '+codeStr+'\n'+'-'*99+'\n'
-                        self.debug(msg)
-                        self.showStatus(outval, keep=1)
-
-                    # Leave this debug line in until it annoys someone
-                    msg = 'Value of "'+name+'" triggered "'+triggerName+'"'
-                    stroutval = str(outval)
-                    if len(stroutval) < 30: msg += '  -->  "'+stroutval+'"'
-                    self.showStatus(msg, keep=0)
-                    # Now that we have triggerName evaluated to outval, we need
-                    # to look through all the parameters and see if there are
-                    # any items to be affected by triggerName (e.g. '_rule1_')
-                    self._applyTriggerValue(triggerName, outval)
-                    continue
-
-            # If we get here, we have an unknown/unusable trigger
-            raise RuntimeError('Unknown trigger for: "'+name+'", named: "'+ \
-                  str(triggerName)+'".  Please consult the .cfgspc file.')
-
-
-    def findNextSection(self, scope, name):
-        """ Starts with given par (scope+name) and looks further down the list
-        of parameters until one of a different non-null scope is found.  Upon
-        success, returns the (scope, name) tuple, otherwise (None, None). """
-        # first find index of starting point
-        plist = self._taskParsObj.getParList()
-        start = 0
-        for i in range(len(plist)):
-            if scope == plist[i].scope and name == plist[i].name:
-                start = i
-                break
-        else:
-            print('WARNING: could not find starting par: '+scope+'.'+name)
-            return (None, None)
-
-        # now find first different (non-null) scope in a par, after start
-        for i in range(start, len(plist)):
-            if len(plist[i].scope) > 0 and plist[i].scope != scope:
-                return (plist[i].scope, plist[i].name)
-        # else didn't find it
-        return (None, None)
-
-
-    def _setTaskParsObj(self, theTask):
-        """ Overridden version for ConfigObj. theTask can be either
-            a .cfg file name or a ConfigObjPars object. """
-        # Create the ConfigObjPars obj
-        self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask,
-                                    self._strict, False)
-        # Tell it that we can be used for catching debug lines
-        self._taskParsObj.setDebugLogger(self)
-
-        # Immediately make a copy of it's un-tampered internal dict.
-        # The dict() method returns a deep-copy dict of the keyvals.
-        self._lastSavedState = self._taskParsObj.dict()
-        # do this here ??!! or before _lastSavedState ??!!
-#       self._taskParsObj.strictUpdate(self._overrides)
-
-
-    def _getSaveAsFilter(self):
-        """ Return a string to be used as the filter arg to the save file
-            dialog during Save-As. """
-        # figure the dir to use, start with the one from the file
-        absRcDir = os.path.abspath(self._rcDir)
-        thedir = os.path.abspath(os.path.dirname(self._taskParsObj.filename))
-        # skip if not writeable, or if is _rcDir
-        if thedir == absRcDir or not os.access(thedir, os.W_OK):
-            thedir = os.path.abspath(os.path.curdir)
-        # create save-as filter string
-        filt = thedir+'/*.cfg'
-        envVarName = APP_NAME.upper()+'_CFG'
-        if envVarName in os.environ:
-            upx = os.environ[envVarName]
-            if len(upx) > 0:  filt = upx+"/*.cfg"
-        # done
-        return filt
-
-
-    def _getOpenChoices(self):
-        """ Go through all possible sites to find applicable .cfg files.
-            Return as an iterable. """
-        tsk = self._taskParsObj.getName()
-        taskFiles = set()
-        dirsSoFar = [] # this helps speed this up (skip unneeded globs)
-
-        # last dir
-        aDir = os.path.dirname(self._taskParsObj.filename)
-        if len(aDir) < 1: aDir = os.curdir
-        dirsSoFar.append(aDir)
-        taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
-
-        # current dir
-        aDir = os.getcwd()
-        if aDir not in dirsSoFar:
-            dirsSoFar.append(aDir)
-            taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
-
-        # task's python pkg dir (if tsk == python pkg name)
-        try:
-            x, pkgf = cfgpars.findCfgFileForPkg(tsk, '.cfg', taskName=tsk,
-                              pkgObj=self._taskParsObj.getAssocPkg())
-            taskFiles.update( (pkgf,) )
-        except cfgpars.NoCfgFileError:
-            pass # no big deal - maybe there is no python package
-
-        # user's own resourceDir
-        aDir = self._rcDir
-        if aDir not in dirsSoFar:
-            dirsSoFar.append(aDir)
-            taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
-
-        # extra loc - see if they used the app's env. var
-        aDir = dirsSoFar[0] # flag to skip this if no env var found
-        envVarName = APP_NAME.upper()+'_CFG'
-        if envVarName in os.environ: aDir = os.environ[envVarName]
-        if aDir not in dirsSoFar:
-            dirsSoFar.append(aDir)
-            taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))
-
-        # At the very end, add an option which we will later interpret to mean
-        # to open the file dialog.
-        taskFiles = list(taskFiles) # so as to keep next item at end of seq
-        taskFiles.sort()
-        taskFiles.append("Other ...")
-
-        return taskFiles
-
-
-    # OPEN: load parameter settings from a user-specified file
-    def pfopen(self, event=None):
-        """ Load the parameter settings from a user-specified file. """
-
-        # Get the selected file name
-        fname = self._openMenuChoice.get()
-
-        # Also allow them to simply find any file - do not check _task_name_...
-        # (could use Tkinter's FileDialog, but this one is prettier)
-        if fname[-3:] == '...':
-            if capable.OF_TKFD_IN_EPAR:
-                fname = askopenfilename(title="Load Config File",
-                                        parent=self.top)
-            else:
-                from . import filedlg
-                fd = filedlg.PersistLoadFileDialog(self.top,
-                                                   "Load Config File",
-                                                   self._getSaveAsFilter())
-                if fd.Show() != 1:
-                    fd.DialogCleanup()
-                    return
-                fname = fd.GetFileName()
-                fd.DialogCleanup()
-
-        if not fname: return # canceled
-        self.debug('Loading from: '+fname)
-
-        # load it into a tmp object (use associatedPkg if we have one)
-        try:
-            tmpObj = cfgpars.ConfigObjPars(fname, associatedPkg=\
-                                           self._taskParsObj.getAssocPkg(),
-                                           strict=self._strict)
-        except Exception as ex:
-            showerror(message=ex.message, title='Error in '+os.path.basename(fname))
-            self.debug('Error in '+os.path.basename(fname))
-            self.debug(traceback.format_exc())
-            return
-
-        # check it to make sure it is a match
-        if not self._taskParsObj.isSameTaskAs(tmpObj):
-            msg = 'The current task is "'+self._taskParsObj.getName()+ \
-                  '", but the selected file is for task "'+ \
-                  str(tmpObj.getName())+'".  This file was not loaded.'
-            showerror(message=msg, title="Error in "+os.path.basename(fname))
-            self.debug(msg)
-            self.debug(traceback.format_exc())
-            return
-
-        # Set the GUI entries to these values (let the user Save after)
-        newParList = tmpObj.getParList()
-        try:
-            self.setAllEntriesFromParList(newParList, updateModel=True)
-                # go ahead and updateModel, even though it will take longer,
-                # we need it updated for the copy of the dict we make below
-        except editpar.UnfoundParamError as pe:
-            showwarning(message=str(pe), title="Error in "+os.path.basename(fname))
-        # trip any triggers
-        self.checkAllTriggers('fopen')
-
-        # This new fname is our current context
-        self.updateTitle(fname)
-        self._taskParsObj.filename = fname # !! maybe try setCurrentContext() ?
-        self.freshenFocus()
-        self.showStatus("Loaded values from: "+fname, keep=2)
-
-        # Since we are in a new context (and have made no changes yet), make
-        # a copy so we know what the last state was.
-        # The dict() method returns a deep-copy dict of the keyvals.
-        self._lastSavedState = self._taskParsObj.dict()
-
-
-    def unlearn(self, event=None):
-        """ Override this so that we can set to default values our way. """
-        self.debug('Clicked defaults')
-        self._setToDefaults()
-        self.freshenFocus()
-
-
-    def _handleParListMismatch(self, probStr, extra=False):
-        """ Override to include ConfigObj filename and specific errors.
-        Note that this only handles "missing" pars and "extra" pars, not
-        wrong-type pars.  So it isn't that big of a deal. """
-
-        # keep down the duplicate errors
-        if extra:
-            return True # the base class is already stating it will be ignored
-
-        # find the actual errors, and then add that to the generic message
-        errmsg = 'Warning: '
-        if self._strict:
-            errmsg = 'ERROR: '
-        errmsg = errmsg+'mismatch between default and current par lists ' + \
-                 'for task "'+self.taskName+'".'
-        if probStr:
-            errmsg += '\n\t'+probStr
-        errmsg += '\nTry editing/deleting: "' + \
-                  self._taskParsObj.filename+'" (or, if in PyRAF: "unlearn ' + \
-                  self.taskName+'").'
-        print(errmsg)
-        return True # as we said, not that big a deal
-
-
-    def _setToDefaults(self):
-        """ Load the default parameter settings into the GUI. """
-
-        # Create an empty object, where every item is set to it's default value
-        try:
-            tmpObj = cfgpars.ConfigObjPars(self._taskParsObj.filename,
-                                           associatedPkg=\
-                                           self._taskParsObj.getAssocPkg(),
-                                           setAllToDefaults=self.taskName,
-                                           strict=False)
-        except Exception as ex:
-            msg = "Error Determining Defaults"
-            showerror(message=msg+'\n\n'+ex.message, title="Error Determining Defaults")
-            return
-
-        # Set the GUI entries to these values (let the user Save after)
-        tmpObj.filename = self._taskParsObj.filename = '' # name it later
-        newParList = tmpObj.getParList()
-        try:
-            self.setAllEntriesFromParList(newParList) # needn't updateModel yet
-            self.checkAllTriggers('defaults')
-            self.updateTitle('')
-            self.showStatus("Loaded default "+self.taskName+" values via: "+ \
-                 os.path.basename(tmpObj._original_configspec), keep=1)
-        except editpar.UnfoundParamError as pe:
-            showerror(message=str(pe), title="Error Setting to Default Values")
-
-    def getDict(self):
-        """ Retrieve the current parameter settings from the GUI."""
-        # We are going to have to return the dict so let's
-        # first make sure all of our models are up to date with the values in
-        # the GUI right now.
-        badList = self.checkSetSaveEntries(doSave=False)
-        if badList:
-            self.processBadEntries(badList, self.taskName, canCancel=False)
-        return self._taskParsObj.dict()
-
-    def loadDict(self, theDict):
-        """ Load the parameter settings from a given dict into the GUI. """
-        # We are going to have to merge this info into ourselves so let's
-        # first make sure all of our models are up to date with the values in
-        # the GUI right now.
-        badList = self.checkSetSaveEntries(doSave=False)
-        if badList:
-            if not self.processBadEntries(badList, self.taskName):
-                return
-        # now, self._taskParsObj is up-to-date
-        # So now we update _taskParsObj with the input dict
-        cfgpars.mergeConfigObj(self._taskParsObj, theDict)
-        # now sync the _taskParsObj dict with its par list model
-        #    '\n'.join([str(jj) for jj in self._taskParsObj.getParList()])
-        self._taskParsObj.syncParamList(False)
-
-        # Set the GUI entries to these values (let the user Save after)
-        try:
-            self.setAllEntriesFromParList(self._taskParsObj.getParList(),
-                                          updateModel=True)
-            self.checkAllTriggers('fopen')
-            self.freshenFocus()
-            self.showStatus('Loaded '+str(len(theDict))+ \
-                ' user par values for: '+self.taskName, keep=1)
-        except Exception as ex:
-            showerror(message=ex.message, title="Error Setting to Loaded Values")
-
-
-    def _getGuiSettings(self):
-        """ Return a dict (ConfigObj) of all user settings found in rcFile. """
-        # Put the settings into a ConfigObj dict (don't use a config-spec)
-        rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
-        if os.path.exists(rcFile):
-            try:
-                return configobj.ConfigObj(rcFile)
-            except:
-                raise RuntimeError('Error parsing: '+os.path.realpath(rcFile))
-
-            # tho, for simple types, unrepr=True eliminates need for .cfgspc
-            # also, if we turn unrepr on, we don't need cfgGetBool
-        else:
-            return {}
-
-
-    def _saveGuiSettings(self):
-        """ The base class doesn't implement this, so we will - save settings
-        (only GUI stuff, not task related) to a file. """
-        # Put the settings into a ConfigObj dict (don't use a config-spec)
-        rcFile = self._rcDir+os.sep+APP_NAME.lower()+'.cfg'
-        #
-        if os.path.exists(rcFile): os.remove(rcFile)
-        co = configobj.ConfigObj(rcFile) # can skip try-block, won't read file
-
-        co['showHelpInBrowser']       = self._showHelpInBrowser
-        co['saveAndCloseOnExec']      = self._saveAndCloseOnExec
-        co['writeProtectOnSaveAsOpt'] = self._writeProtectOnSaveAs
-        co['flagNonDefaultVals']      = self._flagNonDefaultVals
-        co['frameColor']              = self._frmeColor
-        co['taskBoxColor']            = self._taskColor
-        co['buttonBoxColor']          = self._bboxColor
-        co['entriesColor']            = self._entsColor
-        co['flaggedColor']            = self._flagColor
-
-        co.initial_comment = ['Automatically generated by '+\
-            APP_NAME+'.  All edits will eventually be overwritten.']
-        co.initial_comment.append('To use platform default colors, delete each color line below.')
-        co.final_comment = [''] # ensure \n at EOF
-        co.write()
-
-
-    def _applyTriggerValue(self, triggerName, outval):
-        """ Here we look through the entire .cfgspc to see if any parameters
-        are affected by this trigger. For those that are, we apply the action
-        to the GUI widget.  The action is specified by depType. """
-
-        # First find which items are dependent upon this trigger (cached)
-        # e.g. { scope1.name1 : dep'cy-type, scope2.name2 : dep'cy-type, ... }
-        depParsDict = self._taskParsObj.getParsWhoDependOn(triggerName)
-        if not depParsDict: return
-        if 0: print("Dependent parameters:\n"+str(depParsDict)+"\n")
-
-        # Get model data, the list of pars
-        theParamList = self._taskParsObj.getParList()
-
-        # Then go through the dependent pars and apply the trigger to them
-        settingMsg = ''
-        for absName in depParsDict:
-            used = False
-            # For each dep par, loop to find the widget for that scope.name
-            for i in range(self.numParams):
-                scopedName = theParamList[i].scope+'.'+theParamList[i].name # diff from makeFullName!!
-                if absName == scopedName: # a match was found
-                    depType = depParsDict[absName]
-                    if depType == 'active_if':
-                        self.entryNo[i].setActiveState(outval)
-                    elif depType == 'inactive_if':
-                        self.entryNo[i].setActiveState(not outval)
-                    elif depType == 'is_set_by':
-                        self.entryNo[i].forceValue(outval, noteEdited=True)
-                        # WARNING! noteEdited=True may start recursion!
-                        if len(settingMsg) > 0: settingMsg += ", "
-                        settingMsg += '"'+theParamList[i].name+'" to "'+\
-                                      outval+'"'
-                    elif depType in ('set_yes_if', 'set_no_if'):
-                        if bool(outval):
-                            newval = 'yes'
-                            if depType == 'set_no_if': newval = 'no'
-                            self.entryNo[i].forceValue(newval, noteEdited=True)
-                            # WARNING! noteEdited=True may start recursion!
-                            if len(settingMsg) > 0: settingMsg += ", "
-                            settingMsg += '"'+theParamList[i].name+'" to "'+\
-                                          newval+'"'
-                        else:
-                            if len(settingMsg) > 0: settingMsg += ", "
-                            settingMsg += '"'+theParamList[i].name+\
-                                          '" (no change)'
-                    elif depType == 'is_disabled_by':
-                        # this one is only used with boolean types
-                        on = self.entryNo[i].convertToNative(outval)
-                        if on:
-                            # do not activate whole section or change
-                            # any values, only activate this one
-                            self.entryNo[i].setActiveState(True)
-                        else:
-                            # for off, set the bool par AND grey WHOLE section
-                            self.entryNo[i].forceValue(outval, noteEdited=True)
-                            self.entryNo[i].setActiveState(False)
-                            # we'd need this if the par had no _section_switch_
-#                           self._toggleSectionActiveState(
-#                                theParamList[i].scope, False, None)
-                            if len(settingMsg) > 0: settingMsg += ", "
-                            settingMsg += '"'+theParamList[i].name+'" to "'+\
-                                          outval+'"'
-                    else:
-                        raise RuntimeError('Unknown dependency: "'+depType+ \
-                                           '" for par: "'+scopedName+'"')
-                    used = True
-                    break
-
-            # Or maybe it is a whole section
-            if absName.endswith('._section_'):
-                scope = absName[:-10]
-                depType = depParsDict[absName]
-                if depType == 'active_if':
-                    self._toggleSectionActiveState(scope, outval, None)
-                elif depType == 'inactive_if':
-                    self._toggleSectionActiveState(scope, not outval, None)
-                used = True
-
-            # Help to debug the .cfgspc rules
-            if not used:
-                raise RuntimeError('UNUSED "'+triggerName+'" dependency: '+ \
-                      str({absName:depParsDict[absName]}))
-
-        if len(settingMsg) > 0:
-# why ?!    self.freshenFocus()
-            self.showStatus('Automatically set '+settingMsg, keep=1)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/teal_bttn.py b/required_pkgs/stsci.tools/lib/stsci/tools/teal_bttn.py
deleted file mode 100644
index f7f88b1..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/teal_bttn.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""teal_bttn.py: for defining the action "parameter" button widget
-   to be used in TEAL.
-
-$Id: teal_bttn.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import division, print_function # confidence high
-
-import traceback
-from . import eparoption, vtor_checks
-
-class TealActionParButton(eparoption.ActionEparButton):
-
-    def getButtonLabel(self):
-        """ Return string to be used on as button label - "value" of par. """
-        # If the value has a comma, return the 2nd part, else use whole thing
-        return self.value.split(',')[-1].strip()
-
-    def getShowName(self):
-        """ Return string to be used on LHS of button - "name" of par. """
-        # If the value has a comma, return the 1st part, else leave empty
-        if self.value.find(',') >= 0:
-            return self.value.split(',')[0]
-        else:
-            return ''
-
-    def flagThisPar(self, currentVal, force):
-        """ Override this to do nothing - the value of this par will
-        never be wrong and thus never need to be flagged. """
-        pass
-
-    def clicked(self):
-        """ Called when this button is clicked. Execute code from .cfgspc """
-        try:
-            from . import teal
-        except:
-            teal = None
-        try:
-            # start drilling down into the tpo to get the code
-            tealGui = self._mainGuiObj
-            tealGui.showStatus('Clicked "'+self.getButtonLabel()+'"', keep=1)
-            pscope = self.paramInfo.scope
-            pname = self.paramInfo.name
-            tpo = tealGui._taskParsObj
-            tup = tpo.getExecuteStrings(pscope, pname)
-            code = ''
-            if not tup:
-                if teal:
-                    teal.popUpErr(tealGui.top, "No action to perform",
-                                  "Action Button Error")
-                return
-            for exname in tup:
-                if '_RULES_' in tpo and exname in tpo['_RULES_'].configspec:
-                    ruleSig = tpo['_RULES_'].configspec[exname]
-                    chkArgsDict = vtor_checks.sigStrToKwArgsDict(ruleSig)
-                    code = chkArgsDict.get('code') # a string or None
-                    # now go ahead and execute it
-                    teal.execEmbCode(pscope, pname, self.getButtonLabel(),
-                                     tealGui, code)
-            # done
-            tealGui.debug('Finished: "'+self.getButtonLabel()+'"')
-        except Exception as ex:
-            msg = 'Error executing: "'+self.getButtonLabel()+'"\n'+ex.message
-            msgFull = msg+'\n'+''.join(traceback.format_exc())
-            msgFull+= "CODE:\n"+code
-            if tealGui:
-                if teal: teal.popUpErr(tealGui.top, msg, "Action Button Error")
-                tealGui.debug(msgFull)
-            else:
-                if teal: teal.popUpErr(None, msg, "Action Button Error")
-                print(msgFull)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tester.py b/required_pkgs/stsci.tools/lib/stsci/tools/tester.py
deleted file mode 100644
index 88df535..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tester.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Package: stsci.tools
-Author: Christopher Hanley
-
-Purpose:
-========
-Provide driver function for package tests.
-
-Dependencies:
-=============
-
-- nose 0.10.4 or greater.
-
-Usage Example:
-==============
-All packages will need to import stsci.tools.tester and add the following
-function to the __init__.py of their package:
-
-import stsci.tools.tester
-def test(*args,**kwds):
-    return stsci.tools.tester.test(modname=__name__, *args, **kwds)
-
-
-This assumes that all software packages are installed with the structure:
-
-package/
-    __init__.py
-    modules.py
-    tests/
-    tests/__init__.py
-    tests/test_whatever.py
-
-Where the /tests subdirectory containts the python files that nose will
-recognize as tests.
-
-"""
-
-from __future__ import division, print_function
-
-import os
-import os.path
-import sys
-
-pytools_tester_active = False
-
-def test(modname, mode='nose', *args, **kwds):
-    """
-    Purpose:
-    ========
-    test: Run refcore nosetest suite of tests. The tests are located in the
-    tests/ directory of the installed modules.
-
-    """
-
-    global pytools_tester_active
-
-    if modname is not None :
-        curdir = sys.modules[modname].__file__
-        curdir = os.path.abspath(curdir)
-        curdir = os.path.dirname(curdir)
-    else:
-        raise ValueError('name of module to test not given')
-
-    DIRS = [os.path.join(curdir, testdir) for testdir in ['tests', 'test']]
-
-    dirname = None
-    for x in DIRS:
-        if os.path.isdir(x) :
-            dirname = x
-            break
-
-    if dirname is None :
-            print('no tests found in: %s' % repr(DIRS))
-            return False
-
-    if mode == 'nose' :
-
-        print("Testing with nose in %s\n"%dirname)
-        try:
-            import nose
-        except ImportError:
-            print("Nose 0.10.4 or greater is required for running tests.")
-            raise
-
-        # First arg is blank, since it's skipped by nose
-        # --exe is needed because easy_install sets all .py files as executable for
-        # some reason
-        args = ['', '--exe', '-w', dirname ]
-
-        result = False
-
-        try :
-            pytools_tester_active = True
-            result = nose.run(argv=args)
-        except :
-            pytools_tester_active = False
-            raise
-        pytools_tester_active = False
-
-        return result
-
-    if mode == 'pytest' :
-
-        print("Testing with pytest in %s\n"%dirname)
-
-        try :
-            import pytest
-        except ImportError :
-            print("py.test is required for running tests")
-            raise
-
-        # do not use --doctest-modules ; it doesn't work right
-        args = [ dirname ]
-
-        try :
-            import pandokia
-            args = ['-p', 'pandokia.helpers.pytest_plugin' ] + args
-        except ImportError :
-            pass
-
-        result = False
-
-        try :
-            pytools_tester_active = True
-            result = pytest.main(args)
-        except :
-            pytools_tester_active = False
-            raise
-        pytools_tester_active = False
-
-        return result
-
-    raise ValueError("invalid test specification - mode must be one of 'nose' or 'pytest'")
-
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tests/__init__.py b/required_pkgs/stsci.tools/lib/stsci/tools/tests/__init__.py
deleted file mode 100644
index b06eaf1..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import division
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tests/cdva2.fits b/required_pkgs/stsci.tools/lib/stsci/tools/tests/cdva2.fits
deleted file mode 100644
index 1895ba5..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tests/cdva2.fits
+++ /dev/null
@@ -1 +0,0 @@
-SIMPLE  =                    T / conforms to FITS standard                      BITPIX  =                   32 / array data type                                NAXIS   =                    0 / number of array dimensions                     EXTEND  =                    T                                                  PIXVALUE=                    1 / Constant Pixel Value                           NPIX1   =                   10 / length of constant array axis 1                NPIX2   =    [...]
\ No newline at end of file
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tests/o4sp040b0_raw.fits b/required_pkgs/stsci.tools/lib/stsci/tools/tests/o4sp040b0_raw.fits
deleted file mode 100644
index c0da848..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tests/o4sp040b0_raw.fits
+++ /dev/null
@@ -1 +0,0 @@
-SIMPLE  =                    T / Fits standard                                  BITPIX  =                   16 / Bits per pixel                                 NAXIS   =                    0 / Number of axes                                 EXTEND  =                    T / File may contain extensions                    ORIGIN  = 'NOAO-IRAF FITS Image Kernel July 2003' / FITS file originator        IRAF-TLM= '14:58:02 (23/02/2007)' / Time of last modification                   NEXTEND =    [...]
\ No newline at end of file
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tests/testStpyfits.py b/required_pkgs/stsci.tools/lib/stsci/tools/tests/testStpyfits.py
deleted file mode 100644
index a053e74..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tests/testStpyfits.py
+++ /dev/null
@@ -1,700 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division  # confidence high
-
-import os
-import tempfile
-
-import numpy as np
-from nose.tools import assert_true, assert_false, assert_equal, assert_raises
-
-import stsci.tools.stpyfits as stpyfits
-#import pyfits
-from astropy.io import fits
-#from pyfits.tests import PyfitsTestCase
-from astropy.io.fits.tests import FitsTestCase
-
-
-class TestStpyfitsFunctions(FitsTestCase):
-    def setup(self):
-        self.data_dir = os.path.dirname(__file__)
-        self.temp_dir = tempfile.mkdtemp(prefix='stpyfits-test-')
-
-    def testInfoConvienceFunction(self):
-        """Test the info convience function in both the fits and stpyfits
-           namespace."""
-
-        assert_equal(
-            stpyfits.info(self.data('o4sp040b0_raw.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 215, (), '', ''),
-             (1, 'SCI', 'ImageHDU', 141, (62, 44), 'int16', ''),
-             (2, 'ERR', 'ImageHDU', 71, (62, 44), 'int16', ''),
-             (3, 'DQ', 'ImageHDU', 71, (62, 44), 'int16', ''),
-             (4, 'SCI', 'ImageHDU', 141, (62, 44), 'int16', ''),
-             (5, 'ERR', 'ImageHDU', 71, (62, 44), 'int16', ''),
-             (6, 'DQ', 'ImageHDU', 71, (62, 44), 'int16', '')])
-
-
-        assert_equal(
-            fits.info(self.data('o4sp040b0_raw.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 215, (), '', ''),
-             (1, 'SCI', 'ImageHDU', 141, (62, 44), 'int16', ''),
-             (2, 'ERR', 'ImageHDU', 71, (), '', ''),
-             (3, 'DQ', 'ImageHDU', 71, (), '', ''),
-             (4, 'SCI', 'ImageHDU', 141, (62, 44), 'int16', ''),
-             (5, 'ERR', 'ImageHDU', 71, (), '', ''),
-             (6, 'DQ', 'ImageHDU', 71, (), '', '')])
-
-        assert_equal(
-            stpyfits.info(self.data('cdva2.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', '')])
-
-        assert_equal(
-            fits.info(self.data('cdva2.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', '')])
-
-
-    def testOpenConvienceFunction(self):
-        """Test the open convience function in both the fits and stpyfits
-           namespace."""
-
-        hdul = stpyfits.open(self.data('cdva2.fits'))
-        hdul1 = fits.open(self.data('cdva2.fits'))
-
-        assert_equal(hdul[0].header['NAXIS'], 2)
-        assert_equal(hdul1[0].header['NAXIS'], 0)
-        assert_equal(hdul[0].header['NAXIS1'], 10)
-        assert_equal(hdul[0].header['NAXIS2'], 10)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NAXIS1'])
-        assert_raises(KeyError, lambda: hdul1[0].header['NAXIS2'])
-        assert_raises(KeyError, lambda: hdul[0].header['NPIX1'])
-        assert_raises(KeyError, lambda: hdul[0].header['NPIX2'])
-
-        assert_equal(hdul1[0].header['NPIX1'], 10)
-        assert_equal(hdul1[0].header['NPIX2'], 10)
-
-        assert_true((hdul[0].data == np.ones((10, 10), dtype=np.int32)).all())
-
-        assert_equal(hdul1[0].data, None)
-
-        hdul.close()
-        hdul1.close()
-
-    def testGetHeaderConvienceFunction(self):
-        """Test the getheader convience function in both the fits and
-           stpyfits namespace."""
-
-        hd = stpyfits.getheader(self.data('cdva2.fits'))
-        hd1 = fits.getheader(self.data('cdva2.fits'))
-
-        assert_equal(hd['NAXIS'], 2)
-        assert_equal(hd1['NAXIS'], 0)
-        assert_equal(hd['NAXIS1'], 10)
-        assert_equal(hd['NAXIS2'], 10)
-
-        assert_raises(KeyError, lambda: hd1['NAXIS1'])
-        assert_raises(KeyError, lambda: hd1['NAXIS2'])
-        assert_raises(KeyError, lambda: hd['NPIX1'])
-        assert_raises(KeyError, lambda: hd['NPIX2'])
-
-        assert_equal(hd1['NPIX1'], 10)
-        assert_equal(hd1['NPIX2'], 10)
-
-        hd = stpyfits.getheader(self.data('o4sp040b0_raw.fits'), 2)
-        hd1 = fits.getheader(self.data('o4sp040b0_raw.fits'), 2)
-
-        assert_equal(hd['NAXIS'], 2)
-        assert_equal(hd1['NAXIS'], 0)
-        assert_equal(hd['NAXIS1'], 62)
-        assert_equal(hd['NAXIS2'], 44)
-
-        assert_raises(KeyError, lambda: hd1['NAXIS1'])
-        assert_raises(KeyError, lambda: hd1['NAXIS2'])
-        assert_raises(KeyError, lambda: hd['NPIX1'])
-        assert_raises(KeyError, lambda: hd['NPIX2'])
-
-        assert_equal(hd1['NPIX1'], 62)
-        assert_equal(hd1['NPIX2'], 44)
-
-    def testGetDataConvienceFunction(self):
-        """Test the getdata convience function in both the fits and
-           stpyfits namespace."""
-
-        d = stpyfits.getdata(self.data('cdva2.fits'))
-        assert_true((d == np.ones((10, 10), dtype=np.int32)).all())
-
-        assert_raises(IndexError, fits.getdata, self.data('cdva2.fits'))
-
-    def testGetValConvienceFunction(self):
-        """Test the getval convience function in both the fits and
-           stpyfits namespace."""
-
-        val = stpyfits.getval(self.data('cdva2.fits'), 'NAXIS', 0)
-        val1 = fits.getval(self.data('cdva2.fits'), 'NAXIS', 0)
-        assert_equal(val, 2)
-        assert_equal(val1, 0)
-
-    def testwritetoConvienceFunction(self):
-        """Test the writeto convience function in both the fits and stpyfits
-           namespace."""
-
-        hdul = stpyfits.open(self.data('cdva2.fits'))
-        hdul1 = fits.open(self.data('cdva2.fits'))
-
-        header = hdul[0].header.copy()
-        header['NAXIS'] = 0
-
-        stpyfits.writeto(self.temp('new.fits'), hdul[0].data, header,
-                         clobber=True)
-        fits.writeto(self.temp('new1.fits'), hdul1[0].data,hdul1[0].header,
-                     clobber=True)
-
-        hdul.close()
-        hdul1.close()
-
-        info1 = fits.info(self.temp('new.fits'), output=False)
-        info2 = stpyfits.info(self.temp('new.fits'), output=False)
-        info3 = fits.info(self.temp('new1.fits'), output=False)
-        info4 = stpyfits.info(self.temp('new1.fits'), output=False)
-
-        assert_equal(info1, [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', '')])
-        assert_equal(info2,
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (10, 10), 'int32', '')])
-        assert_equal(info3, [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', '')])
-        assert_equal(info4,
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (10, 10), 'uint8', '')])
-
-    def testappendConvienceFunction(self):
-        """Test the append convience function in both the fits and stpyfits
-           namespace."""
-
-        hdul = stpyfits.open(self.data('cdva2.fits'))
-        hdul1 = fits.open(self.data('cdva2.fits'))
-
-        stpyfits.writeto(self.temp('new.fits'), hdul[0].data, hdul[0].header,
-                         clobber=True)
-        fits.writeto(self.temp('new1.fits'), hdul1[0].data, hdul1[0].header,
-                       clobber=True)
-
-        hdu = stpyfits.ImageHDU()
-        hdu1 = fits.ImageHDU()
-
-        hdu.data = hdul[0].data
-        hdu1.data = hdul1[0].data
-        hdu.header.set('BITPIX', 32)
-        hdu1.header.set('BITPIX', 32)
-        hdu.header.set('NAXIS', 2)
-        hdu.header.set('NAXIS1', 10, 'length of constant array axis 1',
-                       after='NAXIS')
-        hdu.header.set('NAXIS2', 10, 'length of constant array axis 2',
-                       after='NAXIS1')
-        hdu.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')
-        hdu1.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')
-        hdu1.header.set('NPIX1', 10, 'length of constant array axis 1',
-                        after='GCOUNT')
-        hdu1.header.set('NPIX2', 10, 'length of constant array axis 2',
-                        after='NPIX1')
-        stpyfits.append(self.temp('new.fits'), hdu.data, hdu.header)
-        fits.append(self.temp('new1.fits'), hdu1.data, hdu1.header)
-
-        assert_equal(stpyfits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-        assert_equal(stpyfits.info(self.temp('new1.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'uint8', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'uint8', '')])
-        assert_equal(fits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-        assert_equal(fits.info(self.temp('new1.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''),
-             (1, '', 'ImageHDU', 8, (), '', '')])
-
-        hdul5 = stpyfits.open(self.temp('new.fits'))
-        hdul6 = fits.open(self.temp('new1.fits'))
-        assert_equal(hdul5[1].header['NAXIS'], 2)
-        assert_equal(hdul6[1].header['NAXIS'], 0)
-        assert_equal(hdul5[1].header['NAXIS1'], 10)
-        assert_equal(hdul5[1].header['NAXIS2'], 10)
-
-        assert_raises(KeyError, lambda: hdul6[1].header['NAXIS1'])
-        assert_raises(KeyError, lambda: hdul6[1].header['NAXIS2'])
-        assert_raises(KeyError, lambda: hdul5[1].header['NPIX1'])
-        assert_raises(KeyError, lambda: hdul5[1].header['NPIX2'])
-
-        assert_equal(hdul6[1].header['NPIX1'], 10)
-        assert_equal(hdul6[1].header['NPIX2'], 10)
-
-        assert_true((hdul5[1].data == np.ones((10, 10), dtype=np.int32)).all())
-
-        assert_equal(hdul6[1].data, None)
-
-        hdul5.close()
-        hdul6.close()
-        hdul.close()
-        hdul1.close()
-
-    def testupdateConvienceFunction(self):
-        """Test the update convience function in both the fits and stpyfits
-           namespace."""
-
-        hdul = stpyfits.open(self.data('cdva2.fits'))
-        hdul1 = fits.open(self.data('cdva2.fits'))
-
-        header = hdul[0].header.copy()
-        header['NAXIS'] = 0
-        stpyfits.writeto(self.temp('new.fits'), hdul[0].data, header,
-                         clobber=True)
-
-        hdu = stpyfits.ImageHDU()
-        hdu1 = fits.ImageHDU()
-
-        hdu.data = hdul[0].data
-        hdu1.data = hdul1[0].data
-        hdu.header.set('BITPIX', 32)
-        hdu1.header.set('BITPIX', 32)
-        hdu.header.set('NAXIS', 0)
-        hdu.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')
-        hdu1.header.set('PIXVALUE', 1, 'Constant pixel value', after='GCOUNT')
-        hdu.header.set('NPIX1', 10, 'length of constant array axis 1',
-                       after='GCOUNT')
-        hdu.header.set('NPIX2', 10, 'length of constant array axis 2',
-                       after='NPIX1')
-        stpyfits.append(self.temp('new.fits'), hdu.data, hdu.header)
-
-        d = hdu.data * 0
-
-        stpyfits.update(self.temp('new.fits'), d, hdu.header, 1)
-
-        assert_equal(fits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''),
-             (1, '', 'ImageHDU', 8, (), '', '')])
-        assert_equal(stpyfits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-
-        hdul7 = stpyfits.open(self.temp('new.fits'))
-        assert_equal(hdul7[1].header['NAXIS'], 2)
-        assert_equal(hdul7[1].header['NAXIS1'], 10)
-        assert_equal(hdul7[1].header['NAXIS2'], 10)
-        assert_equal(hdul7[1].header['PIXVALUE'], 0)
-
-        assert_raises(KeyError, lambda: hdul7[1].header['NPIX1'])
-        assert_raises(KeyError, lambda: hdul7[1].header['NPIX2'])
-
-        assert_true((hdul7[1].data ==
-                     np.zeros((10, 10), dtype=np.int32)).all())
-
-        hdul8 = fits.open(self.temp('new.fits'))
-        assert_equal(hdul8[1].header['NAXIS'], 0)
-        assert_equal(hdul8[1].header['NPIX1'], 10)
-        assert_equal(hdul8[1].header['NPIX2'], 10)
-        assert_equal(hdul8[1].header['PIXVALUE'], 0)
-
-        assert_raises(KeyError, lambda: hdul8[1].header['NAXIS1'])
-        assert_raises(KeyError, lambda: hdul8[1].header['NAXIS2'])
-
-        assert_equal(hdul8[1].data, None)
-
-        hdul7.close()
-        hdul8.close()
-        hdul.close()
-        hdul1.close()
-
-    def testImageHDUConstructor(self):
-        """Test the ImageHDU constructor in both the fits and stpyfits
-           namespace."""
-
-        hdu = stpyfits.ImageHDU()
-        assert_true(isinstance(hdu, stpyfits.ConstantValueImageHDU))
-        hdu1 = fits.ImageHDU()
-        assert_true(isinstance(hdu, fits.ImageHDU))
-
-    def testPrimaryHDUConstructor(self):
-        """Test the PrimaryHDU constructor in both the fits and stpyfits
-           namespace.  Although stpyfits does not reimplement the
-           constructor, it does add _ConstantValueImageBaseHDU to the
-           inheritance hierarchy of fits.PrimaryHDU when accessed through the
-           stpyfits namespace.  This method tests that that inheritance is
-           working"""
-
-        n = np.zeros(10)
-        n = n + 1
-
-        hdu = stpyfits.PrimaryHDU(n)
-        hdu.header.set('PIXVALUE', 1.0, 'Constant pixel value', after='EXTEND')
-        hdu.header.set('NAXIS', 0)
-        stpyfits.writeto(self.temp('new.fits'), hdu.data, hdu.header,
-                         clobber=True)
-        hdul = stpyfits.open(self.temp('new.fits'))
-        hdul1 = fits.open(self.temp('new.fits'))
-
-        assert_equal(hdul[0].header['NAXIS'], 1)
-        assert_equal(hdul[0].header['NAXIS1'], 10)
-        assert_equal(hdul[0].header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdul[0].header['NPIX1'])
-
-        assert_true((hdul[0].data == np.ones(10, dtype=np.float32)).all())
-
-        assert_equal(hdul1[0].header['NAXIS'], 0)
-        assert_equal(hdul1[0].header['NPIX1'], 10)
-        assert_equal(hdul1[0].header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NAXIS1'])
-
-        assert_equal(hdul1[0].data, None)
-
-        hdul.close()
-        hdul1.close()
-
-    def testHDUListWritetoMethod(self):
-        """Test the writeto method of HDUList in both the fits and stpyfits
-           namespace."""
-
-        hdu = stpyfits.PrimaryHDU()
-        hdu1 = stpyfits.ImageHDU()
-        hdu.data = np.zeros((10, 10), dtype=np.int32)
-        hdu1.data = hdu.data + 2
-        hdu.header.set('BITPIX', 32)
-        hdu1.header.set('BITPIX', 32)
-        hdu.header.set('NAXIS', 2)
-        hdu.header.set('NAXIS1', 10, 'length of constant array axis 1',
-                       after='NAXIS')
-        hdu.header.set('NAXIS2', 10, 'length of constant array axis 2',
-                       after='NAXIS1')
-        hdu.header.set('PIXVALUE', 0, 'Constant pixel value')
-        hdu1.header.set('PIXVALUE', 2, 'Constant pixel value', after='GCOUNT')
-        hdu1.header.set('NAXIS', 2)
-        hdu1.header.set('NAXIS1', 10, 'length of constant array axis 1',
-                        after='NAXIS')
-        hdu1.header.set('NAXIS2', 10, 'length of constant array axis 2',
-                        after='NAXIS1')
-        hdul = stpyfits.HDUList([hdu,hdu1])
-        hdul.writeto(self.temp('new.fits'), clobber=True)
-
-        assert_equal(stpyfits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (10, 10), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-
-        assert_equal(fits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 7, (), '', ''),
-             (1, '', 'ImageHDU', 8, (), '', '')])
-
-        hdul1 = stpyfits.open(self.temp('new.fits'))
-        hdul2 = fits.open(self.temp('new.fits'))
-
-        assert_equal(hdul1[0].header['NAXIS'], 2)
-        assert_equal(hdul1[0].header['NAXIS1'], 10)
-        assert_equal(hdul1[0].header['NAXIS2'], 10)
-        assert_equal(hdul1[0].header['PIXVALUE'], 0)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NPIX1'])
-        assert_raises(KeyError, lambda: hdul1[0].header['NPIX2'])
-
-        assert_true((hdul1[0].data ==
-                     np.zeros((10, 10), dtype=np.int32)).all())
-
-        assert_equal(hdul1[1].header['NAXIS'], 2)
-        assert_equal(hdul1[1].header['NAXIS1'], 10)
-        assert_equal(hdul1[1].header['NAXIS2'], 10)
-        assert_equal(hdul1[1].header['PIXVALUE'], 2)
-
-        assert_raises(KeyError, lambda: hdul1[1].header['NPIX1'])
-        assert_raises(KeyError, lambda: hdul1[1].header['NPIX2'])
-
-        assert_true((hdul1[1].data ==
-                     (np.zeros((10, 10), dtype=np.int32) + 2)).all())
-
-        assert_equal(hdul2[0].header['NAXIS'], 0)
-        assert_equal(hdul2[0].header['NPIX1'], 10)
-        assert_equal(hdul2[0].header['NPIX2'], 10)
-        assert_equal(hdul2[0].header['PIXVALUE'], 0)
-
-        assert_raises(KeyError, lambda: hdul2[0].header['NAXIS1'])
-        assert_raises(KeyError, lambda: hdul2[0].header['NAXIS2'])
-
-        assert_equal(hdul2[0].data, None)
-
-        assert_equal(hdul2[1].header['NAXIS'], 0)
-        assert_equal(hdul2[1].header['NPIX1'], 10)
-        assert_equal(hdul2[1].header['NPIX2'], 10)
-        assert_equal(hdul2[1].header['PIXVALUE'], 2)
-
-        assert_raises(KeyError, lambda: hdul2[1].header['NAXIS1'])
-        assert_raises(KeyError, lambda: hdul2[1].header['NAXIS2'])
-
-        hdul1.close()
-        hdul2.close()
-
-    def testHDUList__getitem__Method(self):
-        """Test the __getitem__ method of st_HDUList in the stpyfits
-           namespace."""
-
-        n = np.zeros(10)
-        n = n + 1
-
-        hdu = stpyfits.PrimaryHDU(n)
-        hdu.header.set('PIXVALUE', 1., 'constant pixel value', after='EXTEND')
-
-        hdu.writeto(self.temp('new.fits'), clobber=True)
-
-        hdul = stpyfits.open(self.temp('new.fits'))
-        hdul1 = fits.open(self.temp('new.fits'))
-
-        hdu = hdul[0]
-        hdu1 = hdul1[0]
-
-        assert_equal(hdu.header['NAXIS'], 1)
-        assert_equal(hdu.header['NAXIS1'], 10)
-        assert_equal(hdu.header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdu.header['NPIX1'])
-
-        assert_true((hdu.data == np.ones(10, dtype=np.float32)).all())
-        assert_equal(hdu1.header['NAXIS'], 0)
-        assert_equal(hdu1.header['NPIX1'], 10)
-        assert_equal(hdu1.header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdu1.header['NAXIS1'])
-
-        assert_equal(hdu1.data, None)
-
-        hdul.close()
-        hdul1.close()
-
-    def testHDUListFlushMethod(self):
-        """Test the flush method of HDUList in both the fits and stpyfits
-           namespace."""
-
-        hdu = stpyfits.PrimaryHDU()
-        hdu1 = stpyfits.ImageHDU()
-        hdu.data = np.zeros((10, 10), dtype=np.int32)
-        hdu1.data = hdu.data + 2
-        hdu.header.set('BITPIX', 32)
-        hdu1.header.set('BITPIX', 32)
-        hdu.header.set('NAXIS', 2)
-        hdu.header.set('NAXIS1', 10, 'length of constant array axis 1',
-                       after='NAXIS')
-        hdu.header.set('NAXIS2', 10, 'length of constant array axis 2',
-                       after='NAXIS1')
-        hdu.header.set('PIXVALUE', 0, 'Constant pixel value')
-        hdu1.header.set('PIXVALUE', 2, 'Constant pixel value', after='GCOUNT')
-        hdu1.header.set('NAXIS', 2)
-        hdu1.header.set('NAXIS1', 10, 'length of constant array axis 1',
-                        after='NAXIS')
-        hdu1.header.set('NAXIS2', 10, 'length of constant array axis 2',
-                        after='NAXIS1')
-        hdul = stpyfits.HDUList([hdu, hdu1])
-        hdul.writeto(self.temp('new.fits'), clobber=True)
-
-        hdul = stpyfits.open(self.temp('new.fits'), 'update')
-        d = np.arange(10, dtype=np.int32)
-        d = d * 0
-        d = d + 3
-        hdul[0].data = d
-        hdul.flush()
-        hdul.close()
-
-        assert_equal(stpyfits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (10,), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-        assert_equal(fits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', ''),
-             (1, '', 'ImageHDU', 8, (), '', '')])
-
-        hdul1 = stpyfits.open(self.temp('new.fits'))
-        hdul2 = fits.open(self.temp('new.fits'))
-
-        assert_equal(hdul1[0].header['NAXIS'], 1)
-        assert_equal(hdul1[0].header['NAXIS1'], 10)
-        assert_equal(hdul1[0].header['PIXVALUE'], 3)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NPIX1'])
-
-        assert_true((hdul1[0].data ==
-                     (np.zeros(10, dtype=np.int32) + 3)).all())
-
-        assert_equal(hdul2[0].header['NAXIS'], 0)
-        assert_equal(hdul2[0].header['NPIX1'], 10)
-        assert_equal(hdul2[0].header['PIXVALUE'], 3)
-
-        assert_raises(KeyError, lambda: hdul2[0].header['NAXIS1'])
-
-        assert_equal(hdul2[0].data, None)
-
-        hdul1.close()
-        hdul2.close()
-
-        hdul3 = stpyfits.open(self.temp('new.fits'), 'update')
-        d = np.arange(15, dtype=np.int32)
-        d = d * 0
-        d = d + 4
-        hdul3[0].data = d
-        hdul3.close()      # Note that close calls flush
-
-        assert_equal(stpyfits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (15,), 'int32', ''),
-             (1, '', 'ImageHDU', 8, (10, 10), 'int32', '')])
-        assert_equal(fits.info(self.temp('new.fits'), output=False),
-            [(0, 'PRIMARY', 'PrimaryHDU', 6, (), '', ''),
-             (1, '', 'ImageHDU', 8, (), '', '')])
-
-        hdul1 = stpyfits.open(self.temp('new.fits'))
-        hdul2 = fits.open(self.temp('new.fits'))
-
-        assert_equal(hdul1[0].header['NAXIS'], 1)
-        assert_equal(hdul1[0].header['NAXIS1'], 15)
-        assert_equal(hdul1[0].header['PIXVALUE'], 4)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NPIX1'])
-
-        assert_true((hdul1[0].data ==
-                     (np.zeros(15, dtype=np.int32) + 4)).all())
-
-        assert_equal(hdul2[0].header['NAXIS'], 0)
-        assert_equal(hdul2[0].header['NPIX1'], 15)
-        assert_equal(hdul2[0].header['PIXVALUE'], 4)
-
-        assert_raises(KeyError, lambda: hdul2[0].header['NAXIS1'])
-
-        assert_equal(hdul2[0].data, None)
-
-        hdul1.close()
-        hdul2.close()
-
-    def testImageBaseHDU__getattr__Method(self):
-        """Test the __getattr__ method of ImageBaseHDU in both the fits
-           and stpyfits namespace."""
-
-        hdul = stpyfits.open(self.data('cdva2.fits'))
-        hdul1 = fits.open(self.data('cdva2.fits'))
-
-        hdu = hdul[0]
-        hdu1 = hdul1[0]
-
-        assert_true((hdu.data == np.ones((10, 10), dtype=np.int32)).all())
-        assert_equal(hdu1.data, None)
-
-        hdul.close()
-        hdul1.close()
-
-    def testImageBaseHDUWriteToMethod(self):
-        """Test the writeto method of _ConstantValueImageBaseHDU in the
-        stpyfits namespace."""
-
-        n = np.zeros(10)
-        n = n + 1
-
-        hdu = stpyfits.PrimaryHDU(n)
-        hdu.header.set('PIXVALUE', 1., 'constant pixel value', after='EXTEND')
-
-        hdu.writeto(self.temp('new.fits'), clobber=True)
-
-        hdul = stpyfits.open(self.temp('new.fits'))
-        hdul1 = fits.open(self.temp('new.fits'))
-
-        assert_equal(hdul[0].header['NAXIS'], 1)
-        assert_equal(hdul[0].header['NAXIS1'], 10)
-        assert_equal(hdul[0].header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdul[0].header['NPIX1'])
-
-        assert_true((hdul[0].data == np.ones(10, dtype=np.float32)).all())
-
-        assert_equal(hdul1[0].header['NAXIS'], 0)
-        assert_equal(hdul1[0].header['NPIX1'], 10)
-        assert_equal(hdul1[0].header['PIXVALUE'], 1.0)
-
-        assert_raises(KeyError, lambda: hdul1[0].header['NAXIS1'])
-
-        assert_equal(hdul1[0].data, None)
-
-        hdul.close()
-        hdul1.close()
-
-    def testStrayPixvalue(self):
-        """Regression test for #885
-        (https://svn.stsci.edu/trac/ssb/stsci_python/ticket/885)
-
-        Tests that HDUs containing a non-zero NAXIS as well as a PIXVALUE
-        keyword in their header are not treated as constant value HDUs.
-        """
-
-        data = np.arange(100).reshape((10, 10))
-        phdu = fits.PrimaryHDU(data=data)
-        hdu = fits.ImageHDU(data=data)
-
-        phdu.header['PIXVALUE'] = 10
-        hdu.header['PIXVALUE'] = 10
-
-        hdul = fits.HDUList([phdu, hdu])
-        hdul.writeto(self.temp('test.fits'))
-
-        with stpyfits.open(self.temp('test.fits')) as h:
-            assert_false(isinstance(h[0], stpyfits.ConstantValuePrimaryHDU))
-            assert_false(isinstance(h[1], stpyfits.ConstantValueImageHDU))
-            assert_true((h[0].data == data).all())
-            assert_true((h[1].data == data).all())
-
-    def testDimensionlessConstantValueArray(self):
-        """Tests a case that was reported where an HDU can be a constant
-        value HDU (it has a PIXVALUE and NAXIS=0) but NPIX1 = NPIX2 = 0 as
-        well.
-        """
-
-        hdu = stpyfits.PrimaryHDU()
-        hdu.header['NAXIS'] = 0
-        hdu.header['BITPIX'] = 16
-        hdu.header['NPIX1'] = 0
-        hdu.header['NPIX2'] = 0
-        hdu.header['PIXVALUE'] = 0
-
-        hdu.writeto(self.temp('test.fits'))
-
-        with stpyfits.open(self.temp('test.fits')) as h:
-            assert_true(h[0].data is None)
-
-            h.writeto(self.temp('test2.fits'))
-
-    def testDeconvertConstantArray(self):
-        """When a constant value array's data is overridden with non-
-        constant data, test that when saving the file it removes
-        all constant value array keywords and is treated as a normal image
-        HDU.
-        """
-
-        data = np.ones((100, 100))
-        hdu = stpyfits.PrimaryHDU(data=data)
-        hdu.header['PIXVALUE'] = 1
-        hdu.writeto(self.temp('test.fits'))
-
-        with stpyfits.open(self.temp('test.fits'), mode='update') as h:
-            assert_equal(h[0].header['PIXVALUE'], 1)
-            h[0].data[20:80, 20:80] = 2
-
-        with fits.open(self.temp('test.fits')) as h:
-            assert_true('PIXVALUE' not in h[0].header)
-            assert_true('NPIX1' not in h[0].header)
-            assert_true('NPIX2' not in h[0].header)
-            assert_equal(h[0].header.count('NAXIS'), 1)
-            assert_equal(h[0].header['NAXIS'], 2)
-            assert_equal(h[0].header['NAXIS1'], 100)
-            assert_equal(h[0].header['NAXIS2'], 100)
-            assert_equal(h[0].data.max(), 2)
-            assert_equal(h[0].data.min(), 1)
-
-    def testGetvalExtensionHDU(self):
-        """Regression test for an issue that came up with the fact that
-        ImageHDU has a different argument signature from PrimaryHDU.
-        """
-
-        data = np.ones((100, 100))
-        hdu = stpyfits.ImageHDU(data=data)
-        hdu.header['PIXVALUE'] = 1
-        hdu.header['FOO'] = 'test'
-        hdul = stpyfits.HDUList([stpyfits.PrimaryHDU(), hdu])
-        hdul.writeto(self.temp('test.fits'))
-
-        assert_equal(stpyfits.getval(self.temp('test.fits'), 'FOO', ext=1),
-                     'test')
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tests/test_xyinterp.py b/required_pkgs/stsci.tools/lib/stsci/tools/tests/test_xyinterp.py
deleted file mode 100644
index 09cc11a..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tests/test_xyinterp.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from __future__ import division # confidence high
-
-from stsci.tools.xyinterp import xyinterp
-import numpy as N
-
-x=N.array((1,2,3,4,5))
-y=x.copy()
-
-def test_xyinterp_1():
-    #test 1
-    ans = xyinterp(x,y,3)
-    assert ans == 3, "Test 1 failed, ans = %f, should be 3"%ans
-
-def test_xyinterp_2():
-    #test 2
-    ans = xyinterp(x,y,3.5)
-    assert ans == 3.5, "Test 2 failed, ans = %f, should be 3.5"%ans
-
-def test_xyinterp_3():
-    #test 3
-    try:
-        ans = xyinterp(x,y,-3)
-        raise AssertionError( "Test 3 failed; should have thrown an exception, answer = %s" % str(ans))
-    except ValueError:
-        pass
-
-def test_xyinterp_4():
-    #test 4
-    try:
-        ans = xyinterp(x,y,5.6)
-        raise AssertionError( "Test 4 failed; should have thrown an exception, answer = %s" % str(ans))
-    except ValueError:
-        pass
-    
-def test_xyinterp_5():
-    #test 5
-    x=N.array((1,3,7,9,12))
-    y=N.array((5,10,15,20,25))
-    ans = xyinterp(x,y,8)
-    assert ans == 17.5, "Test 5 failed, ans = %f, should be 17.5"%ans
-
-def test_xyinterp_6():
-    #test 6
-    x=N.array((5,3,6,2,7,0))
-    y=N.array((4,6,2,4,6,2))
-    try:
-        ans = xyinterp(x,y,2)
-        raise AssertionError( "Test 6 failed; should have thrown an exception, answer = %s" % str(ans))
-    except ValueError:
-        pass
-
-def test_xyinterp_7():
-    #test 7
-    x=N.array((1,2,3,4,5))
-    y=N.arange(20)
-    
-    try:
-        ans = xyinterp(x,y,2)
-        raise AssertionError( "Test 7 failed; should have thrown an exception, answer = %s" % str(ans))
-    except ValueError:
-        pass
-
-if __name__ == '__main__':
-    test_xyinterp_1()
-    test_xyinterp_2()
-    test_xyinterp_3()
-    test_xyinterp_4()
-    test_xyinterp_5()
-    test_xyinterp_6()
-    test_xyinterp_7()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/testutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/testutil.py
deleted file mode 100644
index 04c6686..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/testutil.py
+++ /dev/null
@@ -1,237 +0,0 @@
-from __future__ import division, print_function
-import math, os, sys, time
-import unittest
-import numpy as N
-""" This module extends the built-in unittest capabilities to facilitate
-performing floating point comparisons on scalars and numpy arrays. It also
-provides functions that automate building a test suite from all tests
-present in the module, and running the tests in standard or debug mode.
-
-To use this module, import it along with unittest [QUESTION: should this
-module import everything from unittest into its namespace to make life
-even easier?]. Subclass test cases from testutil.FPTestCase instead of
-unittest.TestCase. Call testall or debug from this module:
-
-import testutil
-
-class FileTestCase(testutil.FPTestCase):
-    def setUp(self):
-        assorted_test_setup
-
-    def testone(self):
-        self.assertEqual(1,2)
-
-    def testtwo(self):
-        self.assertApproxNumpy(arr1,arr2,accuracy=1e-6)
-
-    def tearDown(self):
-        assorted_test_teardown
-
-if __name__ == '__main__':
-    if 'debug' in sys.argv:
-        testutil.debug(__name__)
-    else:
-        testutil.testall(__name__,2)
-
-To run the tests in normal mode from the shell, then do the following:
-    python my_module.py
-It will run all tests, success or failure, and print a summary of the results.
-
-To run the tests in debug mode from the shell, do the following:
-    python -i my_module.py debug
-    >>> import pdb
-    >>> pdb.pm()
-In debug mode, it will run until it encounters the first failed test, then
-stop. Thus if you run with the -i switch, you can then import pdb and
-proceed with the usual debugger commands.
-
-If you prefer to run your tests from within the python interpreter,
-you may import this module and call its testall() and debug() functions
-explicitly. The modules you are testing must be visible in your sys.path.
-
->>>import testutil as U
->>> U.testall('ui_test')
-
-"""
-
-class LogTestCase(unittest.TestCase):
-   """Override the .run() method to do some logging"""
-   def run(self, result=None):
-        if result is None: result = self.defaultTestResult()
-        result.startTest(self)
-        testMethod = getattr(self, self._testMethodName)
-        try:
-            try:
-                self.setUp()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                self.log('E')
-                return
-
-            ok = False
-            try:
-                testMethod()
-                ok = True
-                self.log("P")
-            except self.failureException:
-                result.addFailure(self, self._exc_info())
-                self.log("F")
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                self.log("E")
-
-            try:
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                result.addError(self, self._exc_info())
-                ok = False
-            if ok: result.addSuccess(self)
-        finally:
-            result.stopTest(self)
-
-   def log(self,status,name=None):
-      """Creates a log file containing the test name, status,and timestamp,
-      as well as any attributes in the tda and tra dictionaries if present.
-      Does not yet support fancy separating of multi-line items."""
-      if name is None:
-         try:
-            name=self.name
-         except AttributeError:
-            name=self.id()
-      try:
-         f=open(name+'.log','w')
-      except IOError as e:
-         print("Error opening log file: %s"%e.strerror)
-         print("***No Logging Performed***")
-         return
-
-      f.write("%s:: Name=%s\n"%(name,name))
-      f.write("%s:: Status=%s\n"%(name,status))
-      f.write("%s:: Time=%s\n"%(name,time.asctime()))
-      try:
-         for k in self.tda:
-            f.write("%s:: tda_%s=%s\n"%(name,str(k),str(self.tda[k])))
-      except AttributeError:
-         pass
-
-      try:
-         for k in self.tra:
-            f.write("%s:: tra_%s=%s\n"%(name,str(k),str(self.tra[k])))
-      except AttributeError:
-         pass
-
-      if status == 'E':
-          f.write("%s:: tra_Trace=%s\n"%(name,str(self._exc_info())))
-
-      f.write("END\n")
-      f.close()
-
-
-
-
-
-class FPTestCase(unittest.TestCase):
-    ''' Base class to hold some functionality related to floating-point
-    precision and array comparisons'''
-
-    def assertApproxFP(self, testvalue, expected, accuracy=1.0e-5):
-        ''' Floating point comparison  '''
-        result = math.fabs((testvalue - expected) / expected)
-        self.failUnless(result <= accuracy,"test: %g, ref: %g"%(testvalue,expected))
-
-    def assertApproxNumpy(self, testarray, expected, accuracy=1.0e-5):
-        ''' Floating point array comparison '''
-        result=N.abs(testarray-expected)/expected
-        self.failUnless(N.alltrue(result <= accuracy))
-
-    def assertEqualNumpy(self, testarray, expected):
-        ''' Identical FP array comparison '''
-        self.failUnless(N.alltrue(testarray == expected))
-
-class LogTextRunner(unittest.TextTestRunner):
-    """ Redefines the .run() method to call a .log() method on the test
-    when it is complete. """
-
-    def run(self, test):
-        "Run the given test case or test suite."
-        result = self._makeResult()
-        startTime = time.time()
-        test(result)
-        stopTime = time.time()
-        timeTaken = stopTime - startTime
-        result.printErrors()
-        self.stream.writeln(result.separator2)
-        run = result.testsRun
-        self.stream.writeln("Ran %d test%s in %.3fs" %
-                            (run, run != 1 and "s" or "", timeTaken))
-        self.stream.writeln()
-        if not result.wasSuccessful():
-            self.stream.write("FAILED (")
-            failed, errored = list(map(len, (result.failures, result.errors)))
-            if failed:
-                self.stream.write("failures=%d" % failed)
-                test.log("F")
-            if errored:
-                if failed: self.stream.write(", ")
-                self.stream.write("errors=%d" % errored)
-                test.log("E")
-            self.stream.writeln(")")
-        else:
-            self.stream.writeln("OK")
-            test.log("P")
-
-        return result
-
-def buildsuite(module):
-    """Builds a test suite containing all tests found in the module.
-    Returns the suite."""
-    M = __import__(module)
-    suite = unittest.defaultTestLoader.loadTestsFromModule(M)
-    return suite
-
-def debug(module):
-    """ Build the test suite, then run in debug mode, which allows for postmortems"""
-    buildsuite(module).debug()
-
-def testall(module,verb=0):
-    """ Build and run the suite through the testrunner. Verbosity level
-    defaults to quiet but can be set to 2 to produce a line as it runs
-    each test. A summary of the number of tests run, errors, and failures
-    is always printed at the end."""
-    result=unittest.TextTestRunner(verbosity=verb).run(buildsuite(module))
-    return result
-
-def testlog(module,verb=0):
-    result=LogTextRunner(verbosity=verb).run(buildsuite(module))
-    return result
-
-def dump_file(fname, hdrwidth=80):
-    """ Convenience function to dump a named file to the stdout, with
-    an optional header listing the filename.  This is easy to do without
-    a convenience function like this, but having one reduces code in the XML
-    test files. """
-    assert os.path.exists(fname), "dump_file could not find: "+fname
-    sys.stdout.flush()
-    if hdrwidth>0:
-        print("")
-        print("="*hdrwidth)
-        print(fname+':')
-        print("="*hdrwidth)
-    f = open(fname, 'r')
-    for line in f:
-        print(line.rstrip())
-    f.close()
-
-def dump_all_log_files(hdrwidth=80):
-    """ Convenience function to dump all *.log files in cwd to the stdout,
-    with an optional header listing each filename. See dump_file. """
-    import glob
-    flist = glob.glob('*.log')
-    for f in flist:
-        dump_file(f, hdrwidth=hdrwidth)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/textutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/textutil.py
deleted file mode 100644
index a53cb3f..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/textutil.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""Text output-related utilities."""
-
-
-import textwrap
-
-
-def wrap(text, width, *args, **kwargs):
-    """
-    Like :func:`textwrap.wrap` but preserves existing newlines which
-    :func:`textwrap.wrap` does not otherwise handle well.
-
-    See Also
-    --------
-    :func:`textwrap.wrap`
-    """
-
-    return sum([textwrap.wrap(line, width, *args, **kwargs)
-                if line else [''] for line in text.splitlines()], [])
-
-
-def textbox(text, width=78, boxchar='#', indent=0):
-    """
-    Outputs line-wrapped text wrapped in a box drawn with a repeated (usually
-    ASCII) character.
-
-    For example:
-
-        >>> textbox('Text to wrap', width=16)
-        ################
-        #              #
-        # Text to wrap #
-        #              #
-        ################
-
-    Parameters
-    ----------
-    text : string
-        The text to wrap
-
-    width : int
-        The width of the entire box, including the perimeter and
-        the indentation space.  Because the
-        wrapped text is padded with an additional column of whitespace on each
-        side, the minimum width is 5--any width less than that is
-        is automatically increased to 5 (default: 78)
-
-    boxchar : string
-        (No pun intended.) The character to draw the box with.  May also
-        be a string of multiple characters (default: '#')
-
-    indent : int
-        Amount of space by which the box should be indented. (default: 0)
-
-    """
-
-    min_width = len(boxchar) * 2 + 3
-    width = max(width-indent, min_width)
-    indentspace = indent * ' '
-
-    wrap_width = width - min_width + 1
-
-    q, r = divmod(width, len(boxchar))
-    # The top/bottom border
-    top_border = indentspace + boxchar * q + boxchar[:r]
-    top_padding  = indentspace + boxchar + ' ' * (width - len(boxchar) * 2) + boxchar
-
-    lines = ['%s%s %s %s' % (indentspace, boxchar, line.ljust(wrap_width),
-                             boxchar)
-             for line in wrap(text, wrap_width)]
-    top = [top_border, top_padding]
-    bottom = [top_padding, top_border]
-    return '\n'.join(top + lines + bottom)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/tkrotext.py b/required_pkgs/stsci.tools/lib/stsci/tools/tkrotext.py
deleted file mode 100644
index aa96a0d..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/tkrotext.py
+++ /dev/null
@@ -1,103 +0,0 @@
-""" Read-Only Tkinter Text Widget.  This is a variation of the Tkinter Text
-widget in that the text itself is not editable (it is read-only), but it allows
-selection for cut/paste to other apps.  Cut-paste may currently only work
-under X11. (9/2015 enabled under OSX by adding 'c' to ALLOWED_SYMS)
-
-A vastly simpler way of doing this is to use a Tkinter.Text widget and set
-it to DISABLED, but then you cannot select text.
-$Id$
-"""
-from __future__ import division # confidence high
-
-# System level modules
-import sys
-PY3K = sys.version_info[0] > 2
-
-if PY3K:
-    import tkinter as Tkinter
-else:
-    import Tkinter
-
-ALLOWED_SYMS = ('c','Up','Down','Left','Right','Home','End','Prior','Next', \
-                'Shift_L', 'Shift_R')
-
-class ROText(Tkinter.Text):
-
-    def __init__(self, master, **kw):
-        """  defer most of __init__ to the base class """
-        self._fbto = None
-        if 'focusBackTo' in kw:
-            self._fbto = kw['focusBackTo']
-            del kw['focusBackTo']
-        Tkinter.Text.__init__(self, master, **kw)
-        # override some bindings to return a "break" string
-        self.bind("<Key>", self.ignoreMostKeys)
-        self.bind("<Button-2>", lambda e: "break")
-        self.bind("<Button-3>", lambda e: "break")
-        if self._fbto:
-            self.bind("<Leave>", self.mouseLeft)
-        self.config(insertwidth=0)
-
-    # disallow common insert calls, but allow a backdoor when needed
-    def insert(self, index, text, *tags, **kw):
-        if 'force' in kw and kw['force']:
-            Tkinter.Text.insert(self, index, text, *tags)
-
-    # disallow common delete calls, but allow a backdoor when needed
-    def delete(self, start, end=None, force=False):
-        if force:
-            Tkinter.Text.delete(self, start, end)
-
-    # a way to disable text manip
-    def ignoreMostKeys(self, event):
-        if event.keysym not in ALLOWED_SYMS:
-            return "break" # have to return this string to stop the event
-        # To get copy/paste working on OSX we allow 'c' so that
-        # they can type 'Command-c', but don't let a regular 'c' through.
-        if event.keysym in ('c','C'):
-            if sys.platform=='darwin' and hasattr(event,'state') and event.state != 0:
-                pass # allow this through, it is Command-c
-            else:
-                return "break"
-
-
-    def mouseLeft(self, event):
-        if self._fbto:
-            self._fbto.focus_set()
-        return "break" # have to return this string to stop the event
-
-
-# Test the above class
-if __name__ == '__main__':
-
-    import sys, time
-
-    rot = None
-
-    def quit():
-        sys.exit()
-
-    def clicked():
-        rot.insert(Tkinter.END, "\nClicked at "+time.asctime(), force=True)
-        rot.see(Tkinter.END)
-
-    # make our test window
-    top = Tkinter.Tk()
-    f = Tkinter.Frame(top)
-
-    sc = Tkinter.Scrollbar(f)
-    sc.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
-    rot = ROText(f, wrap=Tkinter.WORD, height=10, yscrollcommand=sc.set,
-                 focusBackTo=top)
-    rot.pack(side=Tkinter.TOP, fill=Tkinter.X, expand=True)
-    sc.config(command=rot.yview)
-    f.pack(side=Tkinter.TOP, fill=Tkinter.X)
-
-    b = Tkinter.Button(top, text='Click Me', command=clicked)
-    b.pack(side=Tkinter.TOP, fill=Tkinter.X, expand=1)
-
-    q = Tkinter.Button(top, text='Quit', command=quit)
-    q.pack(side=Tkinter.TOP)
-
-    # start
-    top.mainloop()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/validate.py b/required_pkgs/stsci.tools/lib/stsci/tools/validate.py
deleted file mode 100644
index 61d3d5f..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/validate.py
+++ /dev/null
@@ -1,1460 +0,0 @@
-# validate.py
-# A Validator object
-# Copyright (C) 2005-2010 Michael Foord, Mark Andrews, Nicola Larosa
-# E-mail: fuzzyman AT voidspace DOT org DOT uk
-#         mark AT la-la DOT com
-#         nico AT tekNico DOT net
-
-# This software is licensed under the terms of the BSD license.
-# http://www.voidspace.org.uk/python/license.shtml
-# Basically you're free to copy, modify, distribute and relicense it,
-# So long as you keep a copy of the license with it.
-
-# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
-# For information about bugfixes, updates and support, please join the
-# ConfigObj mailing list:
-# http://lists.sourceforge.net/lists/listinfo/configobj-develop
-# Comments, suggestions and bug reports welcome.
-
-"""
-    The Validator object is used to check that supplied values 
-    conform to a specification.
-    
-    The value can be supplied as a string - e.g. from a config file.
-    In this case the check will also *convert* the value to
-    the required type. This allows you to add validation
-    as a transparent layer to access data stored as strings.
-    The validation checks that the data is correct *and*
-    converts it to the expected type.
-    
-    Some standard checks are provided for basic data types.
-    Additional checks are easy to write. They can be
-    provided when the ``Validator`` is instantiated or
-    added afterwards.
-    
-    The standard functions work with the following basic data types :
-    
-    * integers
-    * floats
-    * booleans
-    * strings
-    * ip_addr
-    
-    plus lists of these datatypes
-    
-    Adding additional checks is done through coding simple functions.
-    
-    The full set of standard checks are : 
-    
-    * 'integer': matches integer values (including negative)
-                 Takes optional 'min' and 'max' arguments : ::
-    
-                   integer()
-                   integer(3, 9)  # any value from 3 to 9
-                   integer(min=0) # any positive value
-                   integer(max=9)
-    
-    * 'float': matches float values
-               Has the same parameters as the integer check.
-    
-    * 'boolean': matches boolean values - ``True`` or ``False``
-                 Acceptable string values for True are :
-                   true, on, yes, 1
-                 Acceptable string values for False are :
-                   false, off, no, 0
-    
-                 Any other value raises an error.
-    
-    * 'ip_addr': matches an Internet Protocol address, v.4, represented
-                 by a dotted-quad string, i.e. '1.2.3.4'.
-    
-    * 'string': matches any string.
-                Takes optional keyword args 'min' and 'max'
-                to specify min and max lengths of the string.
-    
-    * 'list': matches any list.
-              Takes optional keyword args 'min', and 'max' to specify min and
-              max sizes of the list. (Always returns a list.)
-    
-    * 'tuple': matches any tuple.
-              Takes optional keyword args 'min', and 'max' to specify min and
-              max sizes of the tuple. (Always returns a tuple.)
-    
-    * 'int_list': Matches a list of integers.
-                  Takes the same arguments as list.
-    
-    * 'float_list': Matches a list of floats.
-                    Takes the same arguments as list.
-    
-    * 'bool_list': Matches a list of boolean values.
-                   Takes the same arguments as list.
-    
-    * 'ip_addr_list': Matches a list of IP addresses.
-                     Takes the same arguments as list.
-    
-    * 'string_list': Matches a list of strings.
-                     Takes the same arguments as list.
-    
-    * 'mixed_list': Matches a list with different types in 
-                    specific positions. List size must match
-                    the number of arguments.
-    
-                    Each position can be one of :
-                    'integer', 'float', 'ip_addr', 'string', 'boolean'
-    
-                    So to specify a list with two strings followed
-                    by two integers, you write the check as : ::
-    
-                      mixed_list('string', 'string', 'integer', 'integer')
-    
-    * 'pass': This check matches everything ! It never fails
-              and the value is unchanged.
-    
-              It is also the default if no check is specified.
-    
-    * 'option': This check matches any from a list of options.
-                You specify this check with : ::
-    
-                  option('option 1', 'option 2', 'option 3')
-    
-    You can supply a default value (returned if no value is supplied)
-    using the default keyword argument.
-    
-    You specify a list argument for default using a list constructor syntax in
-    the check : ::
-    
-        checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
-    
-    A badly formatted set of arguments will raise a ``VdtParamError``.
-"""
-
-from __future__ import division, print_function # confidence high
-
-__version__ = '1.0.1'
-
-
-__all__ = (
-    '__version__',
-    'dottedQuadToNum',
-    'numToDottedQuad',
-    'ValidateError',
-    'VdtUnknownCheckError',
-    'VdtParamError',
-    'VdtTypeError',
-    'VdtValueError',
-    'VdtValueTooSmallError',
-    'VdtValueTooBigError',
-    'VdtValueTooShortError',
-    'VdtValueTooLongError',
-    'VdtMissingValue',
-    'Validator',
-    'is_integer',
-    'is_float',
-    'is_boolean',
-    'is_list',
-    'is_tuple',
-    'is_ip_addr',
-    'is_string',
-    'is_int_list',
-    'is_bool_list',
-    'is_float_list',
-    'is_string_list',
-    'is_ip_addr_list',
-    'is_mixed_list',
-    'is_option',
-    '__docformat__',
-)
-
-
-import re
-import sys
-PY3K = sys.version_info[0] > 2
-
-if PY3K:
-    string_types = str
-    number_types = (int, float)
-    int_or_string_types = (int, str)
-    number_or_string_types = (int, float, str)
-    long = int
-else:
-    string_types = basestring
-    number_types = (int, long, float)
-    int_or_string_types = (int, long, basestring) 
-    number_or_string_types = (int, long, float, basestring)
-
-_list_arg = re.compile(r'''
-    (?:
-        ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
-            (
-                (?:
-                    \s*
-                    (?:
-                        (?:".*?")|              # double quotes
-                        (?:'.*?')|              # single quotes
-                        (?:[^'",\s\)][^,\)]*?)  # unquoted
-                    )
-                    \s*,\s*
-                )*
-                (?:
-                    (?:".*?")|              # double quotes
-                    (?:'.*?')|              # single quotes
-                    (?:[^'",\s\)][^,\)]*?)  # unquoted
-                )?                          # last one
-            )
-        \)
-    )
-''', re.VERBOSE | re.DOTALL)    # two groups
-
-_list_members = re.compile(r'''
-    (
-        (?:".*?")|              # double quotes
-        (?:'.*?')|              # single quotes
-        (?:[^'",\s=][^,=]*?)       # unquoted
-    )
-    (?:
-    (?:\s*,\s*)|(?:\s*$)            # comma
-    )
-''', re.VERBOSE | re.DOTALL)    # one group
-
-_paramstring = r'''
-    (?:
-        (
-            (?:
-                [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
-                    (?:
-                        \s*
-                        (?:
-                            (?:".*?")|              # double quotes
-                            (?:'.*?')|              # single quotes
-                            (?:[^'",\s\)][^,\)]*?)       # unquoted
-                        )
-                        \s*,\s*
-                    )*
-                    (?:
-                        (?:".*?")|              # double quotes
-                        (?:'.*?')|              # single quotes
-                        (?:[^'",\s\)][^,\)]*?)       # unquoted
-                    )?                              # last one
-                \)
-            )|
-            (?:
-                (?:".*?")|              # double quotes
-                (?:'.*?')|              # single quotes
-                (?:[^'",\s=][^,=]*?)|       # unquoted
-                (?:                         # keyword argument
-                    [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
-                    (?:
-                        (?:".*?")|              # double quotes
-                        (?:'.*?')|              # single quotes
-                        (?:[^'",\s=][^,=]*?)       # unquoted
-                    )
-                )
-            )
-        )
-        (?:
-            (?:\s*,\s*)|(?:\s*$)            # comma
-        )
-    )
-    '''
-
-_matchstring = '^%s*' % _paramstring
-
-# Python pre 2.2.1 doesn't have bool
-try:
-    bool
-except NameError:
-    def bool(val):
-        """Simple boolean equivalent function. """
-        if val:
-            return 1
-        else:
-            return 0
-
-
-def dottedQuadToNum(ip):
-    """
-    Convert decimal dotted quad string to long integer
-    
-    >>> int(dottedQuadToNum('1 '))
-    1
-    >>> int(dottedQuadToNum(' 1.2'))
-    16777218
-    >>> int(dottedQuadToNum(' 1.2.3 '))
-    16908291
-    >>> int(dottedQuadToNum('1.2.3.4'))
-    16909060
-    >>> dottedQuadToNum('255.255.255.255')
-    4294967295
-    >>> dottedQuadToNum('255.255.255.256')
-    Traceback (most recent call last):
-    ValueError: Not a good dotted-quad IP: 255.255.255.256
-    """
-    
-    # import here to avoid it when ip_addr values are not used
-    import socket, struct
-
-    try:
-        return struct.unpack('!L',
-            socket.inet_aton(ip.strip()))[0]
-    except socket.error:
-        raise ValueError('Not a good dotted-quad IP: %s' % ip)
-    return
-
-
-def numToDottedQuad(num):
-    """
-    Convert long int to dotted quad string
-    
-    >>> numToDottedQuad(long(-1))
-    Traceback (most recent call last):
-    ValueError: Not a good numeric IP: -1
-    >>> numToDottedQuad(long(1))
-    '0.0.0.1'
-    >>> numToDottedQuad(long(16777218))
-    '1.0.0.2'
-    >>> numToDottedQuad(long(16908291))
-    '1.2.0.3'
-    >>> numToDottedQuad(long(16909060))
-    '1.2.3.4'
-    >>> numToDottedQuad(long(4294967295))
-    '255.255.255.255'
-    >>> numToDottedQuad(long(4294967296))
-    Traceback (most recent call last):
-    ValueError: Not a good numeric IP: 4294967296
-    """
-    
-    # import here to avoid it when ip_addr values are not used
-    import socket, struct
-    
-    # no need to intercept here, 4294967295 is fine
-    if num > 4294967295 or num < 0:
-        raise ValueError('Not a good numeric IP: %s' % num)
-    try:
-        return socket.inet_ntoa(
-            struct.pack('!L', long(num)))
-    except (socket.error, struct.error, OverflowError):
-        raise ValueError('Not a good numeric IP: %s' % num)
-
-
-class ValidateError(Exception):
-    """
-    This error indicates that the check failed.
-    It can be the base class for more specific errors.
-    
-    Any check function that fails ought to raise this error.
-    (or a subclass)
-    
-    >>> raise ValidateError
-    Traceback (most recent call last):
-    ValidateError
-    """
-
-
-class VdtMissingValue(ValidateError):
-    """No value was supplied to a check that needed one."""
-
-
-class VdtUnknownCheckError(ValidateError):
-    """An unknown check function was requested"""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtUnknownCheckError('yoda')
-        Traceback (most recent call last):
-        VdtUnknownCheckError: the check "yoda" is unknown.
-        """
-        ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
-
-
-class VdtParamError(SyntaxError):
-    """An incorrect parameter was passed"""
-
-    def __init__(self, name, value):
-        """
-        >>> raise VdtParamError('yoda', 'jedi')
-        Traceback (most recent call last):
-        VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
-        """
-        SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
-
-
-class VdtTypeError(ValidateError):
-    """The value supplied was of the wrong type"""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtTypeError('jedi')
-        Traceback (most recent call last):
-        VdtTypeError: the value "jedi" is of the wrong type.
-        """
-        ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
-
-
-class VdtValueError(ValidateError):
-    """The value supplied was of the correct type, but was not an allowed value."""
-    
-    def __init__(self, value):
-        """
-        >>> raise VdtValueError('jedi')
-        Traceback (most recent call last):
-        VdtValueError: the value "jedi" is unacceptable.
-        """
-        ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
-
-
-class VdtValueTooSmallError(VdtValueError):
-    """The value supplied was of the correct type, but was too small."""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtValueTooSmallError('0')
-        Traceback (most recent call last):
-        VdtValueTooSmallError: the value "0" is too small.
-        """
-        ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
-
-
-class VdtValueTooBigError(VdtValueError):
-    """The value supplied was of the correct type, but was too big."""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtValueTooBigError('1')
-        Traceback (most recent call last):
-        VdtValueTooBigError: the value "1" is too big.
-        """
-        ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
-
-
-class VdtValueTooShortError(VdtValueError):
-    """The value supplied was of the correct type, but was too short."""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtValueTooShortError('jed')
-        Traceback (most recent call last):
-        VdtValueTooShortError: the value "jed" is too short.
-        """
-        ValidateError.__init__(
-            self,
-            'the value "%s" is too short.' % (value,))
-
-
-class VdtValueTooLongError(VdtValueError):
-    """The value supplied was of the correct type, but was too long."""
-
-    def __init__(self, value):
-        """
-        >>> raise VdtValueTooLongError('jedie')
-        Traceback (most recent call last):
-        VdtValueTooLongError: the value "jedie" is too long.
-        """
-        ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
-
-
-class Validator(object):
-    """
-    Validator is an object that allows you to register a set of 'checks'.
-    These checks take input and test that it conforms to the check.
-    
-    This can also involve converting the value from a string into
-    the correct datatype.
-    
-    The ``check`` method takes an input string which configures which
-    check is to be used and applies that check to a supplied value.
-    
-    An example input string would be:
-    'int_range(param1, param2)'
-    
-    You would then provide something like:
-    
-    >>> def int_range_check(value, min, max):
-    ...     # turn min and max from strings to integers
-    ...     min = int(min)
-    ...     max = int(max)
-    ...     # check that value is of the correct type.
-    ...     # possible valid inputs are integers or strings
-    ...     # that represent integers
-    ...     if not isinstance(value, (int, str)):
-    ...         raise VdtTypeError(value)
-    ...     elif isinstance(value, str):
-    ...         # if we are given a string
-    ...         # attempt to convert to an integer
-    ...         try:
-    ...             value = int(value)
-    ...         except ValueError:
-    ...             raise VdtValueError(value)
-    ...     # check the value is between our constraints
-    ...     if not min <= value:
-    ...          raise VdtValueTooSmallError(value)
-    ...     if not value <= max:
-    ...          raise VdtValueTooBigError(value)
-    ...     return value
-    
-    >>> fdict = {'int_range': int_range_check}
-    >>> vtr1 = Validator(fdict)
-    >>> vtr1.check('int_range(20, 40)', '30')
-    30
-    >>> vtr1.check('int_range(20, 40)', '60')
-    Traceback (most recent call last):
-    VdtValueTooBigError: the value "60" is too big.
-    
-    New functions can be added with : ::
-    
-    >>> vtr2 = Validator()       
-    >>> vtr2.functions['int_range'] = int_range_check
-    
-    Or by passing in a dictionary of functions when Validator 
-    is instantiated.
-    
-    Your functions *can* use keyword arguments,
-    but the first argument should always be 'value'.
-    
-    If the function doesn't take additional arguments,
-    the parentheses are optional in the check.
-    It can be written with either of : ::
-    
-        keyword = function_name
-        keyword = function_name()
-    
-    The first program to utilise Validator() was Michael Foord's
-    ConfigObj, an alternative to ConfigParser which supports lists and
-    can validate a config file using a config schema.
-    For more details on using Validator with ConfigObj see:
-    http://www.voidspace.org.uk/python/configobj.html
-    """
-
-    # this regex does the initial parsing of the checks
-    _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL)
-
-    # this regex takes apart keyword arguments
-    _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$',  re.DOTALL)
-
-
-    # this regex finds keyword=list(....) type values
-    _list_arg = _list_arg
-
-    # this regex takes individual values out of lists - in one pass
-    _list_members = _list_members
-
-    # These regexes check a set of arguments for validity
-    # and then pull the members out
-    _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
-    _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
-
-
-    def __init__(self, functions=None):
-        """
-        >>> vtri = Validator()
-        """
-        self.functions = {
-            '': self._pass,
-            'integer': is_integer,
-            'float': is_float,
-            'boolean': is_boolean,
-            'ip_addr': is_ip_addr,
-            'string': is_string,
-            'list': is_list,
-            'tuple': is_tuple,
-            'int_list': is_int_list,
-            'float_list': is_float_list,
-            'bool_list': is_bool_list,
-            'ip_addr_list': is_ip_addr_list,
-            'string_list': is_string_list,
-            'mixed_list': is_mixed_list,
-            'pass': self._pass,
-            'option': is_option,
-            'force_list': force_list,
-        }
-        if functions is not None:
-            self.functions.update(functions)
-        # tekNico: for use by ConfigObj
-        self.baseErrorClass = ValidateError
-        self._cache = {}
-
-
-    def check(self, check, value, missing=False):
-        """
-        Usage: check(check, value)
-        
-        Arguments:
-            check: string representing check to apply (including arguments)
-            value: object to be checked
-        Returns value, converted to correct type if necessary
-        
-        If the check fails, raises a ``ValidateError`` subclass.
-        
-        >>> vtor.check('yoda', '')
-        Traceback (most recent call last):
-        VdtUnknownCheckError: the check "yoda" is unknown.
-        >>> vtor.check('yoda()', '')
-        Traceback (most recent call last):
-        VdtUnknownCheckError: the check "yoda" is unknown.
-        
-        >>> vtor.check('string(default="")', '', missing=True)
-        ''
-        """
-        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
-            
-        if missing:
-            if default is None:
-                # no information needed here - to be handled by caller
-                raise VdtMissingValue()
-            value = self._handle_none(default)
-                
-        if value is None:
-            return None
-        
-        return self._check_value(value, fun_name, fun_args, fun_kwargs)
-
-
-    def _handle_none(self, value):
-        if value == 'None':
-            value = None
-        elif value in ("'None'", '"None"'):
-            # Special case a quoted None
-            value = self._unquote(value)
-        return value
-
-
-    def _parse_with_caching(self, check):
-        if check in self._cache:
-            fun_name, fun_args, fun_kwargs, default = self._cache[check]
-            # We call list and dict below to work with *copies* of the data
-            # rather than the original (which are mutable of course)
-            fun_args = list(fun_args)
-            fun_kwargs = dict(fun_kwargs)
-        else:
-            fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
-            fun_kwargs = dict([(str(key), value) for (key, value) in fun_kwargs.items()])
-            self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
-        return fun_name, fun_args, fun_kwargs, default
-        
-        
-    def _check_value(self, value, fun_name, fun_args, fun_kwargs):
-        try:
-            fun = self.functions[fun_name]
-        except KeyError:
-            raise VdtUnknownCheckError(fun_name)
-        else:
-            return fun(value, *fun_args, **fun_kwargs)
-
-
-    def _parse_check(self, check):
-        fun_match = self._func_re.match(check)
-        if fun_match:
-            fun_name = fun_match.group(1)
-            arg_string = fun_match.group(2)
-            arg_match = self._matchfinder.match(arg_string)
-            if arg_match is None:
-                # Bad syntax
-                raise VdtParamError('Bad syntax in check "%s".' % check)
-            fun_args = []
-            fun_kwargs = {}
-            # pull out args of group 2
-            for arg in self._paramfinder.findall(arg_string):
-                # args may need whitespace removing (before removing quotes)
-                arg = arg.strip()
-                listmatch = self._list_arg.match(arg)
-                if listmatch:
-                    key, val = self._list_handle(listmatch)
-                    fun_kwargs[key] = val
-                    continue
-                keymatch = self._key_arg.match(arg)
-                if keymatch:
-                    val = keymatch.group(2)
-                    if not val in ("'None'", '"None"'):
-                        # Special case a quoted None
-                        val = self._unquote(val)
-                    fun_kwargs[keymatch.group(1)] = val
-                    continue
-                
-                fun_args.append(self._unquote(arg))
-        else:
-            # allows for function names without (args)
-            return check, (), {}, None
-
-        # Default must be deleted if the value is specified too,
-        # otherwise the check function will get a spurious "default" keyword arg
-        try:
-            default = fun_kwargs.pop('default', None)
-        except AttributeError:
-            # Python 2.2 compatibility
-            default = None
-            try:
-                default = fun_kwargs['default']
-                del fun_kwargs['default']
-            except KeyError:
-                pass
-            
-        return fun_name, fun_args, fun_kwargs, default
-
-
-    def _unquote(self, val):
-        """Unquote a value if necessary."""
-        if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
-            val = val[1:-1]
-        return val
-
-
-    def _list_handle(self, listmatch):
-        """Take apart a ``keyword=list('val, 'val')`` type string."""
-        out = []
-        name = listmatch.group(1)
-        args = listmatch.group(2)
-        for arg in self._list_members.findall(args):
-            out.append(self._unquote(arg))
-        return name, out
-
-
-    def _pass(self, value):
-        """
-        Dummy check that always passes
-        
-        >>> vtor.check('', 0)
-        0
-        >>> vtor.check('', '0')
-        '0'
-        """
-        return value
-    
-    
-    def get_default_value(self, check):
-        """
-        Given a check, return the default value for the check
-        (converted to the right type).
-        
-        If the check doesn't specify a default value then a
-        ``KeyError`` will be raised.
-        """
-        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
-        if default is None:
-            raise KeyError('Check "%s" has no default value.' % check)
-        value = self._handle_none(default)
-        if value is None:
-            return value
-        return self._check_value(value, fun_name, fun_args, fun_kwargs)
-
-
-def _is_num_param(names, values, to_float=False):
-    """
-    Return numbers from inputs or raise VdtParamError.
-    
-    Lets ``None`` pass through.
-    Pass in keyword argument ``to_float=True`` to
-    use float for the conversion rather than int.
-    
-    >>> _is_num_param(('', ''), (0, 1.0))
-    [0, 1]
-    >>> _is_num_param(('', ''), (0, 1.0), to_float=True)
-    [0.0, 1.0]
-    >>> _is_num_param(('a'), ('a'))
-    Traceback (most recent call last):
-    VdtParamError: passed an incorrect value "a" for parameter "a".
-    """
-    fun = to_float and float or int
-    out_params = []
-    for (name, val) in zip(names, values):
-        if val is None:
-            out_params.append(val)
-        elif isinstance(val, number_or_string_types):
-            try:
-                out_params.append(fun(val))
-            except ValueError:
-                raise VdtParamError(name, val)
-        else:
-            raise VdtParamError(name, val)
-    return out_params
-
-
-# built in checks
-# you can override these by setting the appropriate name
-# in Validator.functions
-# note: if the params are specified wrongly in your input string,
-#       you will also raise errors.
-
-def is_integer(value, min=None, max=None):
-    """
-    A check that tests that a given value is an integer (int, or long)
-    and optionally, between bounds. A negative value is accepted, while
-    a float will fail.
-    
-    If the value is a string, then the conversion is done - if possible.
-    Otherwise a VdtError is raised.
-    
-    >>> vtor.check('integer', '-1')
-    -1
-    >>> vtor.check('integer', '0')
-    0
-    >>> vtor.check('integer', 9)
-    9
-    >>> vtor.check('integer', 'a')
-    Traceback (most recent call last):
-    VdtTypeError: the value "a" is of the wrong type.
-    >>> vtor.check('integer', '2.2')
-    Traceback (most recent call last):
-    VdtTypeError: the value "2.2" is of the wrong type.
-    >>> vtor.check('integer(10)', '20')
-    20
-    >>> vtor.check('integer(max=20)', '15')
-    15
-    >>> vtor.check('integer(10)', '9')
-    Traceback (most recent call last):
-    VdtValueTooSmallError: the value "9" is too small.
-    >>> vtor.check('integer(10)', 9)
-    Traceback (most recent call last):
-    VdtValueTooSmallError: the value "9" is too small.
-    >>> vtor.check('integer(max=20)', '35')
-    Traceback (most recent call last):
-    VdtValueTooBigError: the value "35" is too big.
-    >>> vtor.check('integer(max=20)', 35)
-    Traceback (most recent call last):
-    VdtValueTooBigError: the value "35" is too big.
-    >>> vtor.check('integer(0, 9)', False)
-    0
-    """
-    (min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
-    if not isinstance(value, int_or_string_types):
-        raise VdtTypeError(value)
-    if isinstance(value, string_types):
-        # if it's a string - does it represent an integer ?
-        try:
-            value = int(value)
-        except ValueError:
-            raise VdtTypeError(value)
-    if (min_val is not None) and (value < min_val):
-        raise VdtValueTooSmallError(value)
-    if (max_val is not None) and (value > max_val):
-        raise VdtValueTooBigError(value)
-    return value
-
-
-def is_float(value, min=None, max=None):
-    """
-    A check that tests that a given value is a float
-    (an integer will be accepted), and optionally - that it is between bounds.
-    
-    If the value is a string, then the conversion is done - if possible.
-    Otherwise a VdtError is raised.
-    
-    This can accept negative values.
-    
-    >>> vtor.check('float', '2')
-    2.0
-    
-    From now on we multiply the value to avoid comparing decimals
-    
-    >>> vtor.check('float', '-6.8') * 10
-    -68.0
-    >>> vtor.check('float', '12.2') * 10
-    122.0
-    >>> vtor.check('float', 8.4) * 10
-    84.0
-    >>> vtor.check('float', 'a')
-    Traceback (most recent call last):
-    VdtTypeError: the value "a" is of the wrong type.
-    >>> vtor.check('float(10.1)', '10.2') * 10
-    102.0
-    >>> vtor.check('float(max=20.2)', '15.1') * 10
-    151.0
-    >>> vtor.check('float(10.0)', '9.0')
-    Traceback (most recent call last):
-    VdtValueTooSmallError: the value "9.0" is too small.
-    >>> vtor.check('float(max=20.0)', '35.0')
-    Traceback (most recent call last):
-    VdtValueTooBigError: the value "35.0" is too big.
-    """
-    (min_val, max_val) = _is_num_param(
-        ('min', 'max'), (min, max), to_float=True)
-    if not isinstance(value, number_or_string_types):
-        raise VdtTypeError(value)
-    if not isinstance(value, float):
-        # if it's a string - does it represent a float ?
-        try:
-            value = float(value)
-        except ValueError:
-            raise VdtTypeError(value)
-    if (min_val is not None) and (value < min_val):
-        raise VdtValueTooSmallError(value)
-    if (max_val is not None) and (value > max_val):
-        raise VdtValueTooBigError(value)
-    return value
-
-
-bool_dict = {
-    True: True, 'on': True, '1': True, 'true': True, 'yes': True, 
-    False: False, 'off': False, '0': False, 'false': False, 'no': False,
-}
-
-
-def is_boolean(value):
-    """
-    Check if the value represents a boolean.
-    
-    >>> vtor.check('boolean', 0)
-    0
-    >>> vtor.check('boolean', False)
-    0
-    >>> vtor.check('boolean', '0')
-    0
-    >>> vtor.check('boolean', 'off')
-    0
-    >>> vtor.check('boolean', 'false')
-    0
-    >>> vtor.check('boolean', 'no')
-    0
-    >>> vtor.check('boolean', 'nO')
-    0
-    >>> vtor.check('boolean', 'NO')
-    0
-    >>> vtor.check('boolean', 1)
-    1
-    >>> vtor.check('boolean', True)
-    1
-    >>> vtor.check('boolean', '1')
-    1
-    >>> vtor.check('boolean', 'on')
-    1
-    >>> vtor.check('boolean', 'true')
-    1
-    >>> vtor.check('boolean', 'yes')
-    1
-    >>> vtor.check('boolean', 'Yes')
-    1
-    >>> vtor.check('boolean', 'YES')
-    1
-    >>> vtor.check('boolean', '')
-    Traceback (most recent call last):
-    VdtTypeError: the value "" is of the wrong type.
-    >>> vtor.check('boolean', 'up')
-    Traceback (most recent call last):
-    VdtTypeError: the value "up" is of the wrong type.
-    
-    """
-    if isinstance(value, string_types):
-        try:
-            return bool_dict[value.lower()]
-        except KeyError:
-            raise VdtTypeError(value)
-    # we do an equality test rather than an identity test
-    # this ensures Python 2.2 compatibilty
-    # and allows 0 and 1 to represent True and False
-    if value == False:
-        return False
-    elif value == True:
-        return True
-    else:
-        raise VdtTypeError(value)
-
-
-def is_ip_addr(value):
-    """
-    Check that the supplied value is an Internet Protocol address, v.4,
-    represented by a dotted-quad string, i.e. '1.2.3.4'.
-    
-    >>> vtor.check('ip_addr', '1 ')
-    '1'
-    >>> vtor.check('ip_addr', ' 1.2')
-    '1.2'
-    >>> vtor.check('ip_addr', ' 1.2.3 ')
-    '1.2.3'
-    >>> vtor.check('ip_addr', '1.2.3.4')
-    '1.2.3.4'
-    >>> vtor.check('ip_addr', '0.0.0.0')
-    '0.0.0.0'
-    >>> vtor.check('ip_addr', '255.255.255.255')
-    '255.255.255.255'
-    >>> vtor.check('ip_addr', '255.255.255.256')
-    Traceback (most recent call last):
-    VdtValueError: the value "255.255.255.256" is unacceptable.
-    >>> vtor.check('ip_addr', '1.2.3.4.5')
-    Traceback (most recent call last):
-    VdtValueError: the value "1.2.3.4.5" is unacceptable.
-    >>> vtor.check('ip_addr', 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    """
-    if not isinstance(value, string_types):
-        raise VdtTypeError(value)
-    value = value.strip()
-    try:
-        dottedQuadToNum(value)
-    except ValueError:
-        raise VdtValueError(value)
-    return value
-
-
-def is_list(value, min=None, max=None):
-    """
-    Check that the value is a list of values.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    It does no check on list members.
-    
-    >>> vtor.check('list', ())
-    []
-    >>> vtor.check('list', [])
-    []
-    >>> vtor.check('list', (1, 2))
-    [1, 2]
-    >>> vtor.check('list', [1, 2])
-    [1, 2]
-    >>> vtor.check('list(3)', (1, 2))
-    Traceback (most recent call last):
-    VdtValueTooShortError: the value "(1, 2)" is too short.
-    >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
-    Traceback (most recent call last):
-    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
-    >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
-    [1, 2, 3, 4]
-    >>> vtor.check('list', 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    >>> vtor.check('list', '12')
-    Traceback (most recent call last):
-    VdtTypeError: the value "12" is of the wrong type.
-    """
-    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
-    if isinstance(value, string_types):
-        raise VdtTypeError(value)
-    try:
-        num_members = len(value)
-    except TypeError:
-        raise VdtTypeError(value)
-    if min_len is not None and num_members < min_len:
-        raise VdtValueTooShortError(value)
-    if max_len is not None and num_members > max_len:
-        raise VdtValueTooLongError(value)
-    return list(value)
-
-
-def is_tuple(value, min=None, max=None):
-    """
-    Check that the value is a tuple of values.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    It does no check on members.
-    
-    >>> vtor.check('tuple', ())
-    ()
-    >>> vtor.check('tuple', [])
-    ()
-    >>> vtor.check('tuple', (1, 2))
-    (1, 2)
-    >>> vtor.check('tuple', [1, 2])
-    (1, 2)
-    >>> vtor.check('tuple(3)', (1, 2))
-    Traceback (most recent call last):
-    VdtValueTooShortError: the value "(1, 2)" is too short.
-    >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
-    Traceback (most recent call last):
-    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
-    >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
-    (1, 2, 3, 4)
-    >>> vtor.check('tuple', 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    >>> vtor.check('tuple', '12')
-    Traceback (most recent call last):
-    VdtTypeError: the value "12" is of the wrong type.
-    """
-    return tuple(is_list(value, min, max))
-
-
-def is_string(value, min=None, max=None):
-    """
-    Check that the supplied value is a string.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    >>> vtor.check('string', '0')
-    '0'
-    >>> vtor.check('string', 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    >>> vtor.check('string(2)', '12')
-    '12'
-    >>> vtor.check('string(2)', '1')
-    Traceback (most recent call last):
-    VdtValueTooShortError: the value "1" is too short.
-    >>> vtor.check('string(min=2, max=3)', '123')
-    '123'
-    >>> vtor.check('string(min=2, max=3)', '1234')
-    Traceback (most recent call last):
-    VdtValueTooLongError: the value "1234" is too long.
-    """
-    if not isinstance(value, string_types):
-        raise VdtTypeError(value)
-    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
-    try:
-        num_members = len(value)
-    except TypeError:
-        raise VdtTypeError(value)
-    if min_len is not None and num_members < min_len:
-        raise VdtValueTooShortError(value)
-    if max_len is not None and num_members > max_len:
-        raise VdtValueTooLongError(value)
-    return value
-
-
-def is_int_list(value, min=None, max=None):
-    """
-    Check that the value is a list of integers.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    Each list member is checked that it is an integer.
-    
-    >>> vtor.check('int_list', ())
-    []
-    >>> vtor.check('int_list', [])
-    []
-    >>> vtor.check('int_list', (1, 2))
-    [1, 2]
-    >>> vtor.check('int_list', [1, 2])
-    [1, 2]
-    >>> vtor.check('int_list', [1, 'a'])
-    Traceback (most recent call last):
-    VdtTypeError: the value "a" is of the wrong type.
-    """
-    return [is_integer(mem) for mem in is_list(value, min, max)]
-
-
-def is_bool_list(value, min=None, max=None):
-    """
-    Check that the value is a list of booleans.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    Each list member is checked that it is a boolean.
-    
-    >>> vtor.check('bool_list', ())
-    []
-    >>> vtor.check('bool_list', [])
-    []
-    >>> check_res = vtor.check('bool_list', (True, False))
-    >>> check_res == [True, False]
-    1
-    >>> check_res = vtor.check('bool_list', [True, False])
-    >>> check_res == [True, False]
-    1
-    >>> vtor.check('bool_list', [True, 'a'])
-    Traceback (most recent call last):
-    VdtTypeError: the value "a" is of the wrong type.
-    """
-    return [is_boolean(mem) for mem in is_list(value, min, max)]
-
-
-def is_float_list(value, min=None, max=None):
-    """
-    Check that the value is a list of floats.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    Each list member is checked that it is a float.
-    
-    >>> vtor.check('float_list', ())
-    []
-    >>> vtor.check('float_list', [])
-    []
-    >>> vtor.check('float_list', (1, 2.0))
-    [1.0, 2.0]
-    >>> vtor.check('float_list', [1, 2.0])
-    [1.0, 2.0]
-    >>> vtor.check('float_list', [1, 'a'])
-    Traceback (most recent call last):
-    VdtTypeError: the value "a" is of the wrong type.
-    """
-    return [is_float(mem) for mem in is_list(value, min, max)]
-
-
-def is_string_list(value, min=None, max=None):
-    """
-    Check that the value is a list of strings.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    Each list member is checked that it is a string.
-    
-    >>> vtor.check('string_list', ())
-    []
-    >>> vtor.check('string_list', [])
-    []
-    >>> vtor.check('string_list', ('a', 'b'))
-    ['a', 'b']
-    >>> vtor.check('string_list', ['a', 1])
-    Traceback (most recent call last):
-    VdtTypeError: the value "1" is of the wrong type.
-    >>> vtor.check('string_list', 'hello')
-    Traceback (most recent call last):
-    VdtTypeError: the value "hello" is of the wrong type.
-    """
-    if isinstance(value, string_types):
-        raise VdtTypeError(value)
-    return [is_string(mem) for mem in is_list(value, min, max)]
-
-
-def is_ip_addr_list(value, min=None, max=None):
-    """
-    Check that the value is a list of IP addresses.
-    
-    You can optionally specify the minimum and maximum number of members.
-    
-    Each list member is checked that it is an IP address.
-    
-    >>> vtor.check('ip_addr_list', ())
-    []
-    >>> vtor.check('ip_addr_list', [])
-    []
-    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
-    ['1.2.3.4', '5.6.7.8']
-    >>> vtor.check('ip_addr_list', ['a'])
-    Traceback (most recent call last):
-    VdtValueError: the value "a" is unacceptable.
-    """
-    return [is_ip_addr(mem) for mem in is_list(value, min, max)]
-
-
-def force_list(value, min=None, max=None):
-    """
-    Check that a value is a list, coercing strings into
-    a list with one member. Useful where users forget the
-    trailing comma that turns a single value into a list.
-    
-    You can optionally specify the minimum and maximum number of members.
-    A minumum of greater than one will fail if the user only supplies a
-    string.
-    
-    >>> vtor.check('force_list', ())
-    []
-    >>> vtor.check('force_list', [])
-    []
-    >>> vtor.check('force_list', 'hello')
-    ['hello']
-    """
-    if not isinstance(value, (list, tuple)):
-        value = [value]
-    return is_list(value, min, max)
-    
-    
-
-fun_dict = {
-    'integer': is_integer,
-    'float': is_float,
-    'ip_addr': is_ip_addr,
-    'string': is_string,
-    'boolean': is_boolean,
-}
-
-
-def is_mixed_list(value, *args):
-    """
-    Check that the value is a list.
-    Allow specifying the type of each member.
-    Work on lists of specific lengths.
-    
-    You specify each member as a positional argument specifying type
-    
-    Each type should be one of the following strings :
-      'integer', 'float', 'ip_addr', 'string', 'boolean'
-    
-    So you can specify a list of two strings, followed by
-    two integers as :
-    
-      mixed_list('string', 'string', 'integer', 'integer')
-    
-    The length of the list must match the number of positional
-    arguments you supply.
-    
-    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
-    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
-    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
-    1
-    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
-    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
-    1
-    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
-    Traceback (most recent call last):
-    VdtTypeError: the value "b" is of the wrong type.
-    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
-    Traceback (most recent call last):
-    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
-    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
-    Traceback (most recent call last):
-    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
-    >>> vtor.check(mix_str, 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    
-    This test requires an elaborate setup, because of a change in error string
-    output from the interpreter between Python 2.2 and 2.3 .
-    
-    >>> res_seq = (
-    ...     'passed an incorrect value "',
-    ...     'yoda',
-    ...     '" for parameter "mixed_list".',
-    ... )
-    >>> res_str = "'".join(res_seq)
-    >>> try:
-    ...     vtor.check('mixed_list("yoda")', ('a'))
-    ... except VdtParamError as err:
-    ...     str(err) == res_str
-    1
-    """
-    try:
-        length = len(value)
-    except TypeError:
-        raise VdtTypeError(value)
-    if length < len(args):
-        raise VdtValueTooShortError(value)
-    elif length > len(args):
-        raise VdtValueTooLongError(value)
-    try:
-        return [fun_dict[arg](val) for arg, val in zip(args, value)]
-    except KeyError as e:
-        raise(VdtParamError('mixed_list', e))
-
-
-def is_option(value, *options):
-    """
-    This check matches the value to any of a set of options.
-    
-    >>> vtor.check('option("yoda", "jedi")', 'yoda')
-    'yoda'
-    >>> vtor.check('option("yoda", "jedi")', 'jed')
-    Traceback (most recent call last):
-    VdtValueError: the value "jed" is unacceptable.
-    >>> vtor.check('option("yoda", "jedi")', 0)
-    Traceback (most recent call last):
-    VdtTypeError: the value "0" is of the wrong type.
-    """
-    if not isinstance(value, string_types):
-        raise VdtTypeError(value)
-    if not value in options:
-        raise VdtValueError(value)
-    return value
-
-
-def _test(value, *args, **keywargs):
-    """
-    A function that exists for test purposes.
-    
-    >>> checks = [
-    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
-    ...     '3',
-    ...     '3, 6',
-    ...     '3,',
-    ...     'min=1, test="a b c"',
-    ...     'min=5, test="a, b, c"',
-    ...     'min=1, max=3, test="a, b, c"',
-    ...     'min=-100, test=-99',
-    ...     'min=1, max=3',
-    ...     '3, 6, test="36"',
-    ...     '3, 6, test="a, b, c"',
-    ...     '3, max=3, test=list("a", "b", "c")',
-    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
-    ...     "test='x=fish(3)'",
-    ...    ]
-    >>> v = Validator({'test': _test})
-    >>> for entry in checks:
-    ...     print(v.check(('test(%s)' % entry), 3))
-    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
-    (3, ('3',), {})
-    (3, ('3', '6'), {})
-    (3, ('3',), {})
-    (3, (), {'test': 'a b c', 'min': '1'})
-    (3, (), {'test': 'a, b, c', 'min': '5'})
-    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
-    (3, (), {'test': '-99', 'min': '-100'})
-    (3, (), {'max': '3', 'min': '1'})
-    (3, ('3', '6'), {'test': '36'})
-    (3, ('3', '6'), {'test': 'a, b, c'})
-    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
-    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
-    (3, (), {'test': 'x=fish(3)'})
-    
-    >>> v = Validator()
-    >>> v.check('integer(default=6)', '3')
-    3
-    >>> v.check('integer(default=6)', None, True)
-    6
-    >>> v.get_default_value('integer(default=6)')
-    6
-    >>> v.get_default_value('float(default=6)')
-    6.0
-    >>> v.get_default_value('pass(default=None)')
-    >>> v.get_default_value("string(default='None')")
-    'None'
-    >>> v.get_default_value('pass')
-    Traceback (most recent call last):
-    KeyError: 'Check "pass" has no default value.'
-    >>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
-    ['1', '2', '3', '4']
-    
-    >>> v = Validator()
-    >>> v.check("pass(default=None)", None, True)
-    >>> v.check("pass(default='None')", None, True)
-    'None'
-    >>> v.check('pass(default="None")', None, True)
-    'None'
-    >>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
-    ['1', '2', '3', '4']
-    
-    >>> v = Validator()
-    >>> default = v.get_default_value('string(default=None)')
-    >>> default == None
-    1
-    """
-    return (value, args, keywargs)
-
-
-def _test2():
-    """
-    >>> 
-    >>> v = Validator()
-    >>> v.get_default_value('string(default="#ff00dd")')
-    '#ff00dd'
-    >>> v.get_default_value('integer(default=3) # comment')
-    3
-    """
-
-def _test3():
-    r"""
-    >>> vtor.check('string(default="")', '', missing=True)
-    ''
-    >>> vtor.check('string(default="\n")', '', missing=True)
-    '\n'
-    >>> print(vtor.check('string(default="\n")', '', missing=True), end='')
-    <BLANKLINE>
-    >>> vtor.check('string()', '\n')
-    '\n'
-    >>> vtor.check('string(default="\n\n\n")', '', missing=True)
-    '\n\n\n'
-    >>> vtor.check('string()', 'random \n text goes here\n\n')
-    'random \n text goes here\n\n'
-    >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
-    ... '', missing=True)
-    ' \nrandom text\ngoes \n here\n\n '
-    >>> vtor.check("string(default='\n\n\n')", '', missing=True)
-    '\n\n\n'
-    >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
-    '\n'
-    >>> vtor.check("string_list()", ['foo', '\n', 'bar'])
-    ['foo', '\n', 'bar']
-    >>> vtor.check("string_list(default=list('\n'))", '', missing=True)
-    ['\n']
-    """
-    
-    
-if __name__ == '__main__':
-    # run the code tests in doctest format
-    import sys
-    import doctest
-    m = sys.modules.get('__main__')
-    globs = m.__dict__.copy()
-    globs.update({
-        'vtor': Validator(),
-    })
-    doctest.testmod(m, globs=globs)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/versioninfo.py b/required_pkgs/stsci.tools/lib/stsci/tools/versioninfo.py
deleted file mode 100644
index b420b4b..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/versioninfo.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#   Program:    versionInfo.py
-#   Author:     Christopher Hanley
-#
-#   License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
-#
-#   Date:       19 January 2004
-#   Purpose:
-#       To print a user's system information when providing user support.
-#
-#   Version:
-#       Version 0.1.0, 19-Jan-04: Program created. -- CJH
-#       Version 0.1.1, 20-Jan-04: Modified program to
-#                                 loop over a taskList object. -- CJH
-#
-#       Version 0.2.0, 31-Mar-06: Added numpy to the task list. -- CJH
-from __future__ import division, print_function # confidence high
-
-__version__ = '0.2.0'
-
-def printVersionInfo():
-    # Print the current path information
-    try:
-        print("Path information:")
-        print("-----------------")
-        import sys
-        print(sys.path)
-        print(" ")
-    except:
-        print("Unable to get sys information.")
-        print(" ")
-
-    # Define the list of tasks to test
-    taskList = [
-                'numarray',
-                'numpy',
-                'Numeric',
-                'pyfits',
-                'pyraf',
-                'multidrizzle',
-                'pydrizzle',
-                'stsci.tools',
-                'calcos',
-                'convolve',
-                'image',
-                'imagemanip',
-                'imagestats',
-                'ndimage'
-                ]
-
-    # Test the list of software tasks
-    for software in taskList:
-        print(software+":")
-        print("-----------")
-        try:
-            package = __import__(software)
-            try:
-                print("version -> ",package.__version__)
-            except:
-                print("__version__ attribute is not defined")
-            try:
-                print("SVN version -> ",package.__svn_version__)
-            except:
-                print("__svn_version__ attribute is not defined")
-            try:
-                pathName = package.__path__
-            except:
-                pathName = package.__file__
-            print("location -> ",pathName)
-        except:
-            print(software+" not found in path...")
-        print(" ")
-
-    # Print instruction message.
-    print("PLEASE PASTE THE OUTPUT FROM THIS TASK ")
-    print("INTO AN E-MAIL MESSAGE AND SEND IT WITH")
-    print("YOUR PROBLEM DESCRIPTION TO SSB!")
-    print(" ")
-    print("SUPPORT ADDRESS: help at stsci.edu ")
-
-if __name__ == '__main__':
-    printVersionInfo()
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/vtor_checks.py b/required_pkgs/stsci.tools/lib/stsci/tools/vtor_checks.py
deleted file mode 100755
index be61cd4..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/vtor_checks.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/local/bin/python
-
-""" This file holds our own over-rides for the standard Validator check
-    functions.  We over-ride them so that we may add our own special keywords
-    to them in the config_spec.
-
-$Id: vtor_checks.py 38909 2015-04-08 17:41:07Z bsimon $
-"""
-from __future__ import absolute_import, division, print_function # confidence high
-
-from . import configobj, validate
-from . import irafutils
-
-STANDARD_KEYS = ['min', 'max', 'missing', 'default']
-OVCDBG = False
-
-
-def sigStrToKwArgsDict(checkFuncSig):
-    """ Take a check function signature (string), and parse it to get a dict
-        of the keyword args and their values. """
-    p1 = checkFuncSig.find('(')
-    p2 = checkFuncSig.rfind(')')
-    assert p1 > 0 and p2 > 0 and p2 > p1, "Invalid signature: "+checkFuncSig
-    argParts = irafutils.csvSplit(checkFuncSig[p1+1:p2], ',', True)
-    argParts = [x.strip() for x in argParts]
-    retval = {}
-    for argPair in argParts:
-        argSpl = argPair.split('=', 1)
-        if len(argSpl) > 1:
-            if argSpl[0] in retval:
-                if isinstance(retval[argSpl[0]], (list,tuple)):
-                    retval[argSpl[0]]+=(irafutils.stripQuotes(argSpl[1]),) # 3rd
-                else: # 2nd in, so convert to tuple
-                    retval[argSpl[0]] = (retval[argSpl[0]],
-                                         irafutils.stripQuotes(argSpl[1]),)
-            else:
-                retval[argSpl[0]] = irafutils.stripQuotes(argSpl[1]) # 1st in
-        else:
-            retval[argSpl[0]] = None #  eg. found "triggers=, max=6, ..."
-    return retval
-
-
-def separateKeywords(kwArgsDict):
-    """ Look through the keywords passed and separate the special ones we
-        have added from the legal/standard ones.  Return both sets as two
-        dicts (in a tuple), as (standardKws, ourKws) """
-    standardKws = {}
-    ourKws = {}
-    for k in kwArgsDict:
-        if k in STANDARD_KEYS:
-            standardKws[k]=kwArgsDict[k]
-        else:
-            ourKws[k]=kwArgsDict[k]
-    return (standardKws, ourKws)
-
-
-def addKwdArgsToSig(sigStr, kwArgsDict):
-    """ Alter the passed function signature string to add the given kewords """
-    retval = sigStr
-    if len(kwArgsDict) > 0:
-        retval = retval.strip(' ,)') # open up the r.h.s. for more args
-        for k in kwArgsDict:
-            if retval[-1] != '(': retval += ", "
-            retval += str(k)+"="+str(kwArgsDict[k])
-        retval += ')'
-    retval = retval
-    return retval
-
-
-def boolean_check_kw(val, *args, **kw):
-    if OVCDBG: print("boolean_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    vtor = validate.Validator()
-    checkFuncStr = "boolean"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def option_check_kw(val, *args, **kw):
-    if OVCDBG: print("option_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    vtor = validate.Validator()
-    checkFuncStr = "option"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def integer_check_kw(val, *args, **kw):
-    if OVCDBG: print("integer_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    vtor = validate.Validator()
-    checkFuncStr = "integer"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def integer_or_none_check_kw(val, *args, **kw):
-    if OVCDBG: print("integer_or_none_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    if val in (None,'','None','NONE','INDEF'): return None # only difference
-    vtor = validate.Validator()
-    checkFuncStr = "integer"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def float_check_kw(val, *args, **kw):
-    if OVCDBG: print("float_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    vtor = validate.Validator()
-    checkFuncStr = "float"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def float_or_none_check_kw(val, *args, **kw):
-    if OVCDBG: print("float_or_none_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    if val in (None,'','None','NONE','INDEF'): return None # only difference
-    vtor = validate.Validator()
-    checkFuncStr = "float"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-def string_check_kw(val, *args, **kw):
-    if OVCDBG: print("string_kw for: "+str(val)+", args: "+str(args)+", kw: "+str(kw))
-    vtor = validate.Validator()
-    checkFuncStr = "string"+str(tuple(args))
-    checkFuncStr = addKwdArgsToSig(checkFuncStr, separateKeywords(kw)[0])
-    if OVCDBG: print("CFS: "+checkFuncStr+'\n')
-    return vtor.check(checkFuncStr, val)
-
-
-FUNC_DICT = {'boolean_kw':         boolean_check_kw,
-             'option_kw':          option_check_kw,
-             'integer_kw':         integer_check_kw,
-             'integer_or_none_kw': integer_or_none_check_kw,
-             'float_kw':           float_check_kw,
-             'float_or_none_kw':   float_or_none_check_kw,
-             'string_kw':          string_check_kw,
-             'action_kw':          string_check_kw }
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/wcsutil.py b/required_pkgs/stsci.tools/lib/stsci/tools/wcsutil.py
deleted file mode 100644
index 40b74bb..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/wcsutil.py
+++ /dev/null
@@ -1,1099 +0,0 @@
-from __future__ import absolute_import, division, print_function # confidence high
-
-import copy, os
-
-from astropy.io import fits
-import numpy as N
-from math import *
-
-from . import fileutil
-
-# Convenience definitions...
-yes = True
-no = False
-
-DEGTORAD = fileutil.DEGTORAD
-RADTODEG = fileutil.RADTODEG
-DIVMOD = fileutil.DIVMOD
-DEFAULT_PREFIX = 'O'
-
-#
-# History
-#
-# 30-Mar-2002 WJH: Added separate XYtoSky interface function.
-# 19-Apr-2002 WJH: Corrected 'ddtohms' for error in converting neg. dec.
-# 20-Sept-2002 WJH: replaced all references to 'keypar' with calls to 'hselect'
-#                   This avoids any parameter writes in the pipeline.
-# 03-Dec-2002 WJH: Added 'new' parameter to WCSObject to make creating an
-#                   object from scratch unambiguous and free from filename
-#                   collisions with user files.
-# 23-Apr-2003 WJH: Enhanced to search entire file for header with WCS keywords
-#                   if no extension was specified with filename.
-# 6-Oct-2003 WJH:  Modified to use the 'fileutil.getHeader' function or
-#                   accept a PyFITS/readgeis header object.  Removed
-#                   any explicit check on whether the image was FITS or
-#                   not.
-# 5-Feb-2004 WJH:  Added 'recenter' method to rigorously shift the WCS from
-#                   an off-center reference pixel position to the frame center
-#
-# 24-Jan-2005 WJH: Added methods and attributes for working with archived
-#                   versions of WCS keywords.  Archived keywords will be
-#                   treated as 'read-only' if they already exist, unless
-#                   specifically overwritten.
-#
-# 30-Mar-2005 WJH: 'read_archive' needed to be modified to use existing prefix
-#                   found in header, if one exists, for computing archive pscale.
-#
-# 20-Jun-2005 WJH: Support for constant-value arrays using NPIX/PIXVALUE added to
-#                   class.  The output reference WCS now creates a constant-value
-#                   array for the extension as well in order to be FITS compliant.
-#                   WCS keywords now get written out in a set order to be FITS compliant.
-#                   New method, get_orient, added to always allow access to computed
-#                   orientation regardless of orientat keyword value.
-#
-# 29-June-2005 WJH: Multiple WCS extensions are not created when running
-#                   'createReferenceWCS'.
-#
-
-
-
-
-__version__ = '1.2.3 (11-Feb-2011)'
-
-def help():
-    print('wcsutil Version '+str(__version__)+':\n')
-    print(WCSObject.__doc__)
-#################
-#
-#
-#               Coordinate Transformation Functions
-#
-#
-#################
-
-
-def ddtohms(xsky,ysky,verbose=no):
-
-    """ Convert sky position(s) from decimal degrees to HMS format."""
-
-    xskyh = xsky /15.
-    xskym = (xskyh - N.floor(xskyh)) * 60.
-    xskys = (xskym - N.floor(xskym)) * 60.
-
-    yskym = (N.abs(ysky) - N.floor(N.abs(ysky))) * 60.
-    yskys = (yskym - N.floor(yskym)) * 60.
-
-    if isinstance(xskyh,N.ndarray):
-        rah,dech = [],[]
-        for i in range(len(xskyh)):
-            rastr = repr(int(xskyh[i]))+':'+repr(int(xskym[i]))+':'+repr(xskys[i])
-            decstr = repr(int(ysky[i]))+':'+repr(int(yskym[i]))+':'+repr(yskys[i])
-            rah.append(rastr)
-            dech.append(decstr)
-            if verbose:
-                print('RA = ',rastr,', Dec = ',decstr)
-    else:
-        rastr = repr(int(xskyh))+':'+repr(int(xskym))+':'+repr(xskys)
-        decstr = repr(int(ysky))+':'+repr(int(yskym))+':'+repr(yskys)
-        rah = rastr
-        dech = decstr
-        if verbose:
-            print('RA = ',rastr,', Dec = ',decstr)
-
-    return rah,dech
-
-
-def troll(roll, dec, v2, v3):
-    """ Computes the roll angle at the target position based on::
-
-            the roll angle at the V1 axis(roll),
-            the dec of the target(dec), and
-            the V2/V3 position of the aperture (v2,v3) in arcseconds.
-
-        Based on the algorithm provided by Colin Cox that is used in
-        Generic Conversion at STScI.
-    """
-    # Convert all angles to radians
-    _roll = DEGTORAD(roll)
-    _dec = DEGTORAD(dec)
-    _v2 = DEGTORAD(v2 / 3600.)
-    _v3 = DEGTORAD(v3 / 3600.)
-
-    # compute components
-    sin_rho = sqrt((pow(sin(_v2),2)+pow(sin(_v3),2)) - (pow(sin(_v2),2)*pow(sin(_v3),2)))
-    rho = asin(sin_rho)
-    beta = asin(sin(_v3)/sin_rho)
-    if _v2 < 0: beta = pi - beta
-    gamma = asin(sin(_v2)/sin_rho)
-    if _v3 < 0: gamma = pi - gamma
-    A = pi/2. + _roll - beta
-    B = atan2( sin(A)*cos(_dec), (sin(_dec)*sin_rho - cos(_dec)*cos(rho)*cos(A)))
-
-    # compute final value
-    troll = RADTODEG(pi - (gamma+B))
-
-    return troll
-
-#################
-#
-#
-#               Coordinate System Class
-#
-#
-#################
-
-class WCSObject:
-    """ This class should contain the WCS information from the
-        input exposure's header and provide conversion functionality
-        from pixels to RA/Dec and back.
-
-        :Syntax: The basic syntax for using this object is::
-
-            >>> wcs = wcsutil.WCSObject(rootname,header=None,shape=None,
-            >>>                        pa_key='PA_V3',new=no,prefix=None)
-
-        This will create a WCSObject which provides basic WCS functions.
-
-        Parameters
-        ==========
-        rootname: string
-            filename in a format supported by IRAF, specifically::
-
-                filename.hhh[group] -or-
-                filename.fits[ext] -or-
-                filename.fits[extname,extver]
-
-        header: object
-            PyFITS header object from which WCS keywords can be read
-        shape:    tuple
-            tuple giving (nx,ny,pscale)
-        pa_key: string
-            name of keyword to read in telescopy orientation
-        new: boolean
-            specify a new object rather than creating one by
-            reading in keywords from an existing image
-        prefix: string
-            string to use as prefix for creating archived versions
-            of WCS keywords, if such keywords do not already exist
-
-        Notes
-        ======
-        Setting 'new=yes' will create a WCSObject from scratch
-        regardless of any input rootname.  This avoids unexpected
-        filename collisions.
-
-        Methods
-        =======
-        print_archive(format=True)
-            print out archive keyword values
-        get_archivekw(keyword)
-            return archived value for WCS keyword
-        set_pscale()
-            set pscale attribute for object
-        compute_pscale(cd11,cd21)
-            compute pscale value
-        get_orient()
-            return orient computed from CD matrix
-        updateWCS(pixel_scale=None,orient=None,refpos=None,refval=None,size=None)
-            reset entire WCS based on given values
-        xy2rd(pos)
-            compute RA/Dec position for given (x,y) tuple
-        rd2xy(skypos,hour=no)
-            compute X,Y position for given (RA,Dec)
-        rotateCD(orient)
-            rotate CD matrix to new orientation given by 'orient'
-        recenter()
-            Reset reference position to X,Y center of frame
-        write(fitsname=None,archive=True,overwrite=False,quiet=True)
-            write out values of WCS to specified file
-        restore()
-            reset WCS keyword values to those from archived values
-        read_archive(header,prepend=None)
-            read any archive WCS keywords from PyFITS header
-        archive(prepend=None,overwrite=no,quiet=yes)
-            create archived copies of WCS keywords.
-        write_archive(fitsname=None,overwrite=no,quiet=yes)
-            write out the archived WCS values to the file
-        restoreWCS(prepend=None)
-            resets WCS values in file to original values
-        createReferenceWCS(refname,overwrite=yes)
-            write out values of WCS keywords to NEW FITS
-            file without any image data
-        copy(deep=True)
-            create a copy of the WCSObject.
-        help()
-            prints out this help message
-
-    """
-    def __init__(self, rootname,header=None,shape=None,pa_key='PA_V3',new=no,prefix=None):
-        # Initialize wcs dictionaries:
-        #   wcsdef - default values for new images
-        #   wcstrans - translation table from header keyword to attribute
-        #   wcskeys  - keywords in the order they should appear in the header
-        self.wcsdef = {'crpix1':0.0,'crpix2':0.0,'crval1':0.0,'crval2':0.0,'cd11':1.0,
-                'cd12':1.0,'cd21':1.0,'cd22':1.0,'orient':1.0,'naxis1':0,'naxis2':0,'pscale':1.0,
-                'postarg1':0.0,'postarg2':0.0,'pa_obs':0.0,
-                'ctype1':'RA---TAN','ctype2':'DEC--TAN'}
-        self.wcstrans = {'CRPIX1':'crpix1','CRPIX2':'crpix2','CRVAL1':'crval1','CRVAL2':'crval2',
-            'CD1_1':'cd11','CD1_2':'cd12','CD2_1':'cd21','CD2_2':'cd22',
-            'ORIENTAT':'orient', 'NAXIS1':'naxis1','NAXIS2':'naxis2',
-            'pixel scale':'pscale','CTYPE1':'ctype1','CTYPE2':'ctype2'}
-        self.wcskeys = ['NAXIS1','NAXIS2','CRPIX1','CRPIX2',
-                        'CRVAL1','CRVAL2','CTYPE1','CTYPE2',
-                        'CD1_1','CD1_2','CD2_1','CD2_2',
-                        'ORIENTAT']
-        # Now, read in the CRPIX1/2, CRVAL1/2, CD1/2_1/2 keywords.
-        # Simplistic, but easy to understand what you are asking for.
-
-        _exists = yes
-        if rootname != None:
-            self.rootname = rootname
-        else:
-            self.rootname = 'New'
-            new = yes
-            _exists = no
-
-        # Initialize attribute for GEIS image name, just in case...
-        self.geisname = None
-
-        # Look for extension specification in rootname
-        _indx = _section = self.rootname.find('[')
-        # If none are found, use entire rootname
-        if _indx < 0:
-            _indx = len(self.rootname)
-
-        # Determine whether we are working with a new image or not.
-        _dir,_rootname = os.path.split(fileutil.osfn(self.rootname[:_indx]))
-        if _dir:
-            _filename = _dir+os.sep+_rootname
-        else:
-            _filename = _rootname
-        self.filename = _filename
-
-        if not new:
-            _exists = fileutil.checkFileExists(_rootname,directory=_dir)
-
-        else:
-            _exists = no
-
-        # If no header has been provided, get the PRIMARY and the
-        # specified extension header... This call uses the fully
-        # expanded version of the filename, plus any sections listed by
-        # by the user in the original rootname.
-        if not header and _exists:
-            _hdr_file = _filename+self.rootname[_indx:]
-            _header = fileutil.getHeader(_hdr_file)
-        else:
-            # Otherwise, simply use the header already read into memory
-            # for this exposure/chip.
-            _header = header
-
-        if _exists or header:
-            # Initialize WCS object with keyword values...
-            try:
-                _dkey = 'orientat'
-                if 'orientat' in _header:
-                    self.orient = _header['orientat']
-                else:
-                    self.orient = None
-
-                if _header['naxis'] == 0 and 'pixvalue' in _header:
-
-                # Check for existence of NPIX/PIXVALUE keywords
-                # which represent a constant array extension
-                    _dkey = 'npix1'
-                    self.naxis1 = _header['npix1']
-                    _dkey = 'npix2'
-                    self.naxis2 = _header['npix2']
-                    _dkey = 'pixvalue'
-                    self.pixvalue = _header['pixvalue']
-                else:
-                    _dkey = 'naxis1'
-                    self.naxis1 = _header['naxis1']
-                    _dkey = 'naxis2'
-                    self.naxis2 = _header['naxis2']
-                    self.pixvalue = None
-
-                self.npix1 = self.naxis1
-                self.npix2 = self.naxis2
-
-                for key in self.wcstrans.keys():
-                    _dkey = self.wcstrans[key]
-                    if _dkey not in ['pscale','orient','naxis1','naxis2']:
-                        self.__dict__[_dkey] = _header[key]
-
-                self.new = no
-            except:
-                print('Could not find WCS keyword: ',_dkey)
-                raise IOError('Image %s does not contain all required WCS keywords!' % self.rootname)
-
-            # Now, try to read in POSTARG keyword values, if they exist...
-            try:
-                self.postarg1 = _header['postarg1']
-                self.postarg2 = _header['postarg2']
-            except:
-                # If these keywords, don't exist set defaults...
-                self.postarg1 = 0.0
-                self.postarg2 = 0.0
-            try:
-                self.pa_obs = _header[pa_key]
-            except:
-                # If no such keyword exists, use orientat value later
-                self.pa_obs = None
-
-        else:
-            # or set default values...
-            self.new = yes
-            for key in self.wcsdef.keys():
-                self.__dict__[key] = self.wcsdef[key]
-
-            if shape != None:
-                # ... and update with user values.
-                self.naxis1 = int(shape[0])
-                self.naxis2 = int(shape[1])
-                self.pscale = float(shape[2])
-
-        # Make sure reported 'orient' is consistent with CD matrix
-        # while preserving the original 'ORIENTAT' keyword value
-        self.orientat = self.orient
-
-        self.orient = RADTODEG(N.arctan2(self.cd12,self.cd22))
-
-        # If no keyword provided pa_obs value (PA_V3), then default to
-        # image orientation from CD matrix.
-        if self.pa_obs == None:
-            self.pa_obs = self.orient
-
-        if shape == None:
-            self.set_pscale()
-            #self.pscale = N.sqrt(N.power(self.cd11,2)+N.power(self.cd21,2)) * 3600.
-            # Use Jacobian determination of pixel scale instead of X or Y separately...
-            #self.pscale = N.sqrt(abs(self.cd11*self.cd22 - self.cd12*self.cd21))*3600.
-
-        # Establish an attribute for the linearized orient
-        # defined as the orientation of the CD after applying the default
-        # distortion correction.
-        self._orient_lin = 0.
-
-        # attribute to define format for printing WCS
-        self.__format__=yes
-
-        # Keep track of the keyword names used as the backup keywords
-        # for the original WCS values
-        #    backup - dict relating active keywords with backup keywords
-        #    prepend - string prepended to active keywords to create backup keywords
-        #    orig_wcs - dict containing orig keywords and values
-        self.backup = {}
-        self.revert = {}
-        self.prepend = None
-        self.orig_wcs = {}
-        # Read in any archived WCS keyword values, if they exist
-        self.read_archive(_header,prepend=prefix)
-
-    # You never know when you want to print out the WCS keywords...
-    def __str__(self):
-        block = 'WCS Keywords for ' + self.rootname + ': \n'
-        if not self.__format__:
-            for key in self.wcstrans.keys():
-                _dkey = self.wcstrans[key]
-                strn = key.upper() + " = " + repr(self.__dict__[_dkey]) + '\n'
-                block += strn
-            block += 'PA_V3: '+repr(self.pa_obs)+'\n'
-
-        else:
-            block += 'CD_11  CD_12: '+repr(self.cd11)+'  '+repr(self.cd12) +'\n'
-            block += 'CD_21  CD_22: '+repr(self.cd21)+'  '+repr(self.cd22) +'\n'
-            block += 'CRVAL       : '+repr(self.crval1)+'  '+repr(self.crval2) + '\n'
-            block += 'CRPIX       : '+repr(self.crpix1)+'  '+repr(self.crpix2) + '\n'
-            block += 'NAXIS       : '+repr(int(self.naxis1))+'  '+repr(int(self.naxis2)) + '\n'
-            block += 'Plate Scale : '+repr(self.pscale)+'\n'
-            block += 'ORIENTAT    : '+repr(self.orient)+'\n'
-            block += 'CTYPE       : '+repr(self.ctype1)+'  '+repr(self.ctype2)+'\n'
-            block += 'PA Telescope: '+repr(self.pa_obs)+'\n'
-
-        return block
-
-    def __repr__(self):
-        return repr(self.__dict__)
-
-    def print_archive(self,format=True):
-        """ Prints out archived WCS keywords."""
-        if len(list(self.orig_wcs.keys())) > 0:
-            block  = 'Original WCS keywords for ' + self.rootname+ '\n'
-            block += '    backed up on '+repr(self.orig_wcs['WCSCDATE'])+'\n'
-            if not format:
-                for key in self.wcstrans.keys():
-                    block += key.upper() + " = " + repr(self.get_archivekw(key)) + '\n'
-                block = 'PA_V3: '+repr(self.pa_obs)+'\n'
-
-            else:
-                block += 'CD_11  CD_12: '+repr(self.get_archivekw('CD1_1'))+'  '+repr(self.get_archivekw('CD1_2')) +'\n'
-                block += 'CD_21  CD_22: '+repr(self.get_archivekw('CD2_1'))+'  '+repr(self.get_archivekw('CD2_2')) +'\n'
-                block += 'CRVAL       : '+repr(self.get_archivekw('CRVAL1'))+'  '+repr(self.get_archivekw('CRVAL2')) + '\n'
-                block += 'CRPIX       : '+repr(self.get_archivekw('CRPIX1'))+'  '+repr(self.get_archivekw('CRPIX2')) + '\n'
-                block += 'NAXIS       : '+repr(int(self.get_archivekw('NAXIS1')))+'  '+repr(int(self.get_archivekw('NAXIS2'))) + '\n'
-                block += 'Plate Scale : '+repr(self.get_archivekw('pixel scale'))+'\n'
-                block += 'ORIENTAT    : '+repr(self.get_archivekw('ORIENTAT'))+'\n'
-
-            print(block)
-
-    def get_archivekw(self,keyword):
-        """ Return an archived/backup value for the keyword. """
-        return self.orig_wcs[self.backup[keyword]]
-
-    def set_pscale(self):
-        """ Compute the pixel scale based on active WCS values. """
-        if self.new:
-            self.pscale = 1.0
-        else:
-            self.pscale = self.compute_pscale(self.cd11,self.cd21)
-
-    def compute_pscale(self,cd11,cd21):
-        """ Compute the pixel scale based on active WCS values. """
-        return N.sqrt(N.power(cd11,2)+N.power(cd21,2)) * 3600.
-
-    def get_orient(self):
-        """ Return the computed orientation based on CD matrix. """
-        return RADTODEG(N.arctan2(self.cd12,self.cd22))
-
-    def set_orient(self):
-        """ Return the computed orientation based on CD matrix. """
-        self.orient = RADTODEG(N.arctan2(self.cd12,self.cd22))
-
-    def update(self):
-        """ Update computed values of WCS based on current CD matrix."""
-        self.set_pscale()
-        self.set_orient()
-
-    def updateWCS(self, pixel_scale=None, orient=None,refpos=None,refval=None,size=None):
-        """
-        Create a new CD Matrix from the absolute pixel scale
-        and reference image orientation.
-        """
-        # Set up parameters necessary for updating WCS
-        # Check to see if new value is provided,
-        # If not, fall back on old value as the default
-
-        _updateCD = no
-        if orient != None and orient != self.orient:
-            pa = DEGTORAD(orient)
-            self.orient = orient
-            self._orient_lin = orient
-            _updateCD = yes
-        else:
-            # In case only pixel_scale was specified
-            pa = DEGTORAD(self.orient)
-
-        if pixel_scale != None and pixel_scale != self.pscale:
-            _ratio = pixel_scale / self.pscale
-            self.pscale = pixel_scale
-            _updateCD = yes
-        else:
-            # In case, only orient was specified
-            pixel_scale = self.pscale
-            _ratio = None
-
-        # If a new plate scale was given,
-        # the default size should be revised accordingly
-        # along with the default reference pixel position.
-        # Added 31 Mar 03, WJH.
-        if _ratio != None:
-            self.naxis1 /= _ratio
-            self.naxis2 /= _ratio
-            self.crpix1 = self.naxis1/2.
-            self.crpix2 = self.naxis2/2.
-
-        # However, if the user provides a given size,
-        # set it to use that no matter what.
-        if size != None:
-            self.naxis1 = size[0]
-            self.naxis2 = size[1]
-
-        # Insure that naxis1,2 always return as integer values.
-        self.naxis1 = int(self.naxis1)
-        self.naxis2 = int(self.naxis2)
-
-        if refpos != None:
-            self.crpix1 = refpos[0]
-            self.crpix2 = refpos[1]
-        if self.crpix1 == None:
-            self.crpix1 = self.naxis1/2.
-            self.crpix2 = self.naxis2/2.
-
-        if refval != None:
-            self.crval1 = refval[0]
-            self.crval2 = refval[1]
-
-        # Reset WCS info now...
-        if _updateCD:
-            # Only update this should the pscale or orientation change...
-            pscale = pixel_scale / 3600.
-
-            self.cd11 = -pscale * N.cos(pa)
-            self.cd12 = pscale * N.sin(pa)
-            self.cd21 = self.cd12
-            self.cd22 = -self.cd11
-
-        # Now make sure that all derived values are really up-to-date based
-        # on these changes
-        self.update()
-
-    def scale_WCS(self,pixel_scale,retain=True):
-        ''' Scale the WCS to a new pixel_scale. The 'retain' parameter
-        [default value: True] controls whether or not to retain the original
-        distortion solution in the CD matrix.
-        '''
-        _ratio = pixel_scale / self.pscale
-
-        # Correct the size of the image and CRPIX values for scaled WCS
-        self.naxis1 /= _ratio
-        self.naxis2 /= _ratio
-        self.crpix1 = self.naxis1/2.
-        self.crpix2 = self.naxis2/2.
-
-        if retain:
-            # Correct the WCS while retaining original distortion information
-            self.cd11 *= _ratio
-            self.cd12 *= _ratio
-            self.cd21 *= _ratio
-            self.cd22 *= _ratio
-        else:
-            pscale = pixel_scale / 3600.
-            self.cd11 = -pscale * N.cos(pa)
-            self.cd12 = pscale * N.sin(pa)
-            self.cd21 = self.cd12
-            self.cd22 = -self.cd11
-
-        # Now make sure that all derived values are really up-to-date based
-        # on these changes
-        self.update()
-
-    def xy2rd(self,pos):
-        """
-        This method would apply the WCS keywords to a position to
-        generate a new sky position.
-
-        The algorithm comes directly from 'imgtools.xy2rd'
-
-        translate (x,y) to (ra, dec)
-        """
-        if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0:
-            print('XY2RD only supported for TAN projections.')
-            raise TypeError
-
-        if isinstance(pos,N.ndarray):
-            # If we are working with an array of positions,
-            # point to just X and Y values
-            posx = pos[:,0]
-            posy = pos[:,1]
-        else:
-            # Otherwise, we are working with a single X,Y tuple
-            posx = pos[0]
-            posy = pos[1]
-
-        xi = self.cd11 * (posx - self.crpix1) + self.cd12 * (posy - self.crpix2)
-        eta = self.cd21 * (posx - self.crpix1) + self.cd22 * (posy - self.crpix2)
-
-        xi = DEGTORAD(xi)
-        eta = DEGTORAD(eta)
-        ra0 = DEGTORAD(self.crval1)
-        dec0 = DEGTORAD(self.crval2)
-
-        ra = N.arctan((xi / (N.cos(dec0)-eta*N.sin(dec0)))) + ra0
-        dec = N.arctan( ((eta*N.cos(dec0)+N.sin(dec0)) /
-                (N.sqrt((N.cos(dec0)-eta*N.sin(dec0))**2 + xi**2))) )
-
-        ra = RADTODEG(ra)
-        dec = RADTODEG(dec)
-        ra = DIVMOD(ra, 360.)
-
-        # Otherwise, just return the RA,Dec tuple.
-        return ra,dec
-
-
-    def rd2xy(self,skypos,hour=no):
-        """
-        This method would use the WCS keywords to compute the XY position
-        from a given RA/Dec tuple (in deg).
-
-        NOTE: Investigate how to let this function accept arrays as well
-        as single positions. WJH 27Mar03
-
-        """
-        if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0:
-            print('RD2XY only supported for TAN projections.')
-            raise TypeError
-
-        det = self.cd11*self.cd22 - self.cd12*self.cd21
-
-        if det == 0.0:
-            raise ArithmeticError("singular CD matrix!")
-
-        cdinv11 = self.cd22 / det
-        cdinv12 = -self.cd12 / det
-        cdinv21 = -self.cd21 / det
-        cdinv22 = self.cd11 / det
-
-        # translate (ra, dec) to (x, y)
-
-        ra0 = DEGTORAD(self.crval1)
-        dec0 = DEGTORAD(self.crval2)
-        if hour:
-            skypos[0] = skypos[0] * 15.
-        ra = DEGTORAD(skypos[0])
-        dec = DEGTORAD(skypos[1])
-
-        bottom = float(N.sin(dec)*N.sin(dec0) + N.cos(dec)*N.cos(dec0)*N.cos(ra-ra0))
-        if bottom == 0.0:
-            raise ArithmeticError("Unreasonable RA/Dec range!")
-
-        xi = RADTODEG((N.cos(dec) * N.sin(ra-ra0) / bottom))
-        eta = RADTODEG((N.sin(dec)*N.cos(dec0) - N.cos(dec)*N.sin(dec0)*N.cos(ra-ra0)) / bottom)
-
-        x = cdinv11 * xi + cdinv12 * eta + self.crpix1
-        y = cdinv21 * xi + cdinv22 * eta + self.crpix2
-
-        return x,y
-
-    def rotateCD(self,orient):
-        """ Rotates WCS CD matrix to new orientation given by 'orient'
-        """
-        # Determine where member CRVAL position falls in ref frame
-        # Find out whether this needs to be rotated to align with
-        # reference frame.
-
-        _delta = self.get_orient() - orient
-        if _delta == 0.:
-            return
-
-        # Start by building the rotation matrix...
-        _rot = fileutil.buildRotMatrix(_delta)
-        # ...then, rotate the CD matrix and update the values...
-        _cd = N.array([[self.cd11,self.cd12],[self.cd21,self.cd22]],dtype=N.float64)
-        _cdrot = N.dot(_cd,_rot)
-        self.cd11 = _cdrot[0][0]
-        self.cd12 = _cdrot[0][1]
-        self.cd21 = _cdrot[1][0]
-        self.cd22 = _cdrot[1][1]
-        self.orient = orient
-
-    def recenter(self):
-        """
-        Reset the reference position values to correspond to the center
-        of the reference frame.
-        Algorithm used here developed by Colin Cox - 27-Jan-2004.
-        """
-        if self.ctype1.find('TAN') < 0 or self.ctype2.find('TAN') < 0:
-            print('WCS.recenter() only supported for TAN projections.')
-            raise TypeError
-
-        # Check to see if WCS is already centered...
-        if self.crpix1 == self.naxis1/2. and self.crpix2 == self.naxis2/2.:
-            # No recentering necessary... return without changing WCS.
-            return
-
-        # This offset aligns the WCS to the center of the pixel, in accordance
-        # with the 'align=center' option used by 'drizzle'.
-        #_drz_off = -0.5
-        _drz_off = 0.
-        _cen = (self.naxis1/2.+ _drz_off,self.naxis2/2. + _drz_off)
-
-        # Compute the RA and Dec for center pixel
-        _cenrd = self.xy2rd(_cen)
-        _cd = N.array([[self.cd11,self.cd12],[self.cd21,self.cd22]],dtype=N.float64)
-        _ra0 = DEGTORAD(self.crval1)
-        _dec0 = DEGTORAD(self.crval2)
-        _ra = DEGTORAD(_cenrd[0])
-        _dec = DEGTORAD(_cenrd[1])
-
-        # Set up some terms for use in the final result
-        _dx = self.naxis1/2. - self.crpix1
-        _dy = self.naxis2/2. - self.crpix2
-
-        _dE,_dN = DEGTORAD(N.dot(_cd,(_dx,_dy)))
-        _dE_dN = 1 + N.power(_dE,2) + N.power(_dN,2)
-        _cosdec = N.cos(_dec)
-        _sindec = N.sin(_dec)
-        _cosdec0 = N.cos(_dec0)
-        _sindec0 = N.sin(_dec0)
-
-        _n1 = N.power(_cosdec,2) + _dE*_dE + _dN*_dN*N.power(_sindec,2)
-        _dra_dE = (_cosdec0 - _dN*_sindec0)/_n1
-        _dra_dN = _dE*_sindec0 /_n1
-
-        _ddec_dE = -_dE*N.tan(_dec) / _dE_dN
-        _ddec_dN = (1/_cosdec) * ((_cosdec0 / N.sqrt(_dE_dN)) - (_dN*N.sin(_dec) / _dE_dN))
-
-        # Compute new CD matrix values now...
-        _cd11n = _cosdec * (self.cd11*_dra_dE + self.cd21 * _dra_dN)
-        _cd12n = _cosdec * (self.cd12*_dra_dE + self.cd22 * _dra_dN)
-        _cd21n = self.cd11 * _ddec_dE + self.cd21 * _ddec_dN
-        _cd22n = self.cd12 * _ddec_dE + self.cd22 * _ddec_dN
-
-        _new_orient = RADTODEG(N.arctan2(_cd12n,_cd22n))
-        #_new_pscale = N.sqrt(N.power(_cd11n,2)+N.power(_cd21n,2)) * 3600.
-
-        # Update the values now...
-        self.crpix1 = _cen[0]
-        self.crpix2 = _cen[1]
-        self.crval1 = RADTODEG(_ra)
-        self.crval2 = RADTODEG(_dec)
-
-        # Keep the same plate scale, only change the orientation
-        self.rotateCD(_new_orient)
-
-        # These would update the CD matrix with the new rotation
-        # ALONG with the new plate scale which we do not want.
-        self.cd11 = _cd11n
-        self.cd12 = _cd12n
-        self.cd21 = _cd21n
-        self.cd22 = _cd22n
-        #self.pscale = _new_pscale
-
-        self.update()
-
-    def write(self,fitsname=None,wcs=None,archive=True,overwrite=False,quiet=True):
-        """
-        Write out the values of the WCS keywords to the
-        specified image.
-
-        If it is a GEIS image and 'fitsname' has been provided,
-        it will automatically make a multi-extension
-        FITS copy of the GEIS and update that file. Otherwise, it
-        throw an Exception if the user attempts to directly update
-        a GEIS image header.
-
-        If archive=True, also write out archived WCS keyword values to file.
-        If overwrite=True, replace archived WCS values in file with new values.
-
-        If a WCSObject is passed through the 'wcs' keyword, then the WCS keywords
-        of this object are copied to the header of the image to be updated. A use case
-        fo rthis is updating the WCS of a WFPC2 data quality (_c1h.fits) file
-        in order to be in sync with the science (_c0h.fits) file.
-
-        """
-        ## Start by making sure all derived values are in sync with CD matrix
-        self.update()
-
-        image = self.rootname
-        _fitsname = fitsname
-
-        if image.find('.fits') < 0 and _fitsname != None:
-            # A non-FITS image was provided, and openImage made a copy
-            # Update attributes to point to new copy instead
-            self.geisname = image
-            image = self.rootname = _fitsname
-
-        # Open image as writable FITS object
-        fimg = fileutil.openImage(image, mode='update', fitsname=_fitsname)
-
-        _root,_iextn = fileutil.parseFilename(image)
-        _extn = fileutil.getExtn(fimg,_iextn)
-
-        # Write out values to header...
-        if wcs:
-            _wcsobj = wcs
-        else:
-            _wcsobj = self
-
-        for key in _wcsobj.wcstrans.keys():
-            _dkey = _wcsobj.wcstrans[key]
-            if _dkey != 'pscale':
-                _extn.header[key] = _wcsobj.__dict__[_dkey]
-
-        # Close the file
-        fimg.close()
-        del fimg
-        if archive:
-            self.write_archive(fitsname=fitsname,overwrite=overwrite,quiet=quiet)
-
-    def restore(self):
-        """ Reset the active WCS keywords to values stored in the
-            backup keywords.
-        """
-        # If there are no backup keys, do nothing...
-        if len(list(self.backup.keys())) == 0:
-            return
-        for key in self.backup.keys():
-            if key != 'WCSCDATE':
-                self.__dict__[self.wcstrans[key]] = self.orig_wcs[self.backup[key]]
-
-        self.update()
-
-    def archive(self,prepend=None,overwrite=no,quiet=yes):
-        """ Create backup copies of the WCS keywords with the given prepended
-            string.
-            If backup keywords are already present, only update them if
-            'overwrite' is set to 'yes', otherwise, do warn the user and do nothing.
-            Set the WCSDATE at this time as well.
-        """
-        # Verify that existing backup values are not overwritten accidentally.
-        if len(list(self.backup.keys())) > 0 and overwrite == no:
-            if not quiet:
-                print('WARNING: Backup WCS keywords already exist! No backup made.')
-                print('         The values can only be overridden if overwrite=yes.')
-            return
-
-        # Establish what prepend string to use...
-        if prepend == None:
-            if self.prepend != None:
-                _prefix = self.prepend
-            else:
-                _prefix = DEFAULT_PREFIX
-        else:
-            _prefix = prepend
-
-        # Update backup and orig_wcs dictionaries
-        # We have archive keywords and a defined prefix
-        # Go through and append them to self.backup
-        self.prepend = _prefix
-        for key in self.wcstrans.keys():
-            if key != 'pixel scale':
-                _archive_key = self._buildNewKeyname(key,_prefix)
-            else:
-                _archive_key = self.prepend.lower()+'pscale'
-#            if key != 'pixel scale':
-            self.orig_wcs[_archive_key] = self.__dict__[self.wcstrans[key]]
-            self.backup[key] = _archive_key
-            self.revert[_archive_key] = key
-
-        # Setup keyword to record when these keywords were backed up.
-        self.orig_wcs['WCSCDATE']= fileutil.getLTime()
-        self.backup['WCSCDATE'] = 'WCSCDATE'
-        self.revert['WCSCDATE'] = 'WCSCDATE'
-
-    def read_archive(self,header,prepend=None):
-        """ Extract a copy of WCS keywords from an open file header,
-            if they have already been created and remember the prefix
-            used for those keywords. Otherwise, setup the current WCS
-            keywords as the archive values.
-        """
-        # Start by looking for the any backup WCS keywords to
-        # determine whether archived values are present and to set
-        # the prefix used.
-        _prefix = None
-        _archive = False
-        if header != None:
-            for kw in header.items():
-                if kw[0][1:] in self.wcstrans.keys():
-                    _prefix = kw[0][0]
-                    _archive = True
-                    break
-
-        if not _archive:
-            self.archive(prepend=prepend)
-            return
-
-        # We have archive keywords and a defined prefix
-        # Go through and append them to self.backup
-        if _prefix != None:
-            self.prepend = _prefix
-        else:
-            self.prepend = DEFAULT_PREFIX
-
-        for key in self.wcstrans.keys():
-            _archive_key = self._buildNewKeyname(key,_prefix)
-            if key!= 'pixel scale':
-                if _archive_key in header:
-                    self.orig_wcs[_archive_key] = header[_archive_key]
-                else:
-                    self.orig_wcs[_archive_key] = header[key]
-                self.backup[key] = _archive_key
-                self.revert[_archive_key] = key
-
-        # Establish plate scale value
-        _cd11str = self.prepend+'CD1_1'
-        _cd21str = self.prepend+'CD2_1'
-        pscale = self.compute_pscale(self.orig_wcs[_cd11str],self.orig_wcs[_cd21str])
-        _archive_key = self.prepend.lower()+'pscale'
-        self.orig_wcs[_archive_key] = pscale
-        self.backup['pixel scale'] = _archive_key
-        self.revert[_archive_key] = 'pixel scale'
-
-        # Setup keyword to record when these keywords were backed up.
-        if 'WCSCDATE' in header:
-            self.orig_wcs['WCSCDATE'] = header['WCSCDATE']
-        else:
-            self.orig_wcs['WCSCDATE'] = fileutil.getLTime()
-        self.backup['WCSCDATE'] = 'WCSCDATE'
-        self.revert['WCSCDATE'] = 'WCSCDATE'
-
-    def write_archive(self,fitsname=None,overwrite=no,quiet=yes):
-        """ Saves a copy of the WCS keywords from the image header
-            as new keywords with the user-supplied 'prepend'
-            character(s) prepended to the old keyword names.
-
-            If the file is a GEIS image and 'fitsname' != None, create
-            a FITS copy and update that version; otherwise, raise
-            an Exception and do not update anything.
-
-        """
-        _fitsname = fitsname
-
-        # Open image in update mode
-        #    Copying of GEIS images handled by 'openImage'.
-        fimg = fileutil.openImage(self.rootname,mode='update',fitsname=_fitsname)
-        if self.rootname.find('.fits') < 0 and _fitsname != None:
-            # A non-FITS image was provided, and openImage made a copy
-            # Update attributes to point to new copy instead
-            self.geisname = self.rootname
-            self.rootname = _fitsname
-
-        # extract the extension ID being updated
-        _root,_iextn = fileutil.parseFilename(self.rootname)
-        _extn = fileutil.getExtn(fimg,_iextn)
-        if not quiet:
-            print('Updating archive WCS keywords for ',_fitsname)
-
-        # Write out values to header...
-        for key in self.orig_wcs.keys():
-            _comment = None
-            _dkey = self.revert[key]
-
-            # Verify that archive keywords will not be overwritten,
-            # unless overwrite=yes.
-            _old_key = key in _extn.header
-            if  _old_key == True and overwrite == no:
-                if not quiet:
-                    print('WCS keyword',key,' already exists! Not overwriting.')
-                continue
-
-            # No archive keywords exist yet in file, or overwrite=yes...
-            # Extract the value for the original keyword
-            if _dkey in _extn.header:
-
-                # Extract any comment string for the keyword as well
-                _indx_key = _extn.header.index(_dkey)
-                _full_key = _extn.header.cards[_indx_key]
-                if not quiet:
-                    print('updating ',key,' with value of: ',self.orig_wcs[key])
-                _extn.header[key] = (self.orig_wcs[key], _full_key.comment)
-
-        key = 'WCSCDATE'
-        if key not in _extn.header:
-            # Print out history keywords to record when these keywords
-            # were backed up.
-            _extn.header[key] = (self.orig_wcs[key], "Time WCS keywords were copied.")
-
-        # Close the now updated image
-        fimg.close()
-        del fimg
-
-    def restoreWCS(self,prepend=None):
-        """ Resets the WCS values to the original values stored in
-            the backup keywords recorded in self.backup.
-        """
-        # Open header for image
-        image = self.rootname
-
-        if prepend: _prepend = prepend
-        elif self.prepend: _prepend = self.prepend
-        else: _prepend = None
-
-        # Open image as writable FITS object
-        fimg = fileutil.openImage(image, mode='update')
-        # extract the extension ID being updated
-        _root,_iextn = fileutil.parseFilename(self.rootname)
-        _extn = fileutil.getExtn(fimg,_iextn)
-
-        if len(self.backup) > 0:
-            # If it knows about the backup keywords already,
-            # use this to restore the original values to the original keywords
-            for newkey in self.revert.keys():
-                if newkey != 'opscale':
-                    _orig_key = self.revert[newkey]
-                    _extn.header[_orig_key] = _extn.header[newkey]
-        elif _prepend:
-            for key in self.wcstrans.keys():
-                # Get new keyword name based on old keyname
-                #    and prepend string
-                if key != 'pixel scale':
-                    _okey = self._buildNewKeyname(key,_prepend)
-
-                    if _okey in _extn.header:
-                        _extn.header[key] = _extn.header[_okey]
-                    else:
-                        print('No original WCS values found. Exiting...')
-                        break
-        else:
-            print('No original WCS values found. Exiting...')
-
-        fimg.close()
-        del fimg
-
-    def createReferenceWCS(self,refname,overwrite=yes):
-        """ Write out the values of the WCS keywords to the NEW
-            specified image 'fitsname'.
-
-        """
-        hdu = self.createWcsHDU()
-        # If refname already exists, delete it to make way for new file
-        if os.path.exists(refname):
-            if overwrite==yes:
-                # Remove previous version and re-create with new header
-                os.remove(refname)
-                hdu.writeto(refname)
-            else:
-                # Append header to existing file
-                wcs_append = True
-                oldhdu = fits.open(refname, mode='append')
-                for e in oldhdu:
-                    if 'extname' in e.header and e.header['extname'] == 'WCS':
-                        wcs_append = False
-                if wcs_append == True:
-                    oldhdu.append(hdu)
-                oldhdu.close()
-                del oldhdu
-        else:
-            # No previous file, so generate new one from scratch
-            hdu.writeto(refname)
-
-        # Clean up
-        del hdu
-
-    def createWcsHDU(self):
-        """ Generate a WCS header object that can be used to
-            populate a reference WCS HDU.
-        """
-        hdu = fits.ImageHDU()
-        hdu.header['EXTNAME'] = 'WCS'
-        hdu.header['EXTVER'] = 1
-        # Now, update original image size information
-        hdu.header['WCSAXES'] = (2, "number of World Coordinate System axes")
-        hdu.header['NPIX1'] = (self.naxis1, "Length of array axis 1")
-        hdu.header['NPIX2'] = (self.naxis2, "Length of array axis 2")
-        hdu.header['PIXVALUE'] = (0.0, "values of pixels in array")
-
-        # Write out values to header...
-        excluded_keys = ['naxis1','naxis2']
-        for key in self.wcskeys:
-            _dkey = self.wcstrans[key]
-            if _dkey not in excluded_keys:
-                hdu.header[key] = self.__dict__[_dkey]
-
-
-        return hdu
-
-    def _buildNewKeyname(self,key,prepend):
-        """ Builds a new keyword based on original keyword name and
-            a prepend string.
-        """
-
-        if len(prepend+key) <= 8: _new_key = prepend+key
-        else: _new_key = str(prepend+key)[:8]
-
-        return _new_key
-
-
-    def copy(self,deep=yes):
-        """ Makes a (deep)copy of this object for use by other objects.
-        """
-        if deep:
-            return copy.deepcopy(self)
-        else:
-            return copy.copy(self)
-
-    def help(self):
-        """ Prints out help message."""
-        print('wcsutil Version '+str(__version__)+':\n')
-        print(self.__doc__)
diff --git a/required_pkgs/stsci.tools/lib/stsci/tools/xyinterp.py b/required_pkgs/stsci.tools/lib/stsci/tools/xyinterp.py
deleted file mode 100644
index f1acd36..0000000
--- a/required_pkgs/stsci.tools/lib/stsci/tools/xyinterp.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""
-:Module: xyinterp.py
-
-Interpolates y based on the given xval.
-
-`x` and `y` are a pair of independent/dependent variable arrays that must
-be the same length. The x array must also be sorted.
-`xval` is a user-specified value. This routine looks
-up `xval` in the x array and uses that information to properly interpolate
-the value in the y array. 
-
-
-:author: Vicki Laidler
-
-:version: '0.1 (2006-07-06)'  
-
-
-"""
-from __future__ import division # confidence high
-import numpy as N 
-
-#This section for standalone imports only-------------------------------------
-__version__ = '0.1'          #Release version number only
-__vdate__ = '2006-07-06'     #Date of this version, in this (FITS-style) format
-#-----------------------------------------------------------------------------
-
-
-def xyinterp(x,y,xval):
-    """ 
-    
-    :Purpose: Interpolates y based on the given xval.
-
-    x and y are a pair of independent/dependent variable arrays that must
-    be the same length. The x array must also be sorted.
-    xval is a user-specified value. This routine looks
-    up xval in the x array and uses that information to properly interpolate
-    the value in the y array.  
-
-    Notes
-    =====
-    Use the searchsorted method on the X array to determine the bin in
-    which xval falls; then use that information to compute the corresponding
-    y value.
-    
-
-    See Also 
-    ========
-    numpy
-
-    Parameters
-    ==========
-
-    x: 1D numpy array  
-        independent variable array: MUST BE SORTED
-
-    y: 1D numpy array
-        dependent variable array
-
-    xval: float 
-        the x value at which you want to know the value of y
-
-    Returns
-    =======
-    y: float 
-        the value of y corresponding to xval
-
-    Raises
-    ======
-    ValueError: 
-        If arrays are unequal length; or x array is unsorted;
-        or if xval falls outside the bounds of x (extrapolation is unsupported
-
-    :version: 0.1 last modified 2006-07-06
-
-"""
-
-    #Enforce conditions on x, y, and xval:
-    #x and y must correspond
-    if len(x) != len(y):
-        raise ValueError("Input arrays must be equal lengths")
-
-    #Extrapolation not supported
-    if xval < x[0]:
-        raise ValueError("Value %f < min(x) %f: Extrapolation unsupported"%(xval,x[0]))
-    if xval > x[-1]:
-        raise ValueError("Value > max(x): Extrapolation unsupported")
-
-    #This algorithm only works on sorted data
-    if x.argsort().all() != N.arange(len(x)).all():
-        raise ValueError("Input array x must be sorted")
-    
-    # Now do the real work.
-    hi = x.searchsorted(xval)
-    lo = hi - 1
-    
-    try:
-        seg = (float(xval)-x[lo]) / (x[hi] - x[lo])
-    except ZeroDivisionError:
-        seg = 0.0
-
-    yval = y[lo] + seg*(y[hi] - y[lo])
-    return yval
diff --git a/required_pkgs/stsci.tools/scripts/convertwaiveredfits b/required_pkgs/stsci.tools/scripts/convertwaiveredfits
deleted file mode 100755
index b46553e..0000000
--- a/required_pkgs/stsci.tools/scripts/convertwaiveredfits
+++ /dev/null
@@ -1,6 +0,0 @@
-#! python
-from __future__ import division # confidence high
-
-import stsci.tools.convertwaiveredfits 
-stsci.tools.convertwaiveredfits.main()
-
diff --git a/required_pkgs/stsci.tools/scripts/stscidocs b/required_pkgs/stsci.tools/scripts/stscidocs
deleted file mode 100755
index 2e3348f..0000000
--- a/required_pkgs/stsci.tools/scripts/stscidocs
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import print_function
-
-import os
-import sys
-import webbrowser
-
-
-DOCUMENTATION_URL = 'http://stsdas.stsci.edu/stsci_python_epydoc'
-
-
-if __name__ == '__main__':
-    for arg in sys.argv:
-        if arg in ('-h', '--help'):
-            print('Uses the default web browser to display the ' \
-                  'stsci_python documentation')
-            break
-
-    try:
-        webbrowser.open(DOCUMENTATION_URL, new=1)
-    except:
-        print('There was a problem display the documentation.')
-        print('To view the documentation, direct your web browser to the ' \
-              'following URL:\n')
-        print('    ' + DOCUMENTATION_URL)
diff --git a/required_pkgs/stsci.tools/setup.cfg b/required_pkgs/stsci.tools/setup.cfg
deleted file mode 100644
index 3e43501..0000000
--- a/required_pkgs/stsci.tools/setup.cfg
+++ /dev/null
@@ -1,45 +0,0 @@
-[metadata]
-name = stsci.tools
-version = 3.4.1.dev
-author = STScI
-author-email = help at stsci.edu
-home-page = http://www.stsci.edu/resources/software_hardware/stsci_python
-classifier = 
-	Intended Audience :: Science/Research
-	License :: OSI Approved :: BSD License
-	Operating System :: OS Independent
-	Programming Language :: Python
-	Topic :: Scientific/Engineering :: Astronomy
-	Topic :: Software Development :: Libraries :: Python Modules
-requires-python = >=2.6
-requires-dist = 
-	d2to1
-	setuptools
-	stsci.distutils
-	numpy (>=1.5.1)
-
-[files]
-packages_root = lib
-packages = 
-	stsci
-	stsci.tools
-	stsci.tools.tests
-package_data = 
-	stsci.tools.tests = *.fits
-scripts = 
-	scripts/convertwaiveredfits
-	scripts/stscidocs
-
-[nosetests]
-exclude = .*(testutil|tester)
-
-[global]
-setup_hooks = 
-	stsci.distutils.hooks.use_packages_root
-	stsci.distutils.hooks.tag_svn_revision
-	stsci.distutils.hooks.version_setup_hook
-
-[backwards_compat]
-use_2to3 = False
-zip_safe = False
-
diff --git a/required_pkgs/stsci.tools/setup.cfg.orig b/required_pkgs/stsci.tools/setup.cfg.orig
deleted file mode 100644
index a093d60..0000000
--- a/required_pkgs/stsci.tools/setup.cfg.orig
+++ /dev/null
@@ -1,46 +0,0 @@
-[metadata]
-name = stsci.tools
-version = 3.4.1.dev
-author = STScI
-author-email = help at stsci.edu
-home-page = http://www.stsci.edu/resources/software_hardware/stsci_python
-classifier = 
-	Intended Audience :: Science/Research
-	License :: OSI Approved :: BSD License
-	Operating System :: OS Independent
-	Programming Language :: Python
-	Topic :: Scientific/Engineering :: Astronomy
-	Topic :: Software Development :: Libraries :: Python Modules
-requires-python = >=2.6
-requires-dist = 
-	d2to1
-	setuptools
-	stsci.distutils
-	astropy (>=0.3.1)
-	numpy (>=1.5.1)
-
-[files]
-packages_root = lib
-packages = 
-	stsci
-	stsci.tools
-	stsci.tools.tests
-package_data = 
-	stsci.tools.tests = *.fits
-scripts = 
-	scripts/convertwaiveredfits
-	scripts/stscidocs
-
-[nosetests]
-exclude = .*(testutil|tester)
-
-[global]
-setup_hooks = 
-	stsci.distutils.hooks.use_packages_root
-	stsci.distutils.hooks.tag_svn_revision
-	stsci.distutils.hooks.version_setup_hook
-
-[backwards_compat]
-use_2to3 = False
-zip_safe = False
-
diff --git a/required_pkgs/stsci.tools/setup.py b/required_pkgs/stsci.tools/setup.py
deleted file mode 100755
index b8ff3f8..0000000
--- a/required_pkgs/stsci.tools/setup.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-
-try:
-    from setuptools import setup
-except ImportError:
-    from ez_setup import use_setuptools
-    use_setuptools()
-    from setuptools import setup
-
-
-setup(
-    setup_requires=['d2to1>=0.2.11', 'stsci.distutils>=0.3'],
-    namespace_packages=['stsci'], packages=['stsci'],
-    d2to1=True,
-)
diff --git a/scripts/pyraf b/scripts/pyraf
index afa409f..6835f6e 100755
--- a/scripts/pyraf
+++ b/scripts/pyraf
@@ -7,8 +7,7 @@ terms of use.
 
 Usage: pyraf [options] [savefile]
 
-where savefile is an optional save file to start from and options are one
-or more of:
+where savefile is an optional save file to start from, and options are:
   -c cmd  Command passed in as string (any valid PyRAF command)
   -e      Turn on ECL mode
   -h      Print this message
@@ -35,7 +34,7 @@ Long versions of options:
   -y  --ipython
 """
 
-# $Id: pyraf 2120 2014-01-01 16:41:51Z sontag $
+# $Id$
 #
 # R. White, 2000 January 21
 
diff --git a/scripts/pyraf.bat b/scripts/pyraf.bat
deleted file mode 100755
index c369aaf..0000000
--- a/scripts/pyraf.bat
+++ /dev/null
@@ -1,30 +0,0 @@
- at ECHO OFF
-REM BFCPEOPTIONSTART
-REM Advanced BAT to EXE Converter www.BatToExeConverter.com
-REM BFCPEEXE=C:\Documents and Settings\Owner\Desktop\PyRAF.exe
-REM BFCPEICON=C:\tmp\pyraflogo_rgb_web.ico
-REM BFCPEICONINDEX=8
-REM BFCPEEMBEDDISPLAY=0
-REM BFCPEEMBEDDELETE=1
-REM BFCPEVERINCLUDE=1
-REM BFCPEVERVERSION=1.0.0.0
-REM BFCPEVERPRODUCT=PyRAF
-REM BFCPEVERDESC=PyRAF for Windows
-REM BFCPEVERCOMPANY=STScI
-REM BFCPEVERCOPYRIGHT=See startup messages
-REM BFCPEOPTIONEND
- at ECHO ON
- at echo off
-rem - This was created via "Advanced BAT to EXE Converter v2.45"
-rem - Install resulting exe in:
-rem -    %USERPROFILE%\Desktop\PyRAF.exe
-rem - I used the following to convert the PyRAF gif to a .ico file:
-rem -    http://www.coolutils.com/online/image-converter
-echo.
-echo Running %0
-cd %APPDATA%
-echo Launching PyRAF ...
-echo.
-runpyraf.py
-echo.
-pause
diff --git a/setup.cfg b/setup.cfg
index 7caf01c..d59bb2c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = pyraf
-version = 2.1.10
+version = 2.1.11
 author = Rick White, Perry Greenfield, Chris Sontag
 author-email = help at stsci.edu
 home-page = http://www.stsci.edu/resources/software_hardware/pyraf
diff --git a/shortcut.vbs b/shortcut.vbs
deleted file mode 100644
index 32cba69..0000000
--- a/shortcut.vbs
+++ /dev/null
@@ -1,48 +0,0 @@
-' This is a proof-of-concept for creating shortcuts on MS Windows.
-' To run this script, put it in a file named something.vbs and then
-'
-' c:\> cscript something.vbs
-'
-' ( so, like os.system() from python or something like that. )  It
-' appears that cscript is available at least on windows/xp and
-' windows7.
-'
-' A general discussion where the outline of this script came from:
-' http://stackoverflow.com/questions/346107/creating-a-shortcut-for-a-exe-from-a-batch-file
-' http://www.tomshardware.com/forum/52871-45-creating-desktop-shortcuts-command-line
-'
-' Documentation of special folders:
-' http://msdn.microsoft.com/en-us/library/system.environment.specialfolder.aspx
-'
-' About cscript:
-' http://technet.microsoft.com/en-us/library/bb490816.aspx
-' http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/wsh_runfromcommandprompt.mspx?mfr=true
-'
-' This language is VBScript.
-
-sub make_shortcut( sp_folder )
-	' look up the location of the special folder where we want the shortcut to appear
-	set sh = WScript.CreateObject("WScript.Shell" )
-	where = sh.SpecialFolders(sp_folder)
-
-	' append the name we want for the shortcut
-	where = where & "\pyraf.lnk"
-
-	' create an in-memory object for the shortcut
-	set link = sh.CreateShortcut( where )
-
-	' set various attributes of the shortcut
-	' (this part needs work)
-	link.TargetPath = "c:\python27\scripts\runpyraf.py"
-	link.WindowStyle = 1
-	link.IconLocation = "c:\application folder\application.ico"
-	link.Description = "Pyraf"
-	link.WorkingDirectory = "c:\application folder"
-
-	' actually write the shortcut to disk
-	link.Save
-end sub
-
-' create the shortcuts in various places
-make_shortcut( "Desktop" )
-make_shortcut( "StartMenu" )
diff --git a/src/xutil.c b/src/xutil.c
index 4dcbc85..f77b798 100644
--- a/src/xutil.c
+++ b/src/xutil.c
@@ -8,7 +8,7 @@
 #include <setjmp.h>
 #include <string.h>
 
-/* $Id: xutil.c 1699 2012-04-19 17:45:35Z sontag $ */
+/* $Id$ */
 
 /* Windows and cursor manipulations not provided by Tkinter or any other
 ** standard python library.  This file handles Python 3 as well.
diff --git a/tools/cachecompare.py b/tools/cachecompare.py
index 784c9da..9a01dbd 100755
--- a/tools/cachecompare.py
+++ b/tools/cachecompare.py
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
 
 """cachecompare.py: Compare contents of new CL to old cache
-$Id: cachecompare.py 1186 2010-04-22 13:24:06Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/tools/cachesearch.py b/tools/cachesearch.py
index 69c6d97..e24d87c 100644
--- a/tools/cachesearch.py
+++ b/tools/cachesearch.py
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
 
 """cachesearch.py: Check all entries in CL cache for a particular string
-$Id: cachesearch.py 1186 2010-04-22 13:24:06Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/tools/checkcompileall.py b/tools/checkcompileall.py
index f0d5dd0..9346453 100644
--- a/tools/checkcompileall.py
+++ b/tools/checkcompileall.py
@@ -2,7 +2,7 @@
 
 """checkcompileall.py: Read the output from compileallcl and print just the errors
 
-$Id: checkcompileall.py 1032 2009-06-18 01:52:35Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 import re, sys
diff --git a/tools/compileallcl.py b/tools/compileallcl.py
index 99f0c5f..41a451d 100644
--- a/tools/compileallcl.py
+++ b/tools/compileallcl.py
@@ -9,7 +9,7 @@ is moved to ~/iraf/pyraf/clcache.old.
 Set the -r flag to recompile (default is to close the system cache
 and move the user cache so that everything gets compiled from scratch.)
 
-$Id: compileallcl.py 1032 2009-06-18 01:52:35Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/tools/createTarBall.csh b/tools/createTarBall.csh
deleted file mode 100755
index 8ee46c9..0000000
--- a/tools/createTarBall.csh
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/bin/csh -f
-#
-# $Id: createTarBall.csh 2181 2014-05-07 18:57:08Z sontag $
-#
-
-if ($#argv != 3) then
-   echo "usage:  $0  dev|rel  2|3  py-bin-dir"
-   exit 1
-endif
-set isdev = 0
-if ($argv[1] == "dev") then
-   set isdev = 1
-endif
-set pyver = 2
-if ($argv[2] == "3") then
-   set pyver = 3
-endif
-set pybin = $argv[3]
-
-set svnbin = /usr/bin/svn
-if (`uname -n` == "somenode.stsci.edu") then
-   set svnbin = svn
-endif
-
-if (!(-d ~/.stsci_tmp)) then
-   mkdir ~/.stsci_tmp
-   if ($status != 0) then
-      echo "ERROR creating ~/.stsci_tmp"
-      exit 1
-   endif
-endif
-
-set workDir = "~/.stsci_tmp/pyraf_tar_py${pyver}_`uname -n`"
-echo Creating work area: $workDir
-/bin/rm -rf $workDir
-mkdir $workDir
-cd $workDir
-if ($status != 0) then
-   exit 1
-endif
-#
-if ($isdev == 1) then
-   set pyr = "pyraf-dev"
-#  set co_pyraf = 'co -q -r HEAD http://svn6.assembla.com/svn/pyraf/trunk'
-   set co_pyraf = 'co -q -r HEAD https://aeon.stsci.edu/ssb/svn/pyraf/trunk'
-   set co_tools = 'co -q -r HEAD https://svn.stsci.edu/svn/ssb/stsci_python/stsci.tools/trunk'
-else
-   set pyr = "pyraf-2.1"
-   echo -n 'What will the pyraf dir name be? ('$pyr'): '
-   set ans = $<
-   if ($ans != '') then
-      set pyr = $ans
-   endif
-   set brn = "tags/release_2.1"
-   echo -n 'What is branch name? ('$brn'): '
-   set ans = $<
-   if ($ans != '') then
-      set brn = $ans
-   endif
-   set co_pyraf = "co -q https://aeon.stsci.edu/ssb/svn/pyraf/${brn}"
-   set co_tools = "co -q https://svn.stsci.edu/svn/ssb/stsci_python/stsci.tools/trunk"
-endif
-
-# get all source via SVN
-echo "Downloading source for: $pyr from: `echo $co_pyraf | sed 's/.*:\/\///'` "
-$svnbin $co_pyraf $pyr
-if ($status != 0) then
-   echo ERROR svn-ing pyraf
-   exit 1
-endif
-
-# for now, add svninfo file manually
-#cd $workDir/$pyr
-#set rev = `$svnbin info | grep '^Revision:' | sed 's/.* //'`
-#cd $workDir/$pyr/lib/pyraf
-#if (!(-e svninfo.py)) then
-#   echo '__svn_version__ = "'${rev}'"' > svninfo.py
-#   echo '__full_svn_info__ = ""' >> svninfo.py
-#   echo '__setup_datetime__ = "'`date`'"' >> svninfo.py
-#endif
-
-# get extra pkgs into a subdir
-cd $workDir/$pyr
-mkdir required_pkgs
-cd $workDir/$pyr/required_pkgs
-echo "Downloading source for: stsci.tools and dist. stuff"
-#
-# STABLE!: -q -r '{2013-02-11}', but continue to use HEAD if possible
-$svnbin co -q -r HEAD https://svn.stsci.edu/svn/ssb/stsci_python/d2to1/trunk d2to1
-if ($status != 0) then
-   echo ERROR svn-ing d2to1
-   exit 1
-endif
-#
-$svnbin co -q -r HEAD https://svn.stsci.edu/svn/ssb/stsci_python/stsci.distutils/trunk stsci.distutils
-if ($status != 0) then
-   echo ERROR svn-ing stsci.distutils
-   exit 1
-endif
-#
-$svnbin $co_tools stsci.tools
-if ($status != 0) then
-   echo ERROR svn-ing stsci.tools
-   exit 1
-endif
-
-# edit setup to comment out pyfits/astropy requirements (dont need for pyraf)
-cd $workDir/$pyr/required_pkgs/stsci.tools
-if (-e setup.cfg) then
-   /bin/cp setup.cfg setup.cfg.orig
-   cat setup.cfg.orig |grep -v 'pyfits *(' |grep -v 'astropy *(' > setup.cfg
-   echo DIFF for all required pkgs/versions
-   diff setup.cfg.orig setup.cfg
-endif
-
-# Now that we have setup.cfg working better, run sdist to
-# generate the version.py file (this imports pyraf)
-# and generate the .tar.gz file
-cd $workDir/$pyr
-setenv PYRAF_NO_DISPLAY
-# FORCE_USE_PY27... $pybin/python setup.py sdist >& $workDir/sdist.out
-/user/${USER}/info/usrlcl273/bin/python setup.py sdist >& $workDir/sdist.out
-if ($status != 0) then
-   cat $workDir/sdist.out
-   exit 1
-endif
-
-# ---------------- HACK 1 TO WORK AROUND BUGS IN stsci_distutils ---------------
-# change code to NOT run update_svn_info() on first import of version.py
-cd $workDir/$pyr/lib/pyraf
-cp version.py version.py.orig1
-cat version.py.orig1 |sed 's/^ *update_svn_info *(/#update_svn_info(/' > version.py
-echo 'DIFF of update_svn_info() line'
-diff version.py.orig1 version.py
-# --------- END OF HACK 1 TO WORK AROUND BUGS IN stsci_distutils ---------------
-
-# get version info
-#et verinfo1 = `grep '__version__ *=' $workDir/$pyr/lib/pyraf/__init__.py | sed 's/.*= *//' | sed 's/"//g'`
-#et verinfo2 = `grep '__svn_version__' $workDir/$pyr/lib/pyraf/sv*.py | sed 's/.*= *//' | sed 's/"//g'`
-#et verinfo3 = "${verinfo1}-r$verinfo2"
-set verinfo1 = `grep '__version__ *=' $workDir/$pyr/lib/pyraf/version.py |sed 's/.*= *//' |sed "s/'//g"`
-set verinfo2 = `grep '__svn_revision__ *=' $workDir/$pyr/lib/pyraf/version.py |head -1 |sed 's/.*= *//' |sed "s/'//g"`
-set svn_says = `${svnbin}version |sed 's/M//'`
-
-# ---------------- HACK 2 TO WORK AROUND BUGS IN stsci_distutils ---------------
-set junk = `echo $verinfo2 |grep Unable.to.determine`
-if ("$junk" == "$verinfo2") then
-   # __svn_revision__ did not get set, let's set it manually...
-   cd $workDir/$pyr/lib/pyraf
-   cp version.py version.py.orig2
-   cat version.py.orig2 |sed 's/^\( *\)__svn_revision__ *=.*/\1__svn_revision__ = "'${svn_says}'"/' > version.py
-   echo 'DIFF of __svn_revision__ line(s)'
-   diff version.py.orig2 version.py
-
-   # now re-run the sdist
-   cd $workDir/$pyr
-   /bin/rm -rf *.egg
-   /bin/rm -rf dist
-   # FORCE_USE_PY27... $pybin/python setup.py sdist >& $workDir/sdist2.out
-   /user/${USER}/info/usrlcl273/bin/python setup.py sdist >& $workDir/sdist2.out
-   if ($status != 0) then
-      cat $workDir/sdist2.out
-      exit 1
-   endif
-
-   # now set verinfo2 correctly
-   set verinfo2 = "$svn_says"
-endif
-# ---------END OF  HACK 2 TO WORK AROUND BUGS IN stsci_distutils ---------------
-
-# set full ver (verinfo3) to be n.m.devNNNNN (if dev) or n.m.rNNNNN (if not)
-set junk = `echo $verinfo1 |grep dev`
-if  ("$junk" == "$verinfo1") then
-   set verinfo3 = "${verinfo1}${verinfo2}"
-else
-   set verinfo3 = "${verinfo1}.r${verinfo2}"
-endif
-echo "This build will show a version number of:  $verinfo3 ... is same as r$svn_says ..."
-echo "$verinfo3" > ~/.pyraf_tar_ball_ver
-
-# remove svn dirs (not needed if we use sdist)
-cd $workDir/$pyr
-/bin/rm -rf `find . -name '.svn'`
-if ($status != 0) then
-   echo ERROR cleaning out .svn dirs
-   exit 1
-endif
-
-# OLD - uses tar and gzip directly:
-## tar and zip it - regular (non-win) version
-#cd $workDir
-#tar cf $pyr.tar $pyr
-#if ($status != 0) then
-#   echo ERROR tarring up
-#   exit 1
-#endif
-#gzip $pyr.tar
-#if ($status != 0) then
-#   echo ERROR gzipping
-#   exit 1
-#endif
-
-# New - use the file generated by sdist:
-cd $workDir
-/bin/mv $pyr/dist/pyraf* $pyr.tar.gz
-if ($status != 0) then
-   echo ERROR finding sdist-created tarball
-   exit 1
-endif
-
-# Now tar/zip the Windows version (via "zip")
-if ($isdev == 1) then
-   cd $workDir/$pyr
-   rm -rf dist
-   cd $workDir
-   zip -rq ${pyr}-win $pyr
-   if ($status != 0) then
-      echo ERROR zipping up
-      exit 1
-   endif
-endif
-
-echo "Successfully created tar-ball: $pyr.tar.gz"
diff --git a/tools/fixcache.py b/tools/fixcache.py
index 74c415b..d3dc661 100644
--- a/tools/fixcache.py
+++ b/tools/fixcache.py
@@ -1,6 +1,6 @@
 #! /usr/bin/env python
 # rename clcache files
-# $Id: fixcache.py 1186 2010-04-22 13:24:06Z sontag $
+# $Id$
 #
 
 from __future__ import division # confidence high
diff --git a/tools/loadall.py b/tools/loadall.py
index f6959be..a7ab6e8 100644
--- a/tools/loadall.py
+++ b/tools/loadall.py
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
 
 """loadall.py: Load all the main packages in IRAF with verbose turned on
-$Id: loadall.py 1032 2009-06-18 01:52:35Z sontag $
+$Id$
 """
 from __future__ import division # confidence high
 
diff --git a/tools/plotbench.py b/tools/plotbench.py
index 3c0c5b8..6363fed 100755
--- a/tools/plotbench.py
+++ b/tools/plotbench.py
@@ -7,7 +7,7 @@
 # This will take over your screen while it is running, popping the mouse and
 # the focus back and forth, so be prepared.
 #
-# $Id: plotbench.py 1703 2012-04-24 15:22:15Z sontag $
+# $Id$
 #
 from __future__ import division # confidence high
 
diff --git a/tools/test_usage.csh b/tools/test_usage.csh
deleted file mode 100755
index 1ea5768..0000000
--- a/tools/test_usage.csh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/csh -f
-#
-setenv TERM xterm
-setenv PYRAF_NO_DISPLAY 1
-
-#
-# TRY '--help'
-/bin/rm -f test_usage1.txt >& /dev/null
-pyraf -s --help >& test_usage1.txt
-if ($status != 0) then
-   echo ERROR DURING PYRAF CALL
-   echo "---------------------------------------------------------------"
-   cat test_usage1.txt
-   echo "---------------------------------------------------------------"
-   exit 1
-endif
-# remove non-printable garbled first line chars which occur on some platforms
-/bin/cp test_usage1.txt test_usage1.txt.orig
-cat test_usage1.txt.orig | sed -n '/Copyright/,$ p' > test_usage1.txt
-
-# Do the diff
-/usr/bin/diff -Bw test_usage.save test_usage1.txt
-if ($status != 0) then
-   echo ERROR DURING DIFF 1
-   exit 1
-endif
-
-#
-# TRY '-h'
-/bin/rm -f test_usage2.txt >& /dev/null
-pyraf --silent -h >& test_usage2.txt
-# remove non-printable garbled first line chars which occur on some platforms
-/bin/cp test_usage2.txt test_usage2.txt.orig
-cat test_usage2.txt.orig | sed -n '/Copyright/,$ p' > test_usage2.txt
-
-# Do the diff
-/usr/bin/diff -Bw test_usage.save test_usage2.txt
-if ($status != 0) then
-   echo ERROR DURING DIFF 2
-   exit 1
-endif
-
-#
-# If we get here then the test is successful, but we will print out some
-# helpful PyRAF version info, to help in debugging the test runs themselves.
-# This output goes to test harness (seen in web page) and is not captured
-# or diffed (this kind of use is tested elsewhere).
-pyraf -v -c 'print 123; prcache'
-exit 0
diff --git a/tools/test_usage.save b/tools/test_usage.save
deleted file mode 100644
index a8575de..0000000
--- a/tools/test_usage.save
+++ /dev/null
@@ -1,34 +0,0 @@
-Copyright (C) 2003 Association of Universities for Research in Astronomy
-(AURA)
-See LICENSE.txt in the docs directory of the source distribution for the
-terms of use.
-
-Usage: pyraf [options] [savefile]
-
-where savefile is an optional save file to start from and options are one
-or more of:
-  -c cmd  Command passed in as string (any valid PyRAF command)
-  -e      Turn on ECL mode
-  -h      Print this message
-  -i      No command line wrapper, just run standard interactive Python shell
-  -m      Run command line wrapper to provide extra capabilities (default)
-  -n      No splash screen during startup (also see -x)
-  -s      Silent initialization (does not print startup messages)
-  -V      Print version info and exit
-  -v      Set verbosity level (may be repeated to increase verbosity)
-  -x      No graphics will be attempted/loaded during session
-  -y      Run the IPython shell instead of the normal PyRAF command shell
-
-Long versions of options:
-  -c  --comand=<cmd>
-  -e  --ecl
-  -h  --help
-  -i  --commandwrapper=no
-  -m  --commandwrapper=yes
-  -n  --nosplash
-  -s  --silent
-  -V  --version
-  -v  --verbose
-  -x  --nographics
-  -y  --ipython
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-astro/packages/python-pyraf.git



More information about the Debian-astro-commits mailing list