[python-astropy] 01/03: New upstream version 1.0.2
Ole Streicher
olebole at moszumanska.debian.org
Thu Apr 16 20:16:17 UTC 2015
This is an automated email from the git hooks/post-receive script.
olebole pushed a commit to branch debian
in repository python-astropy.
commit c6c9bc82ca930d2ad6a8581b802f34f880b790d5
Author: Ole Streicher <olebole at debian.org>
Date: Thu Apr 16 22:12:38 2015 +0200
New upstream version 1.0.2
---
CHANGES.rst | 219 +++----
PKG-INFO | 4 +-
ah_bootstrap.py | 26 +-
astropy/_erfa/core.c | 656 +++++++++++++++++++++
astropy/_erfa/core.c.templ | 4 +
astropy/astropy.cfg | 3 +
astropy/config/configuration.py | 8 +-
.../coordinates/tests/test_angular_separation.py | 14 +-
astropy/coordinates/tests/test_api_ape5.py | 34 +-
astropy/coordinates/tests/test_arrays.py | 10 +-
astropy/coordinates/tests/test_earth.py | 41 +-
astropy/coordinates/tests/test_frames.py | 15 +-
astropy/coordinates/tests/test_matching.py | 20 +-
astropy/coordinates/tests/test_sky_coord.py | 39 +-
astropy/coordinates/tests/test_transformations.py | 170 +++---
astropy/cosmology/tests/test_cosmology.py | 644 ++++++++++----------
astropy/io/ascii/core.py | 48 +-
astropy/io/ascii/src/tokenizer.h | 9 +-
astropy/io/fits/column.py | 265 ++++++---
astropy/io/fits/file.py | 121 ++--
astropy/io/fits/fitsrec.py | 265 +++++----
astropy/io/fits/hdu/base.py | 9 +
astropy/io/fits/hdu/compressed.py | 40 +-
astropy/io/fits/hdu/groups.py | 107 ++--
astropy/io/fits/hdu/hdulist.py | 72 +--
astropy/io/fits/hdu/table.py | 197 ++++++-
astropy/io/fits/src/compressionmodule.c | 37 +-
astropy/io/fits/tests/data/tdim.fits | Bin 0 -> 8640 bytes
astropy/io/fits/tests/test_checksum.py | 2 +-
astropy/io/fits/tests/test_image.py | 2 +-
astropy/io/fits/tests/test_table.py | 132 ++++-
astropy/io/fits/util.py | 148 ++++-
astropy/io/votable/tests/data/empty_table.xml | 12 +
astropy/io/votable/tests/table_test.py | 8 +
astropy/io/votable/tree.py | 3 +
astropy/modeling/core.py | 59 +-
astropy/modeling/fitting.py | 3 +-
astropy/modeling/functional_models.py | 6 +-
astropy/modeling/polynomial.py | 70 ++-
astropy/modeling/projections.py | 184 +++++-
astropy/modeling/tests/test_compound.py | 29 +
astropy/modeling/tests/test_polynomial.py | 283 +++++++--
astropy/nddata/mixins/tests/__init__.py | 0
astropy/nddata/mixins/tests/test_ndarithmetic.py | 468 +++++++++++++++
astropy/nddata/mixins/tests/test_ndio.py | 16 +
astropy/stats/funcs.py | 4 +-
astropy/stats/sigma_clipping.py | 2 +-
astropy/table/table.py | 14 +
astropy/table/tests/test_pickle.py | 28 +-
astropy/table/tests/test_table.py | 11 +
astropy/tests/helper.py | 39 +-
astropy/tests/output_checker.py | 4 +-
astropy/tests/pytest_plugins.py | 41 +-
astropy/time/core.py | 7 +-
astropy/time/tests/test_quantity_interaction.py | 15 +
astropy/units/core.py | 4 +-
astropy/units/quantity.py | 37 +-
astropy/units/quantity_helper.py | 131 ++--
astropy/units/si.py | 2 +-
astropy/units/tests/test_equivalencies.py | 13 +-
astropy/units/tests/test_quantity_ufuncs.py | 2 +-
astropy/utils/compat/numpycompat.py | 32 +
astropy/version.py | 10 +-
astropy/vo/samp/__init__.py | 4 +
astropy/vo/samp/client.py | 2 +
astropy/vo/samp/hub.py | 190 +++---
astropy/vo/samp/hub_proxy.py | 3 +
astropy/vo/samp/ssl_utils.py | 109 ++--
astropy/vo/samp/tests/test_standard_profile.py | 2 -
astropy/vo/samp/tests/web_profile_test_helpers.py | 2 +
astropy/wcs/tests/test_wcs.py | 9 +
astropy/wcs/wcs.py | 39 +-
astropy_helpers/CHANGES.rst | 19 +
astropy_helpers/ah_bootstrap.py | 26 +-
astropy_helpers/astropy_helpers.egg-info/PKG-INFO | 4 +-
.../astropy_helpers.egg-info/SOURCES.txt | 20 +-
.../astropy_helpers/commands/build_sphinx.py | 9 +
astropy_helpers/astropy_helpers/setup_helpers.py | 45 +-
astropy_helpers/astropy_helpers/sphinx/conf.py | 3 +
.../sphinx/ext/autodoc_enhancements.py | 17 +-
.../astropy_helpers/sphinx/ext/automodapi.py | 24 +-
.../astropy_helpers/sphinx/ext/automodsumm.py | 33 +-
.../sphinx/ext/tests/test_automodsumm.py | 6 +-
.../astropy_helpers/sphinx/ext/viewcode.py | 5 +-
.../bootstrap-astropy/static/astropy_linkout.svg | 75 +++
.../bootstrap-astropy/static/astropy_logo.ico | Bin 1150 -> 32988 bytes
.../bootstrap-astropy/static/astropy_logo.svg | 87 +++
.../bootstrap-astropy/static/bootstrap-astropy.css | 4 +
astropy_helpers/astropy_helpers/utils.py | 67 +++
astropy_helpers/astropy_helpers/version.py | 10 +-
.../travis/install_graphviz_osx.sh | 4 +-
astropy_helpers/setup.py | 2 +-
docs/_static/astropy_banner.svg | 263 +++++++++
docs/{ => _static}/astropy_banner_96.png | Bin
docs/_static/dev.png | Bin 3177 -> 0 bytes
docs/_static/mature.png | Bin 3189 -> 0 bytes
docs/_static/planned.png | Bin 3116 -> 0 bytes
docs/_static/stable.png | Bin 6439 -> 0 bytes
docs/coordinates/representations.rst | 1 +
docs/coordinates/sgr-example.rst | 2 +-
docs/development/affiliated-packages.rst | 4 +-
docs/index.rst | 19 +-
docs/install.rst | 4 +
docs/io/votable/index.rst | 9 +
docs/known_issues.rst | 318 ++++++----
docs/modeling/parameters.rst | 8 +-
docs/nitpick-exceptions | 1 +
docs/stability.rst | 81 +--
docs/table/construct_table.rst | 21 +-
docs/time/index.rst | 50 +-
docs/units/equivalencies.rst | 58 +-
docs/units/index.rst | 28 +-
docs/units/quantity.rst | 72 +--
docs/warnings.rst | 5 +-
setup.py | 2 +-
115 files changed, 4879 insertions(+), 1785 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 629c5f5..36998bd 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,164 +1,133 @@
-1.0.1 (2015-03-06)
+1.0.2 (2015-04-16)
------------------
New Features
^^^^^^^^^^^^
-- ``astropy.config``
-
-- ``astropy.constants``
-
-- ``astropy.convolution``
-
-- ``astropy.coordinates``
-
-- ``astropy.cosmology``
-
-- ``astropy.io.ascii``
-
-- ``astropy.io.fits``
-
-- ``astropy.io.misc``
-
-- ``astropy.io.registry``
-
-- ``astropy.io.votable``
-
- ``astropy.modeling``
-- ``astropy.nddata``
-
-- ``astropy.stats``
-
-- ``astropy.sphinx``
-
-- ``astropy.table``
-
-- ``astropy.time``
+ - Added support for polynomials with degree 0 or degree greater than 15.
+ [#3574, #3589]
-- ``astropy.units``
-
-- ``astropy.utils``
-
-- ``astropy.vo``
-
-- ``astropy.wcs``
-
-API Changes
-^^^^^^^^^^^
+Bug Fixes
+^^^^^^^^^
- ``astropy.config``
-- ``astropy.constants``
-
-- ``astropy.convolution``
-
-- ``astropy.coordinates``
-
-- ``astropy.cosmology``
-
-- ``astropy.io.ascii``
+ - The pre-astropy-0.4 configuration API has been fixed. It was
+ inadvertently broken in 1.0.1. [#3627]
- ``astropy.io.fits``
-- ``astropy.io.misc``
-
-- ``astropy.io.registry``
-
-- ``astropy.io.votable``
+ - Fixed handling of BINTABLE with TDIMn of size 1. [#3580]
+ - Fixed a severe memory leak that occurred when reading tile compressed
+ images. [#3680]
-- ``astropy.modeling``
+ - Fixed bug where column data could be unintentionally byte-swapped when
+ copying data from an existing FITS file to a new FITS table with a
+ TDIMn keyword for that column. [#3561]
-- ``astropy.nddata``
+ - The ``ColDefs.change_attrib``, ``ColDefs.change_name``, and
+ ``ColDefs.change_unit`` methods now work as advertised. It is also
+ possible (and preferable) to update attributes directly on ``Column``
+ objects (for example setting ``column.name``), and the change will be
+ accurately reflected in any associated table data and its FITS header.
+ [#3283, #1539, #2618]
-- ``astropy.stats``
+ - Fixes an issue with the ``FITS_rec`` interface to FITS table data, where a
+ ``FITS_rec`` created by copying an existing FITS table but adding new rows
+ could not be sliced or masked correctly. [#3641]
-- ``astropy.table``
-
-- ``astropy.time``
-
-- ``astropy.units``
+- ``astropy.io.votable``
-- ``astropy.utils``
+ - Loading a ``TABLE`` element without any ``DATA`` now correctly
+ creates a 0-row array. [#3636]
-- ``astropy.vo``
-
-- ``astropy.wcs``
+- ``astropy.modeling``
-Bug Fixes
-^^^^^^^^^
+ - Added workaround to support inverses on compound models when one of the
+ sub-models is itself a compound model with a manually-assigned custom
+ inverse. [#3542]
-- ``astropy.config``
+ - Fixed instantiation of polynomial models with constraints for parameters
+ (constraints could still be assigned after instantiation, but not during).
+ [#3606]
-- ``astropy.constants``
+ - Fixed fitting of 2D polynomial models with the ``LeVMarLSQFitter``. [#3606]
-- ``astropy.convolution``
+- ``astropy.table``
-- ``astropy.coordinates``
+ - Ensure ``QTable`` can be pickled [#3590]
-- ``astropy.cosmology``
+ - Some corner cases when instantiating an ``astropy.table.Table``
+ with a Numpy array are handled [#3637]. Notably:
-- ``astropy.io.ascii``
+ - a zero-length array is the same as passing ``None``
+ - a scalar raises a ``ValueError``
+ - a one-dimensional array is treated as a single row of a table.
- - Fix a segfault in the fast C parser when one of the column headers
- is empty [#3545].
+- ``astropy.time``
-- ``astropy.io.fits``
+ - Ensure a ``Column`` without units is treated as an ``array``, not as a
+ dimensionless ``Quantity``. [#3648]
-- ``astropy.io.misc``
+- ``astropy.units``
-- ``astropy.io.registry``
+ - Ensure equivalencies that do more than just scale a ``Quantity`` are
+ properly handled also in ``ufunc`` evaluations. [#2496, #3586]
-- ``astropy.io.votable``
+ - The LaTeX representation of the Angstrom unit has changed from
+ ``\overset{\circ}{A}`` to ``\mathring{A}``, which should have
+ better support across regular LaTeX, MathJax and matplotlib (as of
+ version 1.5) [#3617]
-- ``astropy.modeling``
+- ``astropy.vo``
-- ``astropy.nddata``
+ - Using HTTPS/SSL for communication between SAMP hubs now works
+ correctly on all supported versions of Python [#3613]
-- ``astropy.stats``
+- ``astropy.wcs``
-- ``astropy.table``
+ - When no ``relax`` argument is passed to ``WCS.to_header()`` and
+ the result omits non-standard WCS keywords, a warning is
+ emitted. [#3652]
-- ``astropy.time``
+Other Changes and Additions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Nothing changed yet.
- ``astropy.units``
-- ``astropy.utils``
+ - Clarified imperial mass measurements and added pound force (lbf),
+ kilopound (kip), and pound per square inch (psi). [#3409]
- ``astropy.vo``
-- ``astropy.wcs``
+ - The number of retries for connections in ``astropy.vo.samp`` can now be
+ configured by a ``n_retries`` configuration option. [#3612]
-Other Changes and Additions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+- Testing
+
+ - Running ``astropy.test()`` from within the IPython prompt has been
+ provisionally re-enabled. [#3184]
-- Updated bundled astropy-helpers version to v1.0.1 to address installation
- issues with some packages that depend on Astropy. [#3541]
-1.0.1 (unreleased)
+1.0.1 (2015-03-06)
------------------
-Bug fixes
+Bug Fixes
^^^^^^^^^
-- ``astropy.analytic_functions``
-
-- ``astropy.config``
-
-- ``astropy.conftest.py``
-
- ``astropy.constants``
- Ensure constants can be turned into ``Quantity`` safely. [#3537, #3538]
-
-- ``astropy.convolution``
-
-- ``astropy.coordinates``
-
-- ``astropy.cosmology``
- ``astropy.io.ascii``
+ - Fix a segfault in the fast C parser when one of the column headers
+ is empty [#3545].
+
- Fixed support for reading inf and nan values with the fast reader in
Windows. Also fixed in the case of using ``use_fast_converter=True``
with the fast reader. [#3525]
@@ -169,14 +138,6 @@ Bug fixes
(i.e. column headers) as purely information comments, leading to problems
when trying to round-trip the table. [#3562]
-- ``astropy.io.fits``
-
-- ``astropy.io.misc``
-
-- ``astropy.io.votable``
-
-- ``astropy.logger.py``
-
- ``astropy.modeling``
- Fixed propagation of parameter constraints ('fixed', 'bounds', 'tied')
@@ -189,26 +150,10 @@ Bug fixes
- Restore several properties to the compatibility class ``NDDataArray`` that
were inadvertently omitted [#3466].
-- ``astropy.stats``
-
-- ``astropy.table``
-
-- ``astropy.tests``
-
- ``astropy.time``
- Time objects now always evaluate to ``True``, except when empty. [#3530]
-- ``astropy.units``
-
-- ``astropy.utils``
-
-- ``astropy.visualization``
-
-- ``astropy.vo``
-
-- ``astropy.wcs``
-
Miscellaneous
^^^^^^^^^^^^^
@@ -220,19 +165,11 @@ Miscellaneous
- Improve import time of astropy [#3488].
-1.0.1 (unreleased)
-------------------
-
-Bug Fixes
-^^^^^^^^^
-
-- ``astropy.io.ascii``
-
- - Fixed support for reading inf and nan values with the fast reader in
- Windows. Also fixed in the case of using ``use_fast_converter=True``
- with the fast reader. [#3525]
+Other Changes and Additions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
- - Fixed use of mmap in the fast reader on Windows. [#3525]
+- Updated bundled astropy-helpers version to v1.0.1 to address installation
+ issues with some packages that depend on Astropy. [#3541]
1.0 (2015-02-18)
diff --git a/PKG-INFO b/PKG-INFO
index 9601cb7..e41a408 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,12 +1,12 @@
Metadata-Version: 1.1
Name: astropy
-Version: 1.0.1
+Version: 1.0.2
Summary: Community-developed python astronomy tools
Home-page: http://astropy.org
Author: The Astropy Developers
Author-email: astropy.team at gmail.com
License: BSD
-Download-URL: http://pypi.python.org/packages/source/a/astropy/astropy-1.0.1.tar.gz
+Download-URL: http://pypi.python.org/packages/source/a/astropy/astropy-1.0.2.tar.gz
Description:
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
diff --git a/ah_bootstrap.py b/ah_bootstrap.py
index e9521ea..7e145e3 100644
--- a/ah_bootstrap.py
+++ b/ah_bootstrap.py
@@ -282,6 +282,19 @@ class _Bootstrapper(object):
strategies = ['local_directory', 'local_file', 'index']
dist = None
+ # First, remove any previously imported versions of astropy_helpers;
+ # this is necessary for nested installs where one package's installer
+ # is installing another package via setuptools.sandbox.run_setup, as in
+ # the case of setup_requires
+ for key in list(sys.modules):
+ try:
+ if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
+ del sys.modules[key]
+ except AttributeError:
+ # Sometimes mysterious non-string things can turn up in
+ # sys.modules
+ continue
+
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
@@ -311,19 +324,6 @@ class _Bootstrapper(object):
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
- # But first, remove any previously imported versions of
- # astropy_helpers; this is necessary for nested installs where one
- # package's installer is installing another package via
- # setuptools.sandbox.run_set, as in the case of setup_requires
- for key in list(sys.modules):
- try:
- if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
- del sys.modules[key]
- except AttributeError:
- # Sometimes mysterious non-string things can turn up in
- # sys.modules
- continue
-
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
diff --git a/astropy/_erfa/core.c b/astropy/_erfa/core.c
index 815f028..3d9e2aa 100644
--- a/astropy/_erfa/core.c
+++ b/astropy/_erfa/core.c
@@ -47,6 +47,8 @@ static PyObject *Py_gd2gce(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_a = ((double *)(dataptrarray[0]))[0];
_f = ((double *)(dataptrarray[1]))[0];
@@ -61,6 +63,8 @@ static PyObject *Py_gd2gce(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -76,12 +80,16 @@ static PyObject *Py_fal03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFal03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -98,6 +106,8 @@ static PyObject *Py_ttut1(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tt1 = ((double *)(dataptrarray[0]))[0];
_tt2 = ((double *)(dataptrarray[1]))[0];
@@ -111,6 +121,8 @@ static PyObject *Py_ttut1(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -127,6 +139,8 @@ static PyObject *Py_ee06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -134,6 +148,8 @@ static PyObject *Py_ee06a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEe06a(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -151,6 +167,8 @@ static PyObject *Py_jd2cal(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dj1 = ((double *)(dataptrarray[0]))[0];
_dj2 = ((double *)(dataptrarray[1]))[0];
@@ -165,6 +183,8 @@ static PyObject *Py_jd2cal(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -182,6 +202,8 @@ static PyObject *Py_xy06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -190,6 +212,8 @@ static PyObject *Py_xy06(PyObject *self, PyObject *args, PyObject *kwds)
eraXy06(_date1, _date2, _x, _y);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -202,6 +226,8 @@ static PyObject *Py_epj2jd(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_epj = ((double *)(dataptrarray[0]))[0];
_djm0 = ((double *)(dataptrarray[1]));
@@ -209,6 +235,8 @@ static PyObject *Py_epj2jd(PyObject *self, PyObject *args, PyObject *kwds)
eraEpj2jd(_epj, _djm0, _djm);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -223,6 +251,8 @@ static PyObject *Py_ldn(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_n = ((int *)(dataptrarray[0]))[0];
_b = ((eraLDBODY *)(dataptrarray[1]));
@@ -232,6 +262,8 @@ static PyObject *Py_ldn(PyObject *self, PyObject *args, PyObject *kwds)
eraLdn(_n, _b, _ob, _sc, _sn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -257,6 +289,8 @@ static PyObject *Py_apco13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_utc1 = ((double *)(dataptrarray[0]))[0];
_utc2 = ((double *)(dataptrarray[1]))[0];
@@ -279,6 +313,8 @@ static PyObject *Py_apco13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -299,6 +335,8 @@ static PyObject *Py_gd2gc(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_n = ((int *)(dataptrarray[0]))[0];
_elong = ((double *)(dataptrarray[1]))[0];
@@ -312,6 +350,8 @@ static PyObject *Py_gd2gc(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -331,6 +371,8 @@ static PyObject *Py_tttai(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tt1 = ((double *)(dataptrarray[0]))[0];
_tt2 = ((double *)(dataptrarray[1]))[0];
@@ -343,6 +385,8 @@ static PyObject *Py_tttai(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -365,6 +409,8 @@ static PyObject *Py_pvstar(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_pv = ((double *)(dataptrarray[0]));
_ra = ((double *)(dataptrarray[1]));
@@ -380,6 +426,8 @@ static PyObject *Py_pvstar(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -412,6 +460,8 @@ static PyObject *Py_atoi13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_type = ((const char *)(dataptrarray[0]));
_ob1 = ((double *)(dataptrarray[1]))[0];
@@ -437,6 +487,8 @@ static PyObject *Py_atoi13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -457,6 +509,8 @@ static PyObject *Py_ut1utc(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ut11 = ((double *)(dataptrarray[0]))[0];
_ut12 = ((double *)(dataptrarray[1]))[0];
@@ -470,6 +524,8 @@ static PyObject *Py_ut1utc(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -486,6 +542,8 @@ static PyObject *Py_c2i00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -493,6 +551,8 @@ static PyObject *Py_c2i00b(PyObject *self, PyObject *args, PyObject *kwds)
eraC2i00b(_date1, _date2, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -505,6 +565,8 @@ static PyObject *Py_c2i00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -512,6 +574,8 @@ static PyObject *Py_c2i00a(PyObject *self, PyObject *args, PyObject *kwds)
eraC2i00a(_date1, _date2, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -525,6 +589,8 @@ static PyObject *Py_c2ibpn(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -533,6 +599,8 @@ static PyObject *Py_c2ibpn(PyObject *self, PyObject *args, PyObject *kwds)
eraC2ibpn(_date1, _date2, _rbpn, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -545,6 +613,8 @@ static PyObject *Py_apcg13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -552,6 +622,8 @@ static PyObject *Py_apcg13(PyObject *self, PyObject *args, PyObject *kwds)
eraApcg13(_date1, _date2, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -567,6 +639,8 @@ static PyObject *Py_gst06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -577,6 +651,8 @@ static PyObject *Py_gst06(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGst06(_uta, _utb, _tta, _ttb, _rnpb);
*((double *)(dataptrarray[5])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -590,6 +666,8 @@ static PyObject *Py_c2tcio(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc2i = ((double *)(dataptrarray[0]));
_era = ((double *)(dataptrarray[1]))[0];
@@ -598,6 +676,8 @@ static PyObject *Py_c2tcio(PyObject *self, PyObject *args, PyObject *kwds)
eraC2tcio(_rc2i, _era, _rpom, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -610,6 +690,8 @@ static PyObject *Py_eo06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -617,6 +699,8 @@ static PyObject *Py_eo06a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEo06a(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -629,6 +713,8 @@ static PyObject *Py_pmat76(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -636,6 +722,8 @@ static PyObject *Py_pmat76(PyObject *self, PyObject *args, PyObject *kwds)
eraPmat76(_date1, _date2, _rmatp);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -647,12 +735,16 @@ static PyObject *Py_fasa03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFasa03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -669,6 +761,8 @@ static PyObject *Py_atic13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ri = ((double *)(dataptrarray[0]))[0];
_di = ((double *)(dataptrarray[1]))[0];
@@ -680,6 +774,8 @@ static PyObject *Py_atic13(PyObject *self, PyObject *args, PyObject *kwds)
eraAtic13(_ri, _di, _date1, _date2, _rc, _dc, _eo);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -707,6 +803,8 @@ static PyObject *Py_pmsafe(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ra1 = ((double *)(dataptrarray[0]))[0];
_dec1 = ((double *)(dataptrarray[1]))[0];
@@ -731,6 +829,8 @@ static PyObject *Py_pmsafe(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -753,6 +853,8 @@ static PyObject *Py_starpv(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ra = ((double *)(dataptrarray[0]))[0];
_dec = ((double *)(dataptrarray[1]))[0];
@@ -768,6 +870,8 @@ static PyObject *Py_starpv(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -793,6 +897,8 @@ static PyObject *Py_h2fk5(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rh = ((double *)(dataptrarray[0]))[0];
_dh = ((double *)(dataptrarray[1]))[0];
@@ -809,6 +915,8 @@ static PyObject *Py_h2fk5(PyObject *self, PyObject *args, PyObject *kwds)
eraH2fk5(_rh, _dh, _drh, _ddh, _pxh, _rvh, _r5, _d5, _dr5, _dd5, _px5, _rv5);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -821,6 +929,8 @@ static PyObject *Py_gmst82(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dj1 = ((double *)(dataptrarray[0]))[0];
_dj2 = ((double *)(dataptrarray[1]))[0];
@@ -828,6 +938,8 @@ static PyObject *Py_gmst82(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGmst82(_dj1, _dj2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -844,6 +956,8 @@ static PyObject *Py_prec76(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date01 = ((double *)(dataptrarray[0]))[0];
_date02 = ((double *)(dataptrarray[1]))[0];
@@ -855,6 +969,8 @@ static PyObject *Py_prec76(PyObject *self, PyObject *args, PyObject *kwds)
eraPrec76(_date01, _date02, _date11, _date12, _zeta, _z, _theta);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -867,6 +983,8 @@ static PyObject *Py_nutm80(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -874,6 +992,8 @@ static PyObject *Py_nutm80(PyObject *self, PyObject *args, PyObject *kwds)
eraNutm80(_date1, _date2, _rmatn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -887,6 +1007,8 @@ static PyObject *Py_numat(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_epsa = ((double *)(dataptrarray[0]))[0];
_dpsi = ((double *)(dataptrarray[1]))[0];
@@ -895,6 +1017,8 @@ static PyObject *Py_numat(PyObject *self, PyObject *args, PyObject *kwds)
eraNumat(_epsa, _dpsi, _deps, _rmatn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -906,12 +1030,16 @@ static PyObject *Py_faom03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFaom03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -926,6 +1054,8 @@ static PyObject *Py_xys00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -935,6 +1065,8 @@ static PyObject *Py_xys00b(PyObject *self, PyObject *args, PyObject *kwds)
eraXys00b(_date1, _date2, _x, _y, _s);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -949,6 +1081,8 @@ static PyObject *Py_xys00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -958,6 +1092,8 @@ static PyObject *Py_xys00a(PyObject *self, PyObject *args, PyObject *kwds)
eraXys00a(_date1, _date2, _x, _y, _s);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -970,6 +1106,8 @@ static PyObject *Py_eect00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -977,6 +1115,8 @@ static PyObject *Py_eect00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEect00(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1001,6 +1141,8 @@ static PyObject *Py_apio13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_utc1 = ((double *)(dataptrarray[0]))[0];
_utc2 = ((double *)(dataptrarray[1]))[0];
@@ -1022,6 +1164,8 @@ static PyObject *Py_apio13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1042,6 +1186,8 @@ static PyObject *Py_utcut1(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_utc1 = ((double *)(dataptrarray[0]))[0];
_utc2 = ((double *)(dataptrarray[1]))[0];
@@ -1055,6 +1201,8 @@ static PyObject *Py_utcut1(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1087,6 +1235,8 @@ static PyObject *Py_atoc13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_type = ((const char *)(dataptrarray[0]));
_ob1 = ((double *)(dataptrarray[1]))[0];
@@ -1112,6 +1262,8 @@ static PyObject *Py_atoc13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1128,6 +1280,8 @@ static PyObject *Py_pmat06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1135,6 +1289,8 @@ static PyObject *Py_pmat06(PyObject *self, PyObject *args, PyObject *kwds)
eraPmat06(_date1, _date2, _rbp);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1150,6 +1306,8 @@ static PyObject *Py_apcs(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1160,6 +1318,8 @@ static PyObject *Py_apcs(PyObject *self, PyObject *args, PyObject *kwds)
eraApcs(_date1, _date2, _pv, _ebpv, _ehp, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1172,6 +1332,8 @@ static PyObject *Py_pmat00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1179,6 +1341,8 @@ static PyObject *Py_pmat00(PyObject *self, PyObject *args, PyObject *kwds)
eraPmat00(_date1, _date2, _rbp);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1194,6 +1358,8 @@ static PyObject *Py_plan94(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1206,6 +1372,8 @@ static PyObject *Py_plan94(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1236,6 +1404,8 @@ static PyObject *Py_apco(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1257,6 +1427,8 @@ static PyObject *Py_apco(PyObject *self, PyObject *args, PyObject *kwds)
eraApco(_date1, _date2, _ebpv, _ehp, _x, _y, _s, _theta, _elong, _phi, _hm, _xp, _yp, _sp, _refa, _refb, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1274,6 +1446,8 @@ static PyObject *Py_pvtob(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_elong = ((double *)(dataptrarray[0]))[0];
_phi = ((double *)(dataptrarray[1]))[0];
@@ -1286,6 +1460,8 @@ static PyObject *Py_pvtob(PyObject *self, PyObject *args, PyObject *kwds)
eraPvtob(_elong, _phi, _hm, _xp, _yp, _sp, _theta, _pv);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1303,6 +1479,8 @@ static PyObject *Py_apci(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1315,6 +1493,8 @@ static PyObject *Py_apci(PyObject *self, PyObject *args, PyObject *kwds)
eraApci(_date1, _date2, _ebpv, _ehp, _x, _y, _s, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1329,6 +1509,8 @@ static PyObject *Py_apcg(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1338,6 +1520,8 @@ static PyObject *Py_apcg(PyObject *self, PyObject *args, PyObject *kwds)
eraApcg(_date1, _date2, _ebpv, _ehp, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1350,6 +1534,8 @@ static PyObject *Py_num00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1357,6 +1543,8 @@ static PyObject *Py_num00b(PyObject *self, PyObject *args, PyObject *kwds)
eraNum00b(_date1, _date2, _rmatn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1369,6 +1557,8 @@ static PyObject *Py_num00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1376,6 +1566,8 @@ static PyObject *Py_num00a(PyObject *self, PyObject *args, PyObject *kwds)
eraNum00a(_date1, _date2, _rmatn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1391,6 +1583,8 @@ static PyObject *Py_utctai(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_utc1 = ((double *)(dataptrarray[0]))[0];
_utc2 = ((double *)(dataptrarray[1]))[0];
@@ -1403,6 +1597,8 @@ static PyObject *Py_utctai(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1418,12 +1614,16 @@ static PyObject *Py_faju03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFaju03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1436,6 +1636,8 @@ static PyObject *Py_s00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1443,6 +1645,8 @@ static PyObject *Py_s00a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraS00a(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1455,6 +1659,8 @@ static PyObject *Py_s00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1462,6 +1668,8 @@ static PyObject *Py_s00b(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraS00b(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1482,6 +1690,8 @@ static PyObject *Py_atci13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -1497,6 +1707,8 @@ static PyObject *Py_atci13(PyObject *self, PyObject *args, PyObject *kwds)
eraAtci13(_rc, _dc, _pr, _pd, _px, _rv, _date1, _date2, _ri, _di, _eo);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1511,6 +1723,8 @@ static PyObject *Py_bp06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1520,6 +1734,8 @@ static PyObject *Py_bp06(PyObject *self, PyObject *args, PyObject *kwds)
eraBp06(_date1, _date2, _rb, _rp, _rbp);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1531,12 +1747,16 @@ static PyObject *Py_falp03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFalp03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1549,6 +1769,8 @@ static PyObject *Py_bi00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dpsibi = ((double *)(dataptrarray[0]));
_depsbi = ((double *)(dataptrarray[1]));
@@ -1556,6 +1778,8 @@ static PyObject *Py_bi00(PyObject *self, PyObject *args, PyObject *kwds)
eraBi00(_dpsibi, _depsbi, _dra);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1570,6 +1794,8 @@ static PyObject *Py_bp00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1579,6 +1805,8 @@ static PyObject *Py_bp00(PyObject *self, PyObject *args, PyObject *kwds)
eraBp00(_date1, _date2, _rb, _rp, _rbp);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1591,6 +1819,8 @@ static PyObject *Py_ee00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1598,6 +1828,8 @@ static PyObject *Py_ee00a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEe00a(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1610,6 +1842,8 @@ static PyObject *Py_ee00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1617,6 +1851,8 @@ static PyObject *Py_ee00b(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEe00b(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1629,6 +1865,8 @@ static PyObject *Py_sp00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1636,6 +1874,8 @@ static PyObject *Py_sp00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraSp00(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1651,6 +1891,8 @@ static PyObject *Py_refco(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_phpa = ((double *)(dataptrarray[0]))[0];
_tc = ((double *)(dataptrarray[1]))[0];
@@ -1661,6 +1903,8 @@ static PyObject *Py_refco(PyObject *self, PyObject *args, PyObject *kwds)
eraRefco(_phpa, _tc, _rh, _wl, _refa, _refb);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1678,6 +1922,8 @@ static PyObject *Py_atioq(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ri = ((double *)(dataptrarray[0]))[0];
_di = ((double *)(dataptrarray[1]))[0];
@@ -1690,6 +1936,8 @@ static PyObject *Py_atioq(PyObject *self, PyObject *args, PyObject *kwds)
eraAtioq(_ri, _di, _astrom, _aob, _zob, _hob, _dob, _rob);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1706,6 +1954,8 @@ static PyObject *Py_cal2jd(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_iy = ((int *)(dataptrarray[0]))[0];
_im = ((int *)(dataptrarray[1]))[0];
@@ -1719,6 +1969,8 @@ static PyObject *Py_cal2jd(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1736,6 +1988,8 @@ static PyObject *Py_apcs13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1744,6 +1998,8 @@ static PyObject *Py_apcs13(PyObject *self, PyObject *args, PyObject *kwds)
eraApcs13(_date1, _date2, _pv, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1758,6 +2014,8 @@ static PyObject *Py_c2ixy(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1767,6 +2025,8 @@ static PyObject *Py_c2ixy(PyObject *self, PyObject *args, PyObject *kwds)
eraC2ixy(_date1, _date2, _x, _y, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1781,6 +2041,8 @@ static PyObject *Py_ab(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_pnat = ((double *)(dataptrarray[0]));
_v = ((double *)(dataptrarray[1]));
@@ -1790,6 +2052,8 @@ static PyObject *Py_ab(PyObject *self, PyObject *args, PyObject *kwds)
eraAb(_pnat, _v, _s, _bm1, _ppr);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1804,6 +2068,8 @@ static PyObject *Py_s06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1813,6 +2079,8 @@ static PyObject *Py_s06(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraS06(_date1, _date2, _x, _y);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1827,6 +2095,8 @@ static PyObject *Py_s00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1836,6 +2106,8 @@ static PyObject *Py_s00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraS00(_date1, _date2, _x, _y);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1849,6 +2121,8 @@ static PyObject *Py_pr00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1857,6 +2131,8 @@ static PyObject *Py_pr00(PyObject *self, PyObject *args, PyObject *kwds)
eraPr00(_date1, _date2, _dpsipr, _depspr);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1877,6 +2153,8 @@ static PyObject *Py_dtf2d(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_scale = ((const char *)(dataptrarray[0]));
_iy = ((int *)(dataptrarray[1]))[0];
@@ -1894,6 +2172,8 @@ static PyObject *Py_dtf2d(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -1912,6 +2192,8 @@ static PyObject *Py_xys06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -1921,6 +2203,8 @@ static PyObject *Py_xys06a(PyObject *self, PyObject *args, PyObject *kwds)
eraXys06a(_date1, _date2, _x, _y, _s);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1933,6 +2217,8 @@ static PyObject *Py_era00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dj1 = ((double *)(dataptrarray[0]))[0];
_dj2 = ((double *)(dataptrarray[1]))[0];
@@ -1940,6 +2226,8 @@ static PyObject *Py_era00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEra00(_dj1, _dj2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -1967,6 +2255,8 @@ static PyObject *Py_starpm(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ra1 = ((double *)(dataptrarray[0]))[0];
_dec1 = ((double *)(dataptrarray[1]))[0];
@@ -1991,6 +2281,8 @@ static PyObject *Py_starpm(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -2011,6 +2303,8 @@ static PyObject *Py_ld(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_bm = ((double *)(dataptrarray[0]))[0];
_p = ((double *)(dataptrarray[1]));
@@ -2022,6 +2316,8 @@ static PyObject *Py_ld(PyObject *self, PyObject *args, PyObject *kwds)
eraLd(_bm, _p, _q, _e, _em, _dlim, _p1);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2033,12 +2329,16 @@ static PyObject *Py_fave03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFave03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2051,6 +2351,8 @@ static PyObject *Py_aper13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ut11 = ((double *)(dataptrarray[0]))[0];
_ut12 = ((double *)(dataptrarray[1]))[0];
@@ -2058,6 +2360,8 @@ static PyObject *Py_aper13(PyObject *self, PyObject *args, PyObject *kwds)
eraAper13(_ut11, _ut12, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2077,6 +2381,8 @@ static PyObject *Py_pn06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2091,6 +2397,8 @@ static PyObject *Py_pn06a(PyObject *self, PyObject *args, PyObject *kwds)
eraPn06a(_date1, _date2, _dpsi, _deps, _epsa, _rb, _rp, _rbp, _rn, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2105,6 +2413,8 @@ static PyObject *Py_ee00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2114,6 +2424,8 @@ static PyObject *Py_ee00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEe00(_date1, _date2, _epsa, _dpsi);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2128,6 +2440,8 @@ static PyObject *Py_gst06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -2137,6 +2451,8 @@ static PyObject *Py_gst06a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGst06a(_uta, _utb, _tta, _ttb);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2155,6 +2471,8 @@ static PyObject *Py_pmpx(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -2168,6 +2486,8 @@ static PyObject *Py_pmpx(PyObject *self, PyObject *args, PyObject *kwds)
eraPmpx(_rc, _dc, _pr, _pd, _px, _rv, _pmt, _pob, _pco);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2179,12 +2499,16 @@ static PyObject *Py_fk5hip(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_r5h = ((double *)(dataptrarray[0]));
_s5h = ((double *)(dataptrarray[1]));
eraFk5hip(_r5h, _s5h);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2197,6 +2521,8 @@ static PyObject *Py_obl06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2204,6 +2530,8 @@ static PyObject *Py_obl06(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraObl06(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2216,6 +2544,8 @@ static PyObject *Py_eors(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rnpb = ((double *)(dataptrarray[0]));
_s = ((double *)(dataptrarray[1]))[0];
@@ -2223,6 +2553,8 @@ static PyObject *Py_eors(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEors(_rnpb, _s);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2239,6 +2571,8 @@ static PyObject *Py_gc2gd(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_n = ((int *)(dataptrarray[0]))[0];
_xyz = ((double *)(dataptrarray[1]));
@@ -2252,6 +2586,8 @@ static PyObject *Py_gc2gd(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -2272,6 +2608,8 @@ static PyObject *Py_dtdb(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2283,6 +2621,8 @@ static PyObject *Py_dtdb(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraDtdb(_date1, _date2, _ut, _elong, _u, _v);
*((double *)(dataptrarray[6])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2295,6 +2635,8 @@ static PyObject *Py_pnm06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2302,6 +2644,8 @@ static PyObject *Py_pnm06a(PyObject *self, PyObject *args, PyObject *kwds)
eraPnm06a(_date1, _date2, _rnpb);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2321,6 +2665,8 @@ static PyObject *Py_apio(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_sp = ((double *)(dataptrarray[0]))[0];
_theta = ((double *)(dataptrarray[1]))[0];
@@ -2335,6 +2681,8 @@ static PyObject *Py_apio(PyObject *self, PyObject *args, PyObject *kwds)
eraApio(_sp, _theta, _elong, _phi, _hm, _xp, _yp, _refa, _refb, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2365,6 +2713,8 @@ static PyObject *Py_atio13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ri = ((double *)(dataptrarray[0]))[0];
_di = ((double *)(dataptrarray[1]))[0];
@@ -2392,6 +2742,8 @@ static PyObject *Py_atio13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -2409,6 +2761,8 @@ static PyObject *Py_c2teqx(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rbpn = ((double *)(dataptrarray[0]));
_gst = ((double *)(dataptrarray[1]))[0];
@@ -2417,6 +2771,8 @@ static PyObject *Py_c2teqx(PyObject *self, PyObject *args, PyObject *kwds)
eraC2teqx(_rbpn, _gst, _rpom, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2430,6 +2786,8 @@ static PyObject *Py_nut00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2438,6 +2796,8 @@ static PyObject *Py_nut00a(PyObject *self, PyObject *args, PyObject *kwds)
eraNut00a(_date1, _date2, _dpsi, _deps);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2451,6 +2811,8 @@ static PyObject *Py_nut00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2459,6 +2821,8 @@ static PyObject *Py_nut00b(PyObject *self, PyObject *args, PyObject *kwds)
eraNut00b(_date1, _date2, _dpsi, _deps);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2470,12 +2834,16 @@ static PyObject *Py_fae03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFae03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2495,6 +2863,8 @@ static PyObject *Py_pn00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2509,6 +2879,8 @@ static PyObject *Py_pn00b(PyObject *self, PyObject *args, PyObject *kwds)
eraPn00b(_date1, _date2, _dpsi, _deps, _epsa, _rb, _rp, _rbp, _rn, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2522,6 +2894,8 @@ static PyObject *Py_nut80(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2530,6 +2904,8 @@ static PyObject *Py_nut80(PyObject *self, PyObject *args, PyObject *kwds)
eraNut80(_date1, _date2, _dpsi, _deps);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2541,12 +2917,16 @@ static PyObject *Py_fane03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFane03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2564,6 +2944,8 @@ static PyObject *Py_hfk5z(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rh = ((double *)(dataptrarray[0]))[0];
_dh = ((double *)(dataptrarray[1]))[0];
@@ -2576,6 +2958,8 @@ static PyObject *Py_hfk5z(PyObject *self, PyObject *args, PyObject *kwds)
eraHfk5z(_rh, _dh, _date1, _date2, _r5, _d5, _dr5, _dd5);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2591,6 +2975,8 @@ static PyObject *Py_epv00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2603,6 +2989,8 @@ static PyObject *Py_epv00(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -2621,6 +3009,8 @@ static PyObject *Py_gmst00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -2630,6 +3020,8 @@ static PyObject *Py_gmst00(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGmst00(_uta, _utb, _tta, _ttb);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2643,6 +3035,8 @@ static PyObject *Py_c2ixys(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_x = ((double *)(dataptrarray[0]))[0];
_y = ((double *)(dataptrarray[1]))[0];
@@ -2651,6 +3045,8 @@ static PyObject *Py_c2ixys(PyObject *self, PyObject *args, PyObject *kwds)
eraC2ixys(_x, _y, _s, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2665,6 +3061,8 @@ static PyObject *Py_gmst06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -2674,6 +3072,8 @@ static PyObject *Py_gmst06(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGmst06(_uta, _utb, _tta, _ttb);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2694,6 +3094,8 @@ static PyObject *Py_atciqn(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -2709,6 +3111,8 @@ static PyObject *Py_atciqn(PyObject *self, PyObject *args, PyObject *kwds)
eraAtciqn(_rc, _dc, _pr, _pd, _px, _rv, _astrom, _n, _b, _ri, _di);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2721,6 +3125,8 @@ static PyObject *Py_epb2jd(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_epb = ((double *)(dataptrarray[0]))[0];
_djm0 = ((double *)(dataptrarray[1]));
@@ -2728,6 +3134,8 @@ static PyObject *Py_epb2jd(PyObject *self, PyObject *args, PyObject *kwds)
eraEpb2jd(_epb, _djm0, _djm);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2744,6 +3152,8 @@ static PyObject *Py_c2t00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tta = ((double *)(dataptrarray[0]))[0];
_ttb = ((double *)(dataptrarray[1]))[0];
@@ -2755,6 +3165,8 @@ static PyObject *Py_c2t00a(PyObject *self, PyObject *args, PyObject *kwds)
eraC2t00a(_tta, _ttb, _uta, _utb, _xp, _yp, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2773,6 +3185,8 @@ static PyObject *Py_c2txy(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tta = ((double *)(dataptrarray[0]))[0];
_ttb = ((double *)(dataptrarray[1]))[0];
@@ -2786,6 +3200,8 @@ static PyObject *Py_c2txy(PyObject *self, PyObject *args, PyObject *kwds)
eraC2txy(_tta, _ttb, _uta, _utb, _x, _y, _xp, _yp, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2800,6 +3216,8 @@ static PyObject *Py_atciqz(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -2809,6 +3227,8 @@ static PyObject *Py_atciqz(PyObject *self, PyObject *args, PyObject *kwds)
eraAtciqz(_rc, _dc, _astrom, _ri, _di);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2822,6 +3242,8 @@ static PyObject *Py_ldsun(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_p = ((double *)(dataptrarray[0]));
_e = ((double *)(dataptrarray[1]));
@@ -2830,6 +3252,8 @@ static PyObject *Py_ldsun(PyObject *self, PyObject *args, PyObject *kwds)
eraLdsun(_p, _e, _em, _p1);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2849,6 +3273,8 @@ static PyObject *Py_pn00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2863,6 +3289,8 @@ static PyObject *Py_pn00a(PyObject *self, PyObject *args, PyObject *kwds)
eraPn00a(_date1, _date2, _dpsi, _deps, _epsa, _rb, _rp, _rbp, _rn, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2881,6 +3309,8 @@ static PyObject *Py_c2tpe(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tta = ((double *)(dataptrarray[0]))[0];
_ttb = ((double *)(dataptrarray[1]))[0];
@@ -2894,6 +3324,8 @@ static PyObject *Py_c2tpe(PyObject *self, PyObject *args, PyObject *kwds)
eraC2tpe(_tta, _ttb, _uta, _utb, _dpsi, _deps, _xp, _yp, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2910,6 +3342,8 @@ static PyObject *Py_dat(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_iy = ((int *)(dataptrarray[0]))[0];
_im = ((int *)(dataptrarray[1]))[0];
@@ -2923,6 +3357,8 @@ static PyObject *Py_dat(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -2943,6 +3379,8 @@ static PyObject *Py_c2t00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tta = ((double *)(dataptrarray[0]))[0];
_ttb = ((double *)(dataptrarray[1]))[0];
@@ -2954,6 +3392,8 @@ static PyObject *Py_c2t00b(PyObject *self, PyObject *args, PyObject *kwds)
eraC2t00b(_tta, _ttb, _uta, _utb, _xp, _yp, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2966,6 +3406,8 @@ static PyObject *Py_eqeq94(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2973,6 +3415,8 @@ static PyObject *Py_eqeq94(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEqeq94(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -2985,6 +3429,8 @@ static PyObject *Py_c2i06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -2992,6 +3438,8 @@ static PyObject *Py_c2i06a(PyObject *self, PyObject *args, PyObject *kwds)
eraC2i06a(_date1, _date2, _rc2i);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3009,6 +3457,8 @@ static PyObject *Py_gc2gde(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_a = ((double *)(dataptrarray[0]))[0];
_f = ((double *)(dataptrarray[1]))[0];
@@ -3023,6 +3473,8 @@ static PyObject *Py_gc2gde(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3039,6 +3491,8 @@ static PyObject *Py_num06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3046,6 +3500,8 @@ static PyObject *Py_num06a(PyObject *self, PyObject *args, PyObject *kwds)
eraNum06a(_date1, _date2, _rmatn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3057,12 +3513,16 @@ static PyObject *Py_fama03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFama03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3074,12 +3534,16 @@ static PyObject *Py_fad03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFad03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3092,6 +3556,8 @@ static PyObject *Py_s06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3099,6 +3565,8 @@ static PyObject *Py_s06a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraS06a(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3113,6 +3581,8 @@ static PyObject *Py_gst00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -3122,6 +3592,8 @@ static PyObject *Py_gst00a(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGst00a(_uta, _utb, _tta, _ttb);
*((double *)(dataptrarray[4])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3134,6 +3606,8 @@ static PyObject *Py_gst00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -3141,6 +3615,8 @@ static PyObject *Py_gst00b(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGst00b(_uta, _utb);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3153,6 +3629,8 @@ static PyObject *Py_bpn2xy(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rbpn = ((double *)(dataptrarray[0]));
_x = ((double *)(dataptrarray[1]));
@@ -3160,6 +3638,8 @@ static PyObject *Py_bpn2xy(PyObject *self, PyObject *args, PyObject *kwds)
eraBpn2xy(_rbpn, _x, _y);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3176,6 +3656,8 @@ static PyObject *Py_tttdb(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tt1 = ((double *)(dataptrarray[0]))[0];
_tt2 = ((double *)(dataptrarray[1]))[0];
@@ -3189,6 +3671,8 @@ static PyObject *Py_tttdb(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3204,12 +3688,16 @@ static PyObject *Py_fame03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFame03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3225,6 +3713,8 @@ static PyObject *Py_atoiq(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_type = ((const char *)(dataptrarray[0]));
_ob1 = ((double *)(dataptrarray[1]))[0];
@@ -3235,6 +3725,8 @@ static PyObject *Py_atoiq(PyObject *self, PyObject *args, PyObject *kwds)
eraAtoiq(_type, _ob1, _ob2, _astrom, _ri, _di);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3250,6 +3742,8 @@ static PyObject *Py_taitt(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tai1 = ((double *)(dataptrarray[0]))[0];
_tai2 = ((double *)(dataptrarray[1]))[0];
@@ -3262,6 +3756,8 @@ static PyObject *Py_taitt(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3279,6 +3775,8 @@ static PyObject *Py_nut06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3287,6 +3785,8 @@ static PyObject *Py_nut06a(PyObject *self, PyObject *args, PyObject *kwds)
eraNut06a(_date1, _date2, _dpsi, _deps);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3299,6 +3799,8 @@ static PyObject *Py_obl80(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3306,6 +3808,8 @@ static PyObject *Py_obl80(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraObl80(_date1, _date2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3321,6 +3825,8 @@ static PyObject *Py_tcbtdb(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tcb1 = ((double *)(dataptrarray[0]))[0];
_tcb2 = ((double *)(dataptrarray[1]))[0];
@@ -3333,6 +3839,8 @@ static PyObject *Py_tcbtdb(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3349,6 +3857,8 @@ static PyObject *Py_pnm00b(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3356,6 +3866,8 @@ static PyObject *Py_pnm00b(PyObject *self, PyObject *args, PyObject *kwds)
eraPnm00b(_date1, _date2, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3391,6 +3903,8 @@ static PyObject *Py_atco13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -3423,6 +3937,8 @@ static PyObject *Py_atco13(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3439,6 +3955,8 @@ static PyObject *Py_pnm00a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3446,6 +3964,8 @@ static PyObject *Py_pnm00a(PyObject *self, PyObject *args, PyObject *kwds)
eraPnm00a(_date1, _date2, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3462,6 +3982,8 @@ static PyObject *Py_taiut1(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tai1 = ((double *)(dataptrarray[0]))[0];
_tai2 = ((double *)(dataptrarray[1]))[0];
@@ -3475,6 +3997,8 @@ static PyObject *Py_taiut1(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3491,6 +4015,8 @@ static PyObject *Py_pnm80(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3498,6 +4024,8 @@ static PyObject *Py_pnm80(PyObject *self, PyObject *args, PyObject *kwds)
eraPnm80(_date1, _date2, _rmatpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3511,6 +4039,8 @@ static PyObject *Py_pom00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_xp = ((double *)(dataptrarray[0]))[0];
_yp = ((double *)(dataptrarray[1]))[0];
@@ -3519,6 +4049,8 @@ static PyObject *Py_pom00(PyObject *self, PyObject *args, PyObject *kwds)
eraPom00(_xp, _yp, _sp, _rpom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3530,12 +4062,16 @@ static PyObject *Py_faf03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFaf03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3552,6 +4088,8 @@ static PyObject *Py_c2t06a(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tta = ((double *)(dataptrarray[0]))[0];
_ttb = ((double *)(dataptrarray[1]))[0];
@@ -3563,6 +4101,8 @@ static PyObject *Py_c2t06a(PyObject *self, PyObject *args, PyObject *kwds)
eraC2t06a(_tta, _ttb, _uta, _utb, _xp, _yp, _rc2t);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3582,6 +4122,8 @@ static PyObject *Py_d2dtf(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_scale = ((const char *)(dataptrarray[0]));
_ndp = ((int *)(dataptrarray[1]))[0];
@@ -3598,6 +4140,8 @@ static PyObject *Py_d2dtf(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3617,6 +4161,8 @@ static PyObject *Py_pfw06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3627,6 +4173,8 @@ static PyObject *Py_pfw06(PyObject *self, PyObject *args, PyObject *kwds)
eraPfw06(_date1, _date2, _gamb, _phib, _psib, _epsa);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3642,6 +4190,8 @@ static PyObject *Py_fk5hz(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_r5 = ((double *)(dataptrarray[0]))[0];
_d5 = ((double *)(dataptrarray[1]))[0];
@@ -3652,6 +4202,8 @@ static PyObject *Py_fk5hz(PyObject *self, PyObject *args, PyObject *kwds)
eraFk5hz(_r5, _d5, _date1, _date2, _rh, _dh);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3667,6 +4219,8 @@ static PyObject *Py_tttcg(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tt1 = ((double *)(dataptrarray[0]))[0];
_tt2 = ((double *)(dataptrarray[1]))[0];
@@ -3679,6 +4233,8 @@ static PyObject *Py_tttcg(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3710,6 +4266,8 @@ static PyObject *Py_p06e(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3732,6 +4290,8 @@ static PyObject *Py_p06e(PyObject *self, PyObject *args, PyObject *kwds)
eraP06e(_date1, _date2, _eps0, _psia, _oma, _bpa, _bqa, _pia, _bpia, _epsa, _chia, _za, _zetaa, _thetaa, _pa, _gam, _phi, _psi);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3746,6 +4306,8 @@ static PyObject *Py_aticq(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ri = ((double *)(dataptrarray[0]))[0];
_di = ((double *)(dataptrarray[1]))[0];
@@ -3755,6 +4317,8 @@ static PyObject *Py_aticq(PyObject *self, PyObject *args, PyObject *kwds)
eraAticq(_ri, _di, _astrom, _rc, _dc);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3769,6 +4333,8 @@ static PyObject *Py_eform(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_n = ((int *)(dataptrarray[0]))[0];
_a = ((double *)(dataptrarray[1]));
@@ -3780,6 +4346,8 @@ static PyObject *Py_eform(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3796,6 +4364,8 @@ static PyObject *Py_gst94(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_uta = ((double *)(dataptrarray[0]))[0];
_utb = ((double *)(dataptrarray[1]))[0];
@@ -3803,6 +4373,8 @@ static PyObject *Py_gst94(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraGst94(_uta, _utb);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3822,6 +4394,8 @@ static PyObject *Py_pn06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3836,6 +4410,8 @@ static PyObject *Py_pn06(PyObject *self, PyObject *args, PyObject *kwds)
eraPn06(_date1, _date2, _dpsi, _deps, _epsa, _rb, _rp, _rbp, _rn, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3855,6 +4431,8 @@ static PyObject *Py_pn00(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -3869,6 +4447,8 @@ static PyObject *Py_pn00(PyObject *self, PyObject *args, PyObject *kwds)
eraPn00(_date1, _date2, _dpsi, _deps, _epsa, _rb, _rp, _rbp, _rn, _rbpn);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3884,6 +4464,8 @@ static PyObject *Py_fw2xy(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_gamb = ((double *)(dataptrarray[0]))[0];
_phib = ((double *)(dataptrarray[1]))[0];
@@ -3894,6 +4476,8 @@ static PyObject *Py_fw2xy(PyObject *self, PyObject *args, PyObject *kwds)
eraFw2xy(_gamb, _phib, _psi, _eps, _x, _y);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3908,6 +4492,8 @@ static PyObject *Py_fw2m(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_gamb = ((double *)(dataptrarray[0]))[0];
_phib = ((double *)(dataptrarray[1]))[0];
@@ -3917,6 +4503,8 @@ static PyObject *Py_fw2m(PyObject *self, PyObject *args, PyObject *kwds)
eraFw2m(_gamb, _phib, _psi, _eps, _r);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -3933,6 +4521,8 @@ static PyObject *Py_ut1tai(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ut11 = ((double *)(dataptrarray[0]))[0];
_ut12 = ((double *)(dataptrarray[1]))[0];
@@ -3946,6 +4536,8 @@ static PyObject *Py_ut1tai(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3965,6 +4557,8 @@ static PyObject *Py_tdbtcb(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tdb1 = ((double *)(dataptrarray[0]))[0];
_tdb2 = ((double *)(dataptrarray[1]))[0];
@@ -3977,6 +4571,8 @@ static PyObject *Py_tdbtcb(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -3997,6 +4593,8 @@ static PyObject *Py_aticqn(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ri = ((double *)(dataptrarray[0]))[0];
_di = ((double *)(dataptrarray[1]))[0];
@@ -4008,6 +4606,8 @@ static PyObject *Py_aticqn(PyObject *self, PyObject *args, PyObject *kwds)
eraAticqn(_ri, _di, _astrom, _n, _b, _rc, _dc);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4023,6 +4623,8 @@ static PyObject *Py_taiutc(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tai1 = ((double *)(dataptrarray[0]))[0];
_tai2 = ((double *)(dataptrarray[1]))[0];
@@ -4035,6 +4637,8 @@ static PyObject *Py_taiutc(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -4050,12 +4654,16 @@ static PyObject *Py_faur03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFaur03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4071,6 +4679,8 @@ static PyObject *Py_jdcalf(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ndp = ((int *)(dataptrarray[0]))[0];
_dj1 = ((double *)(dataptrarray[1]))[0];
@@ -4083,6 +4693,8 @@ static PyObject *Py_jdcalf(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -4108,6 +4720,8 @@ static PyObject *Py_fk52h(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_r5 = ((double *)(dataptrarray[0]))[0];
_d5 = ((double *)(dataptrarray[1]))[0];
@@ -4124,6 +4738,8 @@ static PyObject *Py_fk52h(PyObject *self, PyObject *args, PyObject *kwds)
eraFk52h(_r5, _d5, _dr5, _dd5, _px5, _rv5, _rh, _dh, _drh, _ddh, _pxh, _rvh);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4140,6 +4756,8 @@ static PyObject *Py_tdbtt(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tdb1 = ((double *)(dataptrarray[0]))[0];
_tdb2 = ((double *)(dataptrarray[1]))[0];
@@ -4153,6 +4771,8 @@ static PyObject *Py_tdbtt(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -4169,6 +4789,8 @@ static PyObject *Py_epj(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dj1 = ((double *)(dataptrarray[0]))[0];
_dj2 = ((double *)(dataptrarray[1]))[0];
@@ -4176,6 +4798,8 @@ static PyObject *Py_epj(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEpj(_dj1, _dj2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4194,6 +4818,8 @@ static PyObject *Py_atciq(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_rc = ((double *)(dataptrarray[0]))[0];
_dc = ((double *)(dataptrarray[1]))[0];
@@ -4207,6 +4833,8 @@ static PyObject *Py_atciq(PyObject *self, PyObject *args, PyObject *kwds)
eraAtciq(_rc, _dc, _pr, _pd, _px, _rv, _astrom, _ri, _di);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4223,6 +4851,8 @@ static PyObject *Py_ut1tt(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_ut11 = ((double *)(dataptrarray[0]))[0];
_ut12 = ((double *)(dataptrarray[1]))[0];
@@ -4236,6 +4866,8 @@ static PyObject *Py_ut1tt(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -4252,6 +4884,8 @@ static PyObject *Py_epb(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_dj1 = ((double *)(dataptrarray[0]))[0];
_dj2 = ((double *)(dataptrarray[1]))[0];
@@ -4259,6 +4893,8 @@ static PyObject *Py_epb(PyObject *self, PyObject *args, PyObject *kwds)
_c_retval = eraEpb(_dj1, _dj2);
*((double *)(dataptrarray[2])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4270,12 +4906,16 @@ static PyObject *Py_fapa03(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_t = ((double *)(dataptrarray[0]))[0];
_c_retval = eraFapa03(_t);
*((double *)(dataptrarray[1])) = _c_retval;
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4291,6 +4931,8 @@ static PyObject *Py_tcgtt(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_tcg1 = ((double *)(dataptrarray[0]))[0];
_tcg2 = ((double *)(dataptrarray[1]))[0];
@@ -4303,6 +4945,8 @@ static PyObject *Py_tcgtt(PyObject *self, PyObject *args, PyObject *kwds)
stat_ok = 0;
}
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
if (stat_ok) {
Py_RETURN_TRUE;
} else {
@@ -4321,6 +4965,8 @@ static PyObject *Py_pb06(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -4330,6 +4976,8 @@ static PyObject *Py_pb06(PyObject *self, PyObject *args, PyObject *kwds)
eraPb06(_date1, _date2, _bzeta, _bz, _btheta);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4341,12 +4989,16 @@ static PyObject *Py_aper(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_theta = ((double *)(dataptrarray[0]))[0];
_astrom = ((eraASTROM *)(dataptrarray[1]));
eraAper(_theta, _astrom);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
@@ -4360,6 +5012,8 @@ static PyObject *Py_apci13(PyObject *self, PyObject *args, PyObject *kwds)
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
_date1 = ((double *)(dataptrarray[0]))[0];
_date2 = ((double *)(dataptrarray[1]))[0];
@@ -4368,6 +5022,8 @@ static PyObject *Py_apci13(PyObject *self, PyObject *args, PyObject *kwds)
eraApci13(_date1, _date2, _astrom, _eo);
} while (iternext(it));
+
+ Py_END_ALLOW_THREADS
Py_RETURN_NONE;
}
diff --git a/astropy/_erfa/core.c.templ b/astropy/_erfa/core.c.templ
index 681f97a..1d18ff2 100644
--- a/astropy/_erfa/core.c.templ
+++ b/astropy/_erfa/core.c.templ
@@ -48,6 +48,8 @@ static PyObject *Py_{{ func.pyname }}(PyObject *self, PyObject *args, PyObject *
char **dataptrarray = NpyIter_GetDataPtrArray(it);
NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(it, NULL);
+ Py_BEGIN_ALLOW_THREADS
+
do {
{%- for arg in func.args_by_inout('in|inout|out') %}
_{{ arg.name }} = (({{ arg.ctype }} *)(dataptrarray[{{ func.args.index(arg) }}])){%- if arg.ctype_ptr[-1] != '*' %}[0]{%- endif %};
@@ -66,6 +68,8 @@ static PyObject *Py_{{ func.pyname }}(PyObject *self, PyObject *args, PyObject *
{%- endfor %}
} while (iternext(it));
+ Py_END_ALLOW_THREADS
+
{%- if func.args_by_inout('stat')|length > 0 %}
if (stat_ok) {
Py_RETURN_TRUE;
diff --git a/astropy/astropy.cfg b/astropy/astropy.cfg
index e067635..c10c94c 100644
--- a/astropy/astropy.cfg
+++ b/astropy/astropy.cfg
@@ -65,6 +65,9 @@
## Whether to allow astropy.vo.samp to use the internet, if available
# use_internet = True
+## How many times to retry communications when they fail
+# n_retries = 10
+
[vo.validator]
## Cone Search services master list for validation.
diff --git a/astropy/config/configuration.py b/astropy/config/configuration.py
index 655ca33..b2d8f27 100644
--- a/astropy/config/configuration.py
+++ b/astropy/config/configuration.py
@@ -225,7 +225,13 @@ class ConfigItem(object):
from ..utils import isiterable
if module is None:
- module = self.__class__.__module__
+ module = find_current_module(2)
+ if module is None:
+ msg1 = 'Cannot automatically determine get_config module, '
+ msg2 = 'because it is not called from inside a valid module'
+ raise RuntimeError(msg1 + msg2)
+ else:
+ module = module.__name__
self.module = module
self.description = description
diff --git a/astropy/coordinates/tests/test_angular_separation.py b/astropy/coordinates/tests/test_angular_separation.py
index 074bfcc..8ef769a 100644
--- a/astropy/coordinates/tests/test_angular_separation.py
+++ b/astropy/coordinates/tests/test_angular_separation.py
@@ -11,7 +11,7 @@ Tests for the projected separation stuff
import numpy as np
from numpy import testing as npt
-from ...tests.helper import pytest
+from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose
from ... import units as u
from ..builtin_frames import ICRS, FK5, Galactic
from .. import Angle, Distance
@@ -74,7 +74,7 @@ def test_proj_separations():
assert isinstance(sep, Angle)
assert sep.degree == 1
- npt.assert_allclose(sep.arcminute, 60.)
+ assert_allclose(sep.arcminute, 60.)
# these operations have ambiguous interpretations for points on a sphere
with pytest.raises(TypeError):
@@ -87,12 +87,12 @@ def test_proj_separations():
# if there is a defined conversion between the relevant coordinate systems,
# it will be automatically performed to get the right angular separation
- npt.assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
- ncp.separation(ngp).degree)
+ assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
+ ncp.separation(ngp).degree)
# distance from the north galactic pole to celestial pole
- npt.assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
- 62.87174758503201)
+ assert_allclose(ncp.separation(ngp.transform_to(ICRS)).degree,
+ 62.87174758503201)
def test_3d_separations():
@@ -105,4 +105,4 @@ def test_3d_separations():
sep3d = c2.separation_3d(c1)
assert isinstance(sep3d, Distance)
- npt.assert_allclose(sep3d - 1*u.kpc, 0, atol=1e-12)
+ assert_allclose(sep3d - 1*u.kpc, 0*u.kpc, atol=1e-12*u.kpc)
diff --git a/astropy/coordinates/tests/test_api_ape5.py b/astropy/coordinates/tests/test_api_ape5.py
index 3b5af84..8c8d4fe 100644
--- a/astropy/coordinates/tests/test_api_ape5.py
+++ b/astropy/coordinates/tests/test_api_ape5.py
@@ -17,11 +17,10 @@ deviations from the original APE5 plan.
import numpy as np
from numpy.random import randn
from numpy import testing as npt
-from ...tests.helper import pytest
+from ...tests.helper import (pytest, quantity_allclose as allclose,
+ assert_quantity_allclose as assert_allclose)
raises = pytest.raises
-from ...extern import six
-
from ... import units as u
from ... import time
from ... import coordinates as coords
@@ -133,14 +132,14 @@ def test_representations_api():
c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc)
c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc)
assert c1.xyz.unit == c2.xyz.unit == u.kpc
- npt.assert_allclose((c1.z / 1000) - c2.z, 0, atol=1e-10)
+ assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc)
# representations convert into other representations via `represent_as`
srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc)
crep = srep.represent_as(CartesianRepresentation)
- npt.assert_allclose(crep.x.value, 0, atol=1e-10)
- npt.assert_allclose(crep.y.value, 1, atol=1e-10)
- npt.assert_allclose(crep.z.value, 0, atol=1e-10)
+ assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc)
+ assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc)
+ assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc)
# The functions that actually do the conversion are defined via methods on the
# representation classes. This may later be expanded into a full registerable
# transform graph like the coordinate frames, but initially it will be a simpler
@@ -190,8 +189,9 @@ def test_frame_api():
# advanced users' use.
# The actual position information is accessed via the representation objects
- npt.assert_allclose(icrs.represent_as(SphericalRepresentation).lat.to(u.deg), 5*u.deg)
- npt.assert_allclose(icrs.spherical.lat.to(u.deg), 5*u.deg) # shorthand for the above
+ assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg)
+ # shorthand for the above
+ assert_allclose(icrs.spherical.lat, 5*u.deg)
assert icrs.cartesian.z.value > 0
# Many frames have a "default" representation, the one in which they are
@@ -199,15 +199,15 @@ def test_frame_api():
# coordinates. E.g., most equatorial coordinate systems are spherical with RA and
# Dec. This works simply as a shorthand for the longer form above
- npt.assert_allclose(icrs.dec.to(u.deg), 5*u.deg)
- npt.assert_allclose(fk5.ra.to(u.hourangle), 8*u.hourangle)
+ assert_allclose(icrs.dec, 5*u.deg)
+ assert_allclose(fk5.ra, 8*u.hourangle)
assert icrs.representation == SphericalRepresentation
# low-level classes can also be initialized with names valid for that representation
# and frame:
icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc)
- npt.assert_allclose(icrs.ra.to(u.deg), icrs_2.ra.to(u.deg))
+ assert_allclose(icrs.ra, icrs_2.ra)
# and these are taken as the default if keywords are not given:
#icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc)
@@ -272,8 +272,8 @@ def test_transform_api():
# the frame information:
samefk5 = fk5.transform_to(FK5)
# `fk5` was initialized using default `obstime` and `equinox`, so:
- npt.assert_allclose(samefk5.ra - fk5.ra, 0, atol=1e-10)
- npt.assert_allclose(samefk5.dec - fk5.dec, 0, atol=1e-10)
+ assert_allclose(samefk5.ra, fk5.ra, atol=1e-10*u.deg)
+ assert_allclose(samefk5.dec, fk5.dec, atol=1e-10*u.deg)
# transforming to a new frame necessarily loses framespec information if that
# information is not applicable to the new frame. This means transforms are not
@@ -284,11 +284,11 @@ def test_transform_api():
# `ic_trans` does not have an `equinox`, so now when we transform back to FK5,
# it's a *different* RA and Dec
fk5_trans = ic_trans.transform_to(FK5)
- assert not np.allclose(fk5_2.ra.to(u.deg), fk5_trans.ra.to(u.deg), rtol=0, atol=1e-10)
+ assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg)
# But if you explicitly give the right equinox, all is fine
fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001))
- npt.assert_allclose(fk5_2.ra.to(u.deg), fk5_trans_2.ra.to(u.deg), rtol=0, atol=1e-10)
+ assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg)
# Trying to tansforming a frame with no data is of course an error:
with raises(ValueError):
@@ -403,7 +403,7 @@ def test_highlevel_api():
# But it *is* necessary once we transform to FK5
sc3 = sc2.transform_to('fk5')
assert sc3.equinox == J2001
- npt.assert_allclose(sc1.ra, sc3.ra)
+ assert_allclose(sc1.ra, sc3.ra)
# `SkyCoord` will also include the attribute-style access that is in the
# v0.2/0.3 coordinate objects. This will *not* be in the low-level classes
diff --git a/astropy/coordinates/tests/test_arrays.py b/astropy/coordinates/tests/test_arrays.py
index 8c4caa9..6544227 100644
--- a/astropy/coordinates/tests/test_arrays.py
+++ b/astropy/coordinates/tests/test_arrays.py
@@ -6,7 +6,7 @@
from __future__ import (absolute_import, division, print_function,
unicode_literals)
-from ...tests.helper import pytest
+from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose
import numpy as np
from numpy import testing as npt
@@ -243,13 +243,13 @@ def test_array_indexing():
assert c2.dec.degree == -10
c3 = c1[2:5]
- npt.assert_allclose(c3.ra.degree, [80, 120, 160])
- npt.assert_allclose(c3.dec.degree, [-50, -30, -10])
+ assert_allclose(c3.ra, [80, 120, 160] * u.deg)
+ assert_allclose(c3.dec, [-50, -30, -10] * u.deg)
c4 = c1[np.array([2, 5, 8])]
- npt.assert_allclose(c4.ra.degree, [80, 200, 320])
- npt.assert_allclose(c4.dec.degree, [-50, 10, 70])
+ assert_allclose(c4.ra, [80, 200, 320] * u.deg)
+ assert_allclose(c4.dec, [-50, 10, 70] * u.deg)
#now make sure the equinox is preserved
assert c2.equinox == c1.equinox
diff --git a/astropy/coordinates/tests/test_earth.py b/astropy/coordinates/tests/test_earth.py
index f9ee7c8..db220a0 100644
--- a/astropy/coordinates/tests/test_earth.py
+++ b/astropy/coordinates/tests/test_earth.py
@@ -8,36 +8,35 @@ from __future__ import (absolute_import, division, print_function,
"""Test initalization of angles not already covered by the API tests"""
-import functools
import numpy as np
from ..earth import EarthLocation, ELLIPSOIDS
from ..angles import Longitude, Latitude
-from ...tests.helper import pytest
+from ...tests.helper import pytest, quantity_allclose
from ...utils import minversion
from ... import units as u
+def allclose_m14(a, b, rtol=1.e-14, atol=None):
+ if atol is None:
+ atol = 1.e-14 * getattr(a, 'unit', 1)
+ return quantity_allclose(a, b, rtol, atol)
-allclose_m14 = functools.partial(np.allclose, rtol=1.e-14, atol=1.e-14)
-allclose_m8 = functools.partial(np.allclose, rtol=1.e-8, atol=1.e-8)
+def allclose_m8(a, b, rtol=1.e-8, atol=None):
+ if atol is None:
+ atol = 1.e-8 * getattr(a, 'unit', 1)
+ return quantity_allclose(a, b, rtol, atol)
-if minversion(np, '1.7.0'):
- isclose_m14 = functools.partial(np.isclose, rtol=1.e-14, atol=1.e-14)
- isclose_m8 = functools.partial(np.isclose, rtol=1.e-8, atol=1.e-8)
-else:
- def isclose_m14(val, ref):
- return np.array([allclose_m14(v, r, rtol=1.e-14, atol=1.e-14)
- for (v, r) in zip(val, ref)])
+def isclose_m14(val, ref):
+ return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
- def isclose_m8(val, ref):
- return np.array([allclose_m8(v, r, rtol=1.e-8, atol=1.e-8)
- for (v, r) in zip(val, ref)])
+def isclose_m8(val, ref):
+ return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
- assert np.allclose(val, valok, atol=dval)
+ assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
@@ -48,19 +47,19 @@ def test_gc2gd():
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic('WGS84')
- e, p, h = e.to(u.radian).value, p.to(u.radian).value, h.to(u.m).value
+ e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('GRS80')
- e, p, h = e.to(u.radian).value, p.to(u.radian).value, h.to(u.m).value
+ e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('WGS72')
- e, p, h = e.to(u.radian).value, p.to(u.radian).value, h.to(u.m).value
+ e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
@@ -75,19 +74,19 @@ def test_gd2gc():
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
- xyz = location.to_geocentric()
+ xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
- xyz = location.to_geocentric()
+ xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
- xyz = location.to_geocentric()
+ xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
diff --git a/astropy/coordinates/tests/test_frames.py b/astropy/coordinates/tests/test_frames.py
index 3380293..e5a153a 100644
--- a/astropy/coordinates/tests/test_frames.py
+++ b/astropy/coordinates/tests/test_frames.py
@@ -5,11 +5,10 @@ from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
-from numpy.testing import assert_allclose
from ... import units as u
-from ...tests.helper import pytest
-from ...extern import six
+from ...tests.helper import (pytest, quantity_allclose as allclose,
+ assert_quantity_allclose as assert_allclose)
from .. import representation
NUMPY_LT_1P7 = [int(x) for x in np.__version__.split('.')[:2]] < [1, 7]
@@ -353,7 +352,7 @@ def test_sep():
i4 = ICRS(ra=[1, 2]*u.deg, dec=[3, 4]*u.deg, distance=[4, 5]*u.kpc)
sep3d = i3.separation_3d(i4)
- assert_allclose(sep3d.to(u.kpc).value, np.array([1, 1]))
+ assert_allclose(sep3d.to(u.kpc), np.array([1, 1])*u.kpc)
def test_time_inputs():
@@ -565,16 +564,16 @@ def test_eloc_attributes():
# only along the z-axis), but latitude should not. Also, height is relative
# to the *surface* in EarthLocation, but the ITRS distance is relative to
# the center of the Earth
- assert not np.allclose(el2.latitude, it.spherical.lat)
- assert np.allclose(el2.longitude, it.spherical.lon)
+ assert not allclose(el2.latitude, it.spherical.lat)
+ assert allclose(el2.longitude, it.spherical.lon)
assert el2.height < -6000*u.km
el3 = AltAz(location=gc).location
# GCRS inputs implicitly get transformed to ITRS and then onto
# EarthLocation's elliptical geoid. So both lat and lon shouldn't match
assert isinstance(el3, EarthLocation)
- assert not np.allclose(el3.latitude, gc.dec)
- assert not np.allclose(el3.longitude, gc.ra)
+ assert not allclose(el3.latitude, gc.dec)
+ assert not allclose(el3.longitude, gc.ra)
assert np.abs(el3.height) < 500*u.km
diff --git a/astropy/coordinates/tests/test_matching.py b/astropy/coordinates/tests/test_matching.py
index e6d1b8b..a3d7217 100644
--- a/astropy/coordinates/tests/test_matching.py
+++ b/astropy/coordinates/tests/test_matching.py
@@ -5,7 +5,7 @@ from __future__ import (absolute_import, division, print_function,
import numpy as np
from numpy import testing as npt
-from ...tests.helper import pytest
+from ...tests.helper import pytest, assert_quantity_allclose as assert_allclose
from ... import units as u
from ...utils import minversion
@@ -61,15 +61,15 @@ def test_matching_function_3d_and_sky():
npt.assert_array_equal(idx, [2, 3])
- npt.assert_allclose(d2d.degree, [1, 1.9])
+ assert_allclose(d2d, [1, 1.9] * u.deg)
assert np.abs(d3d[0].to(u.kpc).value - np.radians(1)) < 1e-6
assert np.abs(d3d[1].to(u.kpc).value - 5*np.radians(1.9)) < 1e-5
idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
- npt.assert_allclose(d2d.degree, [0, 0.1])
- npt.assert_allclose(d3d.to(u.kpc).value, [4, 4.0000019])
+ assert_allclose(d2d, [0, 0.1] * u.deg)
+ assert_allclose(d3d, [4, 4.0000019] * u.kpc)
@pytest.mark.skipif(str('not HAS_SCIPY'))
@@ -108,16 +108,16 @@ def test_matching_method():
idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
- npt.assert_allclose(d2d1, d2d2)
- npt.assert_allclose(d3d1, d3d2)
+ assert_allclose(d2d1, d2d2)
+ assert_allclose(d3d1, d3d2)
#should be the same as above because there's no distance, but just make sure this method works
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
- npt.assert_allclose(d2d1, d2d2)
- npt.assert_allclose(d3d1, d3d2)
+ assert_allclose(d2d1, d2d2)
+ assert_allclose(d3d1, d3d2)
assert len(idx1) == len(d2d1) == len(d3d1) == 20
@@ -136,7 +136,7 @@ def test_search_around():
assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)]
assert d2d_1deg[0] == 1.0*u.deg
- npt.assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg)
+ assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg)
assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)]
@@ -145,7 +145,7 @@ def test_search_around():
assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)]
assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)]
- npt.assert_allclose(d2d_sm, [2, 1]*u.deg)
+ assert_allclose(d2d_sm, [2, 1]*u.deg)
@pytest.mark.skipif(str('not HAS_SCIPY'))
diff --git a/astropy/coordinates/tests/test_sky_coord.py b/astropy/coordinates/tests/test_sky_coord.py
index 1dd5855..c5a624b 100644
--- a/astropy/coordinates/tests/test_sky_coord.py
+++ b/astropy/coordinates/tests/test_sky_coord.py
@@ -10,13 +10,13 @@ from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
-import functools
import numpy as np
-from numpy import testing as npt
+import numpy.testing as npt
from ... import units as u
-from ...tests.helper import pytest, catch_warnings
+from ...tests.helper import (pytest, catch_warnings, quantity_allclose,
+ assert_quantity_allclose as assert_allclose)
from ..representation import REPRESENTATION_CLASSES
from ...coordinates import (ICRS, FK4, FK5, Galactic, SkyCoord, Angle,
SphericalRepresentation, CartesianRepresentation,
@@ -32,7 +32,11 @@ C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5)
J2001 = Time('J2001', scale='utc')
-allclose = functools.partial(np.allclose, rtol=0.0, atol=1e-8)
+def allclose(a, b, rtol=0.0, atol=None):
+ if atol is None:
+ atol = 1.e-8 * getattr(a, 'unit', 1.)
+ return quantity_allclose(a, b, rtol, atol)
+
try:
import scipy
@@ -269,12 +273,12 @@ def test_coord_init_array():
[['1', '2'], ['3', '4']],
[[1, 2], [3, 4]]):
sc = SkyCoord(a, unit='deg')
- assert allclose(sc.ra - [1, 3] * u.deg, 0)
- assert allclose(sc.dec - [2, 4] * u.deg, 0)
+ assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
+ assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit='deg')
- assert allclose(sc.ra - [1, 3] * u.deg, 0)
- assert allclose(sc.dec - [2, 4] * u.deg, 0)
+ assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
+ assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
@@ -546,13 +550,13 @@ def test_position_angle():
c1 = SkyCoord(0*u.deg, 0*u.deg)
c2 = SkyCoord(1*u.deg, 0*u.deg)
- npt.assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0)
+ assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg)
c3 = SkyCoord(1*u.deg, 0.1*u.deg)
assert c1.position_angle(c3) < 90*u.deg
c4 = SkyCoord(0*u.deg, 1*u.deg)
- npt.assert_allclose(c1.position_angle(c4), 0)
+ assert_allclose(c1.position_angle(c4), 0*u.deg)
carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg)
carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg)
@@ -585,8 +589,8 @@ def test_table_to_coord():
c = SkyCoord(t['ra'], t['dec'])
- assert allclose(c.ra.to(u.deg), [1, 2, 3])
- assert allclose(c.dec.to(u.deg), [4, 5, 6])
+ assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
+ assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
@@ -600,8 +604,7 @@ def assert_quantities_allclose(coord, q1s, attrs):
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
- dq = q1 - q2
- assert np.allclose(dq.value, 0.0, rtol=0, atol=1e-13)
+ assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
@@ -813,13 +816,13 @@ def test_wcs_methods(mode, origin):
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
- npt.assert_allclose(new.ra.degree, ref.ra.degree)
- npt.assert_allclose(new.dec.degree, ref.dec.degree)
+ assert_allclose(new.ra.degree, ref.ra.degree)
+ assert_allclose(new.dec.degree, ref.dec.degree)
#also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
- npt.assert_allclose(scnew.ra.degree, ref.ra.degree)
- npt.assert_allclose(scnew.dec.degree, ref.dec.degree)
+ assert_allclose(scnew.ra.degree, ref.ra.degree)
+ assert_allclose(scnew.dec.degree, ref.dec.degree)
#Also make sure the right type comes out
class SkyCoord2(SkyCoord):
diff --git a/astropy/coordinates/tests/test_transformations.py b/astropy/coordinates/tests/test_transformations.py
index ed84a80..cae9e66 100644
--- a/astropy/coordinates/tests/test_transformations.py
+++ b/astropy/coordinates/tests/test_transformations.py
@@ -5,7 +5,6 @@ from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
-from numpy import testing as npt
from ... import units as u
from ..distances import Distance
@@ -14,7 +13,8 @@ from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, \
Galactocentric, CIRS, GCRS, AltAz, ITRS
from .. import representation as r
from ..baseframe import frame_transform_graph
-from ...tests.helper import pytest
+from ...tests.helper import (pytest, quantity_allclose as allclose,
+ assert_quantity_allclose as assert_allclose)
from .utils import randomly_sample_sphere
from ...time import Time
@@ -39,8 +39,8 @@ def test_transform_classes():
c1 = TestCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TestCoo2)
- npt.assert_allclose(c2.ra.radian, 1)
- npt.assert_allclose(c2.dec.radian, 0.5)
+ assert_allclose(c2.ra.radian, 1)
+ assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
@@ -53,8 +53,8 @@ def test_transform_classes():
c3 = TestCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TestCoo2)
- npt.assert_allclose(c4.ra.degree, 1)
- npt.assert_allclose(c4.ra.degree, 1)
+ assert_allclose(c4.ra.degree, 1)
+ assert_allclose(c4.ra.degree, 1)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
@@ -72,8 +72,8 @@ def test_transform_decos():
return TestCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TestCoo2)
- npt.assert_allclose(c2.ra.degree, 1)
- npt.assert_allclose(c2.dec.degree, 4)
+ assert_allclose(c2.ra.degree, 1)
+ assert_allclose(c2.dec.degree, 4)
c3 = TestCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@@ -85,9 +85,9 @@ def test_transform_decos():
c4 = c3.transform_to(TestCoo2)
- npt.assert_allclose(c4.cartesian.x.value, 2)
- npt.assert_allclose(c4.cartesian.y.value, 1)
- npt.assert_allclose(c4.cartesian.z.value, 2)
+ assert_allclose(c4.cartesian.x, 2*u.pc)
+ assert_allclose(c4.cartesian.y, 1*u.pc)
+ assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
@@ -138,29 +138,28 @@ def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
- from numpy.testing.utils import assert_allclose
from ...utils import NumpyRNGContext
from .. import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
- npt.assert_allclose(x, 1)
- npt.assert_allclose(y, 0)
- npt.assert_allclose(z, 0)
+ assert_allclose(x, 1)
+ assert_allclose(y, 0)
+ assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
- npt.assert_allclose(x, 0)
- npt.assert_allclose(y, 0)
- npt.assert_allclose(z, 0)
+ assert_allclose(x, 0)
+ assert_allclose(y, 0)
+ assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
- npt.assert_allclose(x, 3)
- npt.assert_allclose(y, 4)
- npt.assert_allclose(z, 0)
+ assert_allclose(x, 3)
+ assert_allclose(y, 4)
+ assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
- npt.assert_allclose(r, 1)
- npt.assert_allclose(lat, 0)
- npt.assert_allclose(lon, np.pi / 2)
+ assert_allclose(r, 1)
+ assert_allclose(lat, 0 * u.deg)
+ assert_allclose(lon, np.pi / 2 * u.rad)
#test round-tripping
with NumpyRNGContext(13579):
@@ -297,17 +296,17 @@ def test_galactocentric():
g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
- diff = np.array(np.abs(g_xyz - gc_xyz))
+ diff = np.abs(g_xyz - gc_xyz)
- assert np.allclose(diff[0], 8.3, atol=1E-5)
- assert np.allclose(diff[1:], 0, atol=1E-5)
+ assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
+ assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
# generate some test coordinates
g = Galactic(l=[0,0,45,315]*u.deg, b=[-45,45,0,0]*u.deg,
distance=[np.sqrt(2)]*4*u.kpc)
xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
true_xyz = np.array([[0,0,-1.],[0,0,1],[0,1,0],[0,-1,0]]).T*u.kpc
- assert np.allclose(xyz.to(u.kpc).value, true_xyz.to(u.kpc).value, atol=1E-5)
+ assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
# check that ND arrays work
@@ -322,7 +321,7 @@ def test_galactocentric():
g1t = g1.transform_to(Galactic)
g2t = g2.transform_to(Galactic)
- np.testing.assert_almost_equal(g1t.cartesian.xyz.value, g2t.cartesian.xyz.value[:,:,0,0])
+ assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:,:,0,0])
# from Galactic to Galactocentric
l = np.linspace(15, 30., 100) * u.deg
@@ -352,34 +351,34 @@ def test_icrs_cirs():
cirsnod = inod.transform_to(cframe1) #uses the default time
#first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS)
- npt.assert_allclose(inod.ra, inod2.ra)
- npt.assert_allclose(inod.dec, inod2.dec)
+ assert_allclose(inod.ra, inod2.ra)
+ assert_allclose(inod.dec, inod2.dec)
#now check that a different time yields different answers
cframe2 = CIRS(obstime=Time('J2005', scale='utc'))
cirsnod2 = inod.transform_to(cframe2)
- assert not np.allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
- assert not np.allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
+ assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
+ assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
- assert not np.allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
- assert not np.allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
+ assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
+ assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
- assert not np.allclose(cirswd.distance, iwd.distance, rtol=1e-8)
+ assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
#now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
- npt.assert_allclose(cirsnod.ra, cirsnod3.ra)
- npt.assert_allclose(cirsnod.dec, cirsnod3.dec)
+ assert_allclose(cirsnod.ra, cirsnod3.ra)
+ assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
- assert not np.allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
- assert not np.allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
+ assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
+ assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
- npt.assert_allclose(cirsnod.ra, cirsnod5.ra)
- npt.assert_allclose(cirsnod.dec, cirsnod5.dec)
+ assert_allclose(cirsnod.ra, cirsnod5.ra)
+ assert_allclose(cirsnod.dec, cirsnod5.dec)
def test_icrs_gcrs():
@@ -394,44 +393,45 @@ def test_icrs_gcrs():
gcrsnod = inod.transform_to(gframe1) #uses the default time
#first do a round-tripping test
inod2 = gcrsnod.transform_to(ICRS)
- npt.assert_allclose(inod.ra, inod2.ra)
- npt.assert_allclose(inod.dec, inod2.dec)
+ assert_allclose(inod.ra, inod2.ra)
+ assert_allclose(inod.dec, inod2.dec)
#now check that a different time yields different answers
gframe2 = GCRS(obstime=Time('J2005', scale='utc'))
gcrsnod2 = inod.transform_to(gframe2)
- assert not np.allclose(gcrsnod.ra, gcrsnod2.ra, rtol=1e-8, atol=1e-10)
- assert not np.allclose(gcrsnod.dec, gcrsnod2.dec, rtol=1e-8, atol=1e-10)
+ assert not allclose(gcrsnod.ra, gcrsnod2.ra, rtol=1e-8, atol=1e-10*u.deg)
+ assert not allclose(gcrsnod.dec, gcrsnod2.dec, rtol=1e-8, atol=1e-10*u.deg)
# parallax effects should be included, so with and w/o distance should be different
gcrswd = iwd.transform_to(gframe1)
- assert not np.allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10)
- assert not np.allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10)
+ assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg)
+ assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg)
# and the distance should transform at least somehow
- assert not np.allclose(gcrswd.distance, iwd.distance, rtol=1e-8, atol=1e-10)
+ assert not allclose(gcrswd.distance, iwd.distance, rtol=1e-8,
+ atol=1e-10*u.pc)
#now check that the cirs self-transform works as expected
gcrsnod3 = gcrsnod.transform_to(gframe1) # should be a no-op
- npt.assert_allclose(gcrsnod.ra, gcrsnod3.ra)
- npt.assert_allclose(gcrsnod.dec, gcrsnod3.dec)
+ assert_allclose(gcrsnod.ra, gcrsnod3.ra)
+ assert_allclose(gcrsnod.dec, gcrsnod3.dec)
gcrsnod4 = gcrsnod.transform_to(gframe2) # should be different
- assert not np.allclose(gcrsnod4.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10)
- assert not np.allclose(gcrsnod4.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10)
+ assert not allclose(gcrsnod4.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg)
+ assert not allclose(gcrsnod4.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg)
gcrsnod5 = gcrsnod4.transform_to(gframe1) # should be back to the same
- npt.assert_allclose(gcrsnod.ra, gcrsnod5.ra, rtol=1e-8, atol=1e-10)
- npt.assert_allclose(gcrsnod.dec, gcrsnod5.dec, rtol=1e-8, atol=1e-10)
+ assert_allclose(gcrsnod.ra, gcrsnod5.ra, rtol=1e-8, atol=1e-10*u.deg)
+ assert_allclose(gcrsnod.dec, gcrsnod5.dec, rtol=1e-8, atol=1e-10*u.deg)
#also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s)
gcrsnod6 = inod.transform_to(gframe3) # should be different
- assert not np.allclose(gcrsnod.ra, gcrsnod6.ra, rtol=1e-8, atol=1e-10)
- assert not np.allclose(gcrsnod.dec, gcrsnod6.dec, rtol=1e-8, atol=1e-10)
+ assert not allclose(gcrsnod.ra, gcrsnod6.ra, rtol=1e-8, atol=1e-10*u.deg)
+ assert not allclose(gcrsnod.dec, gcrsnod6.dec, rtol=1e-8, atol=1e-10*u.deg)
inodviag3 = gcrsnod6.transform_to(ICRS) # and now back to the original
- npt.assert_allclose(inod.ra, inodviag3.ra)
- npt.assert_allclose(inod.dec, inodviag3.dec)
+ assert_allclose(inod.ra, inodviag3.ra)
+ assert_allclose(inod.dec, inodviag3.dec)
def test_cirs_to_altaz():
@@ -453,10 +453,10 @@ def test_cirs_to_altaz():
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
#check round-tripping
- npt.assert_allclose(cirs.ra.deg, cirs2.ra.deg)
- npt.assert_allclose(cirs.dec.deg, cirs2.dec.deg)
- npt.assert_allclose(cirs.ra.deg, cirs3.ra.deg)
- npt.assert_allclose(cirs.dec.deg, cirs3.dec.deg)
+ assert_allclose(cirs.ra, cirs2.ra)
+ assert_allclose(cirs.dec, cirs2.dec)
+ assert_allclose(cirs.ra, cirs3.ra)
+ assert_allclose(cirs.dec, cirs3.dec)
def test_gcrs_itrs():
@@ -470,17 +470,17 @@ def test_gcrs_itrs():
gcrs2 = gcrs.transform_to(ITRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS).transform_to(gcrs)
- npt.assert_allclose(gcrs.ra.deg, gcrs2.ra.deg)
- npt.assert_allclose(gcrs.dec.deg, gcrs2.dec.deg)
- assert not np.allclose(gcrs.ra.deg, gcrs6_2.ra.deg)
- assert not np.allclose(gcrs.dec.deg, gcrs6_2.dec.deg)
+ assert_allclose(gcrs.ra, gcrs2.ra)
+ assert_allclose(gcrs.dec, gcrs2.dec)
+ assert not allclose(gcrs.ra, gcrs6_2.ra)
+ assert not allclose(gcrs.dec, gcrs6_2.dec)
#also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation = r.CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS).transform_to(gcrsc)
- npt.assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg)
- npt.assert_allclose(gcrsc.spherical.lat.deg, gcrsc2.dec.deg)
+ assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg)
+ assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
@@ -495,10 +495,10 @@ def test_cirs_itrs():
cirs6_2 = cirs6.transform_to(ITRS).transform_to(cirs) # different obstime
#just check round-tripping
- npt.assert_allclose(cirs.ra.deg, cirs2.ra.deg)
- npt.assert_allclose(cirs.dec.deg, cirs2.dec.deg)
- assert not np.allclose(cirs.ra.deg, cirs6_2.ra.deg)
- assert not np.allclose(cirs.dec.deg, cirs6_2.dec.deg)
+ assert_allclose(cirs.ra, cirs2.ra)
+ assert_allclose(cirs.dec, cirs2.dec)
+ assert not allclose(cirs.ra, cirs6_2.ra)
+ assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
@@ -513,19 +513,19 @@ def test_gcrs_cirs():
gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs)
- npt.assert_allclose(gcrs.ra.deg, gcrs2.ra.deg)
- npt.assert_allclose(gcrs.dec.deg, gcrs2.dec.deg)
- assert not np.allclose(gcrs.ra.deg, gcrs6_2.ra.deg)
- assert not np.allclose(gcrs.dec.deg, gcrs6_2.dec.deg)
+ assert_allclose(gcrs.ra, gcrs2.ra)
+ assert_allclose(gcrs.dec, gcrs2.dec)
+ assert not allclose(gcrs.ra, gcrs6_2.ra)
+ assert not allclose(gcrs.dec, gcrs6_2.dec)
#now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs)
- npt.assert_allclose(gcrs.ra.deg, gcrs3.ra.deg)
- npt.assert_allclose(gcrs.dec.deg, gcrs3.dec.deg)
+ assert_allclose(gcrs.ra, gcrs3.ra)
+ assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs)
- npt.assert_allclose(gcrs.ra.deg, gcrs4.ra.deg)
- npt.assert_allclose(gcrs.dec.deg, gcrs4.dec.deg)
+ assert_allclose(gcrs.ra, gcrs4.ra)
+ assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
@@ -549,7 +549,7 @@ def test_gcrs_altaz():
aa3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(aaframe)
# make sure they're all consistent
- npt.assert_allclose(aa1.alt.deg, aa2.alt.deg)
- npt.assert_allclose(aa1.az.deg, aa2.az.deg)
- npt.assert_allclose(aa1.alt.deg, aa3.alt.deg)
- npt.assert_allclose(aa1.az.deg, aa3.az.deg)
+ assert_allclose(aa1.alt, aa2.alt)
+ assert_allclose(aa1.az, aa2.az)
+ assert_allclose(aa1.alt, aa3.alt)
+ assert_allclose(aa1.az, aa3.az)
diff --git a/astropy/cosmology/tests/test_cosmology.py b/astropy/cosmology/tests/test_cosmology.py
index d0b7bea..e93cd8c 100644
--- a/astropy/cosmology/tests/test_cosmology.py
+++ b/astropy/cosmology/tests/test_cosmology.py
@@ -7,7 +7,7 @@ from io import StringIO
import numpy as np
from .. import core, funcs
-from ...tests.helper import pytest
+from ...tests.helper import pytest, quantity_allclose as allclose
from ... import units as u
try:
@@ -52,45 +52,45 @@ def test_init():
def test_basic():
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04, Ob0=0.05)
- assert np.allclose(cosmo.Om0, 0.27)
- assert np.allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
- assert np.allclose(cosmo.Ob0, 0.05)
- assert np.allclose(cosmo.Odm0, 0.27 - 0.05)
+ assert allclose(cosmo.Om0, 0.27)
+ assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
+ assert allclose(cosmo.Ob0, 0.05)
+ assert allclose(cosmo.Odm0, 0.27 - 0.05)
# This next test will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
- assert np.allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
- assert np.allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
- assert np.allclose(cosmo.Ok0, 0.0)
- assert np.allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
- 1.0, rtol=1e-6)
- assert np.allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
- cosmo.Onu(1), 1.0, rtol=1e-6)
- assert np.allclose(cosmo.Tcmb0.value, 2.0)
- assert np.allclose(cosmo.Tnu0.value, 1.4275317, rtol=1e-5)
- assert np.allclose(cosmo.Neff, 3.04)
- assert np.allclose(cosmo.h, 0.7)
- assert np.allclose(cosmo.H0.value, 70.0)
+ assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
+ assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
+ assert allclose(cosmo.Ok0, 0.0)
+ assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
+ 1.0, rtol=1e-6)
+ assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
+ cosmo.Onu(1), 1.0, rtol=1e-6)
+ assert allclose(cosmo.Tcmb0, 2.0 * u.K)
+ assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
+ assert allclose(cosmo.Neff, 3.04)
+ assert allclose(cosmo.h, 0.7)
+ assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
# Make sure setting them as quantities gives the same results
H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
T = u.Quantity(2.0, u.K)
cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
- assert np.allclose(cosmo.Om0, 0.27)
- assert np.allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
- assert np.allclose(cosmo.Ob0, 0.05)
- assert np.allclose(cosmo.Odm0, 0.27 - 0.05)
- assert np.allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
- assert np.allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
- assert np.allclose(cosmo.Ok0, 0.0)
- assert np.allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
+ assert allclose(cosmo.Om0, 0.27)
+ assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
+ assert allclose(cosmo.Ob0, 0.05)
+ assert allclose(cosmo.Odm0, 0.27 - 0.05)
+ assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
+ assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
+ assert allclose(cosmo.Ok0, 0.0)
+ assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
- assert np.allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
+ assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
- assert np.allclose(cosmo.Tcmb0.value, 2.0)
- assert np.allclose(cosmo.Tnu0.value, 1.4275317, rtol=1e-5)
- assert np.allclose(cosmo.Neff, 3.04)
- assert np.allclose(cosmo.h, 0.7)
- assert np.allclose(cosmo.H0.value, 70.0)
+ assert allclose(cosmo.Tcmb0, 2.0 * u.K)
+ assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
+ assert allclose(cosmo.Neff, 3.04)
+ assert allclose(cosmo.h, 0.7)
+ assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -148,8 +148,8 @@ def test_distance_broadcast():
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
- assert np.allclose(value_flat.value, value_2d.flatten().value)
- assert np.allclose(value_flat.value, value_3d.flatten().value)
+ assert allclose(value_flat, value_2d.flatten())
+ assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
@@ -163,8 +163,8 @@ def test_distance_broadcast():
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
- assert np.allclose(value_flat, value_2d.flatten())
- assert np.allclose(value_flat, value_3d.flatten())
+ assert allclose(value_flat, value_2d.flatten())
+ assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
@@ -182,8 +182,8 @@ def test_distance_broadcast():
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
- assert np.allclose(value_flat, value_2d.flatten())
- assert np.allclose(value_flat, value_3d.flatten())
+ assert allclose(value_flat, value_2d.flatten())
+ assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
@@ -205,33 +205,33 @@ def test_clone():
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert not np.allclose(newclone.H0.value, cosmo.H0.value)
- assert np.allclose(newclone.H0.value, 60.0)
- assert np.allclose(newclone.Om0, cosmo.Om0)
- assert np.allclose(newclone.Ok0, cosmo.Ok0)
+ assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
+ assert allclose(newclone.Om0, cosmo.Om0)
+ assert allclose(newclone.Ok0, cosmo.Ok0)
assert not np.allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not np.allclose(newclone.Onu0, cosmo.Onu0)
- assert np.allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
- assert np.allclose(newclone.m_nu.value, cosmo.m_nu.value)
- assert np.allclose(newclone.Neff, cosmo.Neff)
+ assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
+ assert allclose(newclone.m_nu, cosmo.m_nu)
+ assert allclose(newclone.Neff, cosmo.Neff)
# Compare modified version with directly instantiated one
cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
- assert np.allclose(newclone.H0.value, cmp.H0.value)
- assert np.allclose(newclone.Om0, cmp.Om0)
- assert np.allclose(newclone.Ode0, cmp.Ode0)
- assert np.allclose(newclone.Ok0, cmp.Ok0)
- assert np.allclose(newclone.Ogamma0, cmp.Ogamma0)
- assert np.allclose(newclone.Onu0, cmp.Onu0)
- assert np.allclose(newclone.Tcmb0, cmp.Tcmb0)
- assert np.allclose(newclone.m_nu.value, cmp.m_nu.value)
- assert np.allclose(newclone.Neff, cmp.Neff)
- assert np.allclose(newclone.Om(z), cmp.Om(z))
- assert np.allclose(newclone.H(z).value, cmp.H(z).value)
- assert np.allclose(newclone.luminosity_distance(z).value,
- cmp.luminosity_distance(z).value)
+ assert allclose(newclone.H0, cmp.H0)
+ assert allclose(newclone.Om0, cmp.Om0)
+ assert allclose(newclone.Ode0, cmp.Ode0)
+ assert allclose(newclone.Ok0, cmp.Ok0)
+ assert allclose(newclone.Ogamma0, cmp.Ogamma0)
+ assert allclose(newclone.Onu0, cmp.Onu0)
+ assert allclose(newclone.Tcmb0, cmp.Tcmb0)
+ assert allclose(newclone.m_nu, cmp.m_nu)
+ assert allclose(newclone.Neff, cmp.Neff)
+ assert allclose(newclone.Om(z), cmp.Om(z))
+ assert allclose(newclone.H(z), cmp.H(z))
+ assert allclose(newclone.luminosity_distance(z),
+ cmp.luminosity_distance(z))
# Now try changing multiple things
newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
@@ -239,34 +239,34 @@ def test_clone():
assert newclone.__class__ == cosmo.__class__
assert not newclone.name == cosmo.name
assert not np.allclose(newclone.H0.value, cosmo.H0.value)
- assert np.allclose(newclone.H0.value, 65.0)
- assert np.allclose(newclone.Om0, cosmo.Om0)
- assert np.allclose(newclone.Ok0, cosmo.Ok0)
+ assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
+ assert allclose(newclone.Om0, cosmo.Om0)
+ assert allclose(newclone.Ok0, cosmo.Ok0)
assert not np.allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not np.allclose(newclone.Onu0, cosmo.Onu0)
assert not np.allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
- assert np.allclose(newclone.Tcmb0.value, 2.8)
- assert np.allclose(newclone.m_nu.value, cosmo.m_nu.value)
- assert np.allclose(newclone.Neff, cosmo.Neff)
+ assert allclose(newclone.Tcmb0, 2.8 * u.K)
+ assert allclose(newclone.m_nu, cosmo.m_nu)
+ assert allclose(newclone.Neff, cosmo.Neff)
# And direct comparison
cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
Om0=0.27, Tcmb0=2.8 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
- assert np.allclose(newclone.H0.value, cmp.H0.value)
- assert np.allclose(newclone.Om0, cmp.Om0)
- assert np.allclose(newclone.Ode0, cmp.Ode0)
- assert np.allclose(newclone.Ok0, cmp.Ok0)
- assert np.allclose(newclone.Ogamma0, cmp.Ogamma0)
- assert np.allclose(newclone.Onu0, cmp.Onu0)
- assert np.allclose(newclone.Tcmb0, cmp.Tcmb0)
- assert np.allclose(newclone.m_nu.value, cmp.m_nu.value)
- assert np.allclose(newclone.Neff, cmp.Neff)
- assert np.allclose(newclone.Om(z), cmp.Om(z))
- assert np.allclose(newclone.H(z).value, cmp.H(z).value)
- assert np.allclose(newclone.luminosity_distance(z).value,
- cmp.luminosity_distance(z).value)
+ assert allclose(newclone.H0, cmp.H0)
+ assert allclose(newclone.Om0, cmp.Om0)
+ assert allclose(newclone.Ode0, cmp.Ode0)
+ assert allclose(newclone.Ok0, cmp.Ok0)
+ assert allclose(newclone.Ogamma0, cmp.Ogamma0)
+ assert allclose(newclone.Onu0, cmp.Onu0)
+ assert allclose(newclone.Tcmb0, cmp.Tcmb0)
+ assert allclose(newclone.m_nu, cmp.m_nu)
+ assert allclose(newclone.Neff, cmp.Neff)
+ assert allclose(newclone.Om(z), cmp.Om(z))
+ assert allclose(newclone.H(z), cmp.H(z))
+ assert allclose(newclone.luminosity_distance(z),
+ cmp.luminosity_distance(z))
# Try a dark energy class, make sure it can handle w params
cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
@@ -274,14 +274,14 @@ def test_clone():
newclone = cosmo.clone(w0=-1.1, wa=0.2)
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
- assert np.allclose(newclone.H0.value, cosmo.H0.value)
- assert np.allclose(newclone.Om0, cosmo.Om0)
- assert np.allclose(newclone.Ode0, cosmo.Ode0)
- assert np.allclose(newclone.Ok0, cosmo.Ok0)
+ assert allclose(newclone.H0, cosmo.H0)
+ assert allclose(newclone.Om0, cosmo.Om0)
+ assert allclose(newclone.Ode0, cosmo.Ode0)
+ assert allclose(newclone.Ok0, cosmo.Ok0)
assert not np.allclose(newclone.w0, cosmo.w0)
- assert np.allclose(newclone.w0, -1.1)
+ assert allclose(newclone.w0, -1.1)
assert not np.allclose(newclone.wa, cosmo.wa)
- assert np.allclose(newclone.wa, 0.2)
+ assert allclose(newclone.wa, 0.2)
# Now test exception if user passes non-parameter
with pytest.raises(AttributeError):
@@ -364,29 +364,29 @@ def test_flat_z1():
# iCosmos: http://www.icosmos.co.uk/index.html
# The order of values below is Wright, Kempner, iCosmos'
- assert np.allclose(cosmo.comoving_distance(z).value,
- [3364.5, 3364.8, 3364.7988], rtol=1e-4)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1682.3, 1682.4, 1682.3994], rtol=1e-4)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [6729.2, 6729.6, 6729.5976], rtol=1e-4)
- assert np.allclose(cosmo.lookback_time(z).value,
- [7.841, 7.84178, 7.843], rtol=1e-3)
- assert np.allclose(cosmo.lookback_distance(z).value,
- [2404.0, 2404.24, 2404.4], rtol=1e-3)
+ assert allclose(cosmo.comoving_distance(z),
+ [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
+ assert allclose(cosmo.luminosity_distance(z),
+ [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
+ assert allclose(cosmo.lookback_time(z),
+ [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
+ assert allclose(cosmo.lookback_distance(z),
+ [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
def test_zeroing():
""" Tests if setting params to 0s always respects that"""
# Make sure Ode = 0 behaves that way
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
- assert np.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
+ assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
# Ogamma0
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
- assert np.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
+ assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
# Obaryon
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
- assert np.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
+ assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
@@ -407,22 +407,22 @@ def test_de_subclass():
z = [0.2, 0.4, 0.6, 0.9]
cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
# Values taken from Ned Wrights advanced cosmo calcluator, Aug 17 2012
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [975.5, 2158.2, 3507.3, 5773.1], rtol=1e-3)
+ assert allclose(cosmo.luminosity_distance(z),
+ [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [975.5, 2158.2, 3507.3, 5773.1], rtol=1e-3)
+ assert allclose(cosmo.luminosity_distance(z),
+ [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
- assert np.allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
- assert np.allclose(cosmo.efunc([0.5, 1.0]),
- [1.31744953, 1.7489240754], rtol=1e-5)
- assert np.allclose(cosmo.inv_efunc([0.5, 1.0]),
- [0.75904236, 0.57178011], rtol=1e-5)
+ assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
+ assert allclose(cosmo.efunc([0.5, 1.0]),
+ [1.31744953, 1.7489240754], rtol=1e-5)
+ assert allclose(cosmo.inv_efunc([0.5, 1.0]),
+ [0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
- assert np.allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale([0.5, 1.0]),
- [1.12934694, 1.23114444], rtol=1e-4)
+ assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
+ assert allclose(cosmo.de_density_scale([0.5, 1.0]),
+ [1.12934694, 1.23114444], rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -433,58 +433,58 @@ def test_varyde_lumdist_mathematica():
# w0wa models
z = np.array([0.2, 0.4, 0.9, 1.2])
cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
- assert np.allclose(cosmo.w0, -1.1)
- assert np.allclose(cosmo.wa, 0.2)
+ assert allclose(cosmo.w0, -1.1)
+ assert allclose(cosmo.wa, 0.2)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [1004.0, 2268.62, 6265.76, 9061.84], rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
- assert np.allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
+ assert allclose(cosmo.luminosity_distance(z),
+ [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
+ assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
+ assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [971.667, 2141.67, 5685.96, 8107.41], rtol=1e-4)
+ assert allclose(cosmo.luminosity_distance(z),
+ [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [974.087, 2157.08, 5783.92, 8274.08], rtol=1e-4)
+ assert allclose(cosmo.luminosity_distance(z),
+ [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
Tcmb0=0.0)
- assert np.allclose(cosmo.wp, -1.1)
- assert np.allclose(cosmo.wa, 0.2)
- assert np.allclose(cosmo.zp, 0.5)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [1010.81, 2294.45, 6369.45, 9218.95], rtol=1e-4)
+ assert allclose(cosmo.wp, -1.1)
+ assert allclose(cosmo.wa, 0.2)
+ assert allclose(cosmo.zp, 0.5)
+ assert allclose(cosmo.luminosity_distance(z),
+ [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
Tcmb0=0.0)
- assert np.allclose(cosmo.wp, -1.1)
- assert np.allclose(cosmo.wa, 0.2)
- assert np.allclose(cosmo.zp, 0.9)
- assert np.allclose(cosmo.luminosity_distance(z).value,
- [1013.68, 2305.3, 6412.37, 9283.33], rtol=1e-4)
+ assert allclose(cosmo.wp, -1.1)
+ assert allclose(cosmo.wa, 0.2)
+ assert allclose(cosmo.zp, 0.9)
+ assert allclose(cosmo.luminosity_distance(z),
+ [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
- assert np.allclose(tcos.Om0, 0.3)
- assert np.allclose(tcos.H0.value, 70.0)
- assert np.allclose(tcos.Om(0), 0.3)
- assert np.allclose(tcos.Ob(0), 0.045)
+ assert allclose(tcos.Om0, 0.3)
+ assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
+ assert allclose(tcos.Om(0), 0.3)
+ assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
- assert np.allclose(tcos.Om(z), [0.3, 0.59112134, 0.77387435, 0.91974179],
+ assert allclose(tcos.Om(z), [0.3, 0.59112134, 0.77387435, 0.91974179],
rtol=1e-4)
- assert np.allclose(tcos.Ob(z), [0.045, 0.08866820, 0.11608115,
+ assert allclose(tcos.Ob(z), [0.045, 0.08866820, 0.11608115,
0.13796127], rtol=1e-4)
- assert np.allclose(tcos.Odm(z), [0.255, 0.50245314, 0.6577932, 0.78178052],
+ assert allclose(tcos.Odm(z), [0.255, 0.50245314, 0.6577932, 0.78178052],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
- assert np.allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
+ assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
@@ -492,21 +492,21 @@ def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = core.FlatLambdaCDM(70.0, 0.3)
- assert np.allclose(tcos.Ok0, 0.0)
- assert np.allclose(tcos.Ok(0), 0.0)
+ assert allclose(tcos.Ok0, 0.0)
+ assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
- assert np.allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
+ assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
- assert np.allclose(tcos.Ok0, 0.2)
- assert np.allclose(tcos.Ok(0), 0.2)
- assert np.allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
+ assert allclose(tcos.Ok0, 0.2)
+ assert allclose(tcos.Ok(0), 0.2)
+ assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
- assert np.allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
+ assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@@ -514,10 +514,10 @@ def test_ocurv():
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
- assert np.allclose(tcos.Ode0, 0.7)
- assert np.allclose(tcos.Ode(0), 0.7)
+ assert allclose(tcos.Ode0, 0.7)
+ assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
- assert np.allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
+ assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@@ -539,27 +539,27 @@ def test_ogamma():
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.9, 858.2, 26.855, 13.642], rtol=5e-4)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.8, 857.9, 26.767, 13.582], rtol=5e-4)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.4, 856.6, 26.489, 13.405], rtol=5e-4)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.91, 858.205, 26.8586, 13.6469], rtol=1e-5)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.76, 857.817, 26.7688, 13.5841], rtol=1e-5)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
- assert np.allclose(cosmo.angular_diameter_distance(z).value,
- [1651.21, 856.411, 26.4845, 13.4028], rtol=1e-5)
+ assert allclose(cosmo.angular_diameter_distance(z),
+ [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
@@ -569,14 +569,14 @@ def test_ogamma():
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
- hubdis = 299792.458 / 70.0
+ hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
- assert np.allclose(cosmo.comoving_distance(z).value, targvals, rtol=1e-5)
+ assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
- assert np.allclose(cosmo.comoving_distance(z.astype(np.int)).value,
+ assert allclose(cosmo.comoving_distance(z.astype(np.int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
@@ -585,35 +585,35 @@ def test_ogamma():
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
- assert np.allclose(cosmo.comoving_distance(z).value, targvals, rtol=1e-5)
+ assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
- assert np.allclose(cosmo.Tcmb0.value, 2.5)
- assert np.allclose(cosmo.Tcmb(2).value, 7.5)
+ assert allclose(cosmo.Tcmb0, 2.5 * u.K)
+ assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
- assert np.allclose(cosmo.Tcmb(z).value,
- [2.5, 5.0, 7.5, 10.0, 25.0], rtol=1e-6)
+ assert allclose(cosmo.Tcmb(z),
+ [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
- assert np.allclose(cosmo.Tcmb(z).value,
- [2.5, 5.0, 7.5, 10.0, 25.0], rtol=1e-6)
+ assert allclose(cosmo.Tcmb(z),
+ [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
- assert np.allclose(cosmo.Tnu0.value, 2.1412975665108247, rtol=1e-6)
- assert np.allclose(cosmo.Tnu(2).value, 6.423892699532474, rtol=1e-6)
+ assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
+ assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
- expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027]
- assert np.allclose(cosmo.Tnu(z), expected, rtol=1e-6)
+ expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
+ assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
- assert np.allclose(cosmo.Tnu(z), expected, rtol=1e-6)
+ assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
@@ -625,32 +625,32 @@ def test_efunc_vs_invefunc():
# We do the non-standard case in test_efunc_vs_invefunc_flrw,
# since it requires scipy
cosmo = core.LambdaCDM(70, 0.3, 0.5)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatLambdaCDM(50.0, 0.27)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2)
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
@@ -661,17 +661,21 @@ def test_efunc_vs_invefunc_flrw():
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins
cosmo = test_cos_sub()
- assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
- assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
+ assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
+ assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
- assert np.allclose(cosmo.arcsec_per_kpc_comoving(3).value, 0.0317179)
- assert np.allclose(cosmo.arcsec_per_kpc_proper(3).value, 0.1268716668)
- assert np.allclose(cosmo.kpc_comoving_per_arcmin(3).value, 1891.6753126)
- assert np.allclose(cosmo.kpc_proper_per_arcmin(3).value, 472.918828)
+ assert allclose(cosmo.arcsec_per_kpc_comoving(3),
+ 0.0317179167 * u.arcsec / u.kpc)
+ assert allclose(cosmo.arcsec_per_kpc_proper(3),
+ 0.1268716668 * u.arcsec / u.kpc)
+ assert allclose(cosmo.kpc_comoving_per_arcmin(3),
+ 1891.6753126 * u.kpc / u.arcmin)
+ assert allclose(cosmo.kpc_proper_per_arcmin(3),
+ 472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -684,19 +688,19 @@ def test_comoving_volume():
# test against ned wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
- 3654.802]) * 1e9 # convert to Mpc**3
+ 3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
- 3123.814]) * 1e9
+ 3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
- 358.992]) * 1e9
+ 358.992]) * u.Gpc**3
# The wright calculator isn't very accurate, so we use a rather
# modest precision
- assert np.allclose(c_flat.comoving_volume(redshifts).value, wright_flat,
- rtol=1e-2)
- assert np.allclose(c_open.comoving_volume(redshifts).value,
- wright_open, rtol=1e-2)
- assert np.allclose(c_closed.comoving_volume(redshifts).value,
- wright_closed, rtol=1e-2)
+ assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
+ rtol=1e-2)
+ assert allclose(c_open.comoving_volume(redshifts),
+ wright_open, rtol=1e-2)
+ assert allclose(c_closed.comoving_volume(redshifts),
+ wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -711,26 +715,26 @@ def test_differential_comoving_volume():
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
- 3654.802]) * 1e9 # convert to Mpc**3
+ 3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
- 3123.814]) * 1e9
+ 3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
- 358.992]) * 1e9
+ 358.992]) * u.Gpc**3
# The wright calculator isn't very accurate, so we use a rather
# modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
- assert np.allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
- for redshift in redshifts]),
- wright_flat, rtol=1e-2)
- assert np.allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
- for redshift in redshifts]),
- wright_open, rtol=1e-2)
- assert np.allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
- for redshift in redshifts]),
- wright_closed, rtol=1e-2)
+ assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
+ for redshift in redshifts]) * u.Mpc**3,
+ wright_flat, rtol=1e-2)
+ assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
+ for redshift in redshifts]) * u.Mpc**3,
+ wright_open, rtol=1e-2)
+ assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
+ for redshift in redshifts]) * u.Mpc**3,
+ wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -845,68 +849,77 @@ def test_flat_open_closed_icosmo():
"""
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1)
+ dm = dm * u.Mpc
+ da = da * u.Mpc
+ dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
- assert np.allclose(cosmo.comoving_transverse_distance(redshifts).value, dm)
- assert np.allclose(cosmo.angular_diameter_distance(redshifts).value, da)
- assert np.allclose(cosmo.luminosity_distance(redshifts).value, dl)
+ assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
+ assert allclose(cosmo.angular_diameter_distance(redshifts), da)
+ assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)
+ dm = dm * u.Mpc
+ da = da * u.Mpc
+ dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
- assert np.allclose(cosmo.comoving_transverse_distance(redshifts).value, dm)
- assert np.allclose(cosmo.angular_diameter_distance(redshifts).value, da)
- assert np.allclose(cosmo.luminosity_distance(redshifts).value, dl)
+ assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
+ assert allclose(cosmo.angular_diameter_distance(redshifts), da)
+ assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)
+ dm = dm * u.Mpc
+ da = da * u.Mpc
+ dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
- assert np.allclose(cosmo.comoving_transverse_distance(redshifts).value, dm)
- assert np.allclose(cosmo.angular_diameter_distance(redshifts).value, da)
- assert np.allclose(cosmo.luminosity_distance(redshifts).value, dl)
+ assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
+ assert allclose(cosmo.angular_diameter_distance(redshifts), da)
+ assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
- assert np.allclose(cosmo.comoving_distance(3),
+ assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
- assert np.allclose(cosmo.comoving_distance([1, 2, 3, 5]),
+ assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
- assert np.allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
- assert np.allclose(cosmo.efunc([1, 2, 6]),
+ assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
+ assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
- assert np.allclose(cosmo.inv_efunc([1, 2, 6]),
+ assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
def test_wz():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
- assert np.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+ assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-1., -1, -1, -1, -1, -1])
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
- assert np.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+ assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
- assert np.allclose(cosmo.w0, -0.5)
+ assert allclose(cosmo.w0, -0.5)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
- assert np.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
+ assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
- assert np.allclose(cosmo.w0, -1.0)
- assert np.allclose(cosmo.wz, 0.5)
+ assert allclose(cosmo.w0, -1.0)
+ assert allclose(cosmo.wz, 0.5)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
- assert np.allclose(cosmo.w0, -1.0)
- assert np.allclose(cosmo.wa, -0.5)
- assert np.allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
+ assert allclose(cosmo.w0, -1.0)
+ assert allclose(cosmo.wa, -0.5)
+ assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
- assert np.allclose(cosmo.wp, -0.9)
- assert np.allclose(cosmo.wa, 0.2)
- assert np.allclose(cosmo.zp, 0.5)
- assert np.allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
+ assert allclose(cosmo.wp, -0.9)
+ assert allclose(cosmo.wa, 0.2)
+ assert allclose(cosmo.zp, 0.5)
+ assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
@@ -915,49 +928,49 @@ def test_wz():
def test_de_densityscale():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
- assert np.allclose(cosmo.de_density_scale(z),
+ assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
- assert np.allclose(cosmo.de_density_scale(3),
+ assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
- assert np.allclose(cosmo.de_density_scale([1, 2, 3]),
+ assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
- assert np.allclose(cosmo.de_density_scale(z),
+ assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale(3),
+ assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
- assert np.allclose(cosmo.de_density_scale([1, 2, 3]),
+ assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
- assert np.allclose(cosmo.de_density_scale(z),
+ assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale(3),
+ assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
- assert np.allclose(cosmo.de_density_scale([1, 2, 3]),
+ assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
- assert np.allclose(cosmo.de_density_scale(z),
+ assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale(3),
+ assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
- assert np.allclose(cosmo.de_density_scale([1, 2, 3]),
+ assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
- assert np.allclose(cosmo.de_density_scale(z),
+ assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
- assert np.allclose(cosmo.de_density_scale(3),
+ assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
- assert np.allclose(cosmo.de_density_scale([1, 2, 3]),
+ assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@@ -965,18 +978,21 @@ def test_de_densityscale():
def test_age():
# WMAP7 but with Omega_relativisitic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
- assert np.allclose(tcos.hubble_time.value, 13.889094057856937)
- assert np.allclose(tcos.age([1., 5.]).value, [5.97113193, 1.20553129])
- assert np.allclose(tcos.age([1, 5]).value, [5.97113193, 1.20553129])
+ assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
+ assert allclose(tcos.age([1., 5.]),
+ [5.97113193, 1.20553129] * u.Gyr)
+ assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
# WMAP7 but with Omega_relativisitic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
- assert np.allclose(tcos.hubble_distance.value, 4258.415596590909)
- assert np.allclose(tcos.distmod([1, 5]).value, [44.124857, 48.40167258])
- assert np.allclose(tcos.distmod([1., 5.]).value, [44.124857, 48.40167258])
+ assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
+ assert allclose(tcos.distmod([1, 5]),
+ [44.124857, 48.40167258] * u.mag)
+ assert allclose(tcos.distmod([1., 5.]),
+ [44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -984,10 +1000,10 @@ def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
- assert np.allclose(tcos.luminosity_distance([50, 100]).value,
- [16612.44047622, -46890.79092244])
- assert np.allclose(tcos.distmod([50, 100]).value,
- [46.102167189, 48.355437790944])
+ assert allclose(tcos.luminosity_distance([50, 100]),
+ [16612.44047622, -46890.79092244] * u.Mpc)
+ assert allclose(tcos.distmod([50, 100]),
+ [46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -996,14 +1012,14 @@ def test_critical_density():
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
- assert np.allclose(tcos.critical_density0.value,
- 9.31000324385361e-30)
- assert np.allclose(tcos.critical_density0.value,
- tcos.critical_density(0).value)
- assert np.allclose(tcos.critical_density([1, 5]).value,
- [2.70362491e-29, 5.53758986e-28])
- assert np.allclose(tcos.critical_density([1., 5.]).value,
- [2.70362491e-29, 5.53758986e-28])
+ assert allclose(tcos.critical_density0,
+ 9.31000324385361e-30 * u.g / u.cm**3)
+ assert allclose(tcos.critical_density0,
+ tcos.critical_density(0))
+ assert allclose(tcos.critical_density([1, 5]),
+ [2.70362491e-29, 5.53758986e-28] * u.g / u.cm**3)
+ assert allclose(tcos.critical_density([1., 5.]),
+ [2.70362491e-29, 5.53758986e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -1019,49 +1035,49 @@ def test_angular_diameter_distance_z1z2():
with pytest.raises(ValueError): # test z1 > z2 fail
tcos.angular_diameter_distance_z1z2(4, 3)
# Tests that should actually work
- assert np.allclose(tcos.angular_diameter_distance_z1z2(1, 2).value,
- 646.22968662822018)
+ assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
+ 646.22968662822018 * u.Mpc)
z1 = 0, 0, 1, 0.5, 1
z2 = 2, 1, 2, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
646.22968662822018,
1159.0970895962193,
- 115.72768186186921)
+ 115.72768186186921) * u.Mpc
- assert np.allclose(tcos.angular_diameter_distance_z1z2(z1, z2).value,
+ assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
# Non-flat (positive Ocurv) test
tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
- assert np.allclose(tcos.angular_diameter_distance_z1z2(1, 2).value,
- 620.1175337852428)
+ assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
+ 620.1175337852428 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
- assert np.allclose(tcos.absorption_distance([1, 3]),
+ assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
- assert np.allclose(tcos.absorption_distance([1., 3.]),
+ assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
- assert np.allclose(tcos.absorption_distance(3), 7.98685853)
- assert np.allclose(tcos.absorption_distance(3.), 7.98685853)
+ assert allclose(tcos.absorption_distance(3), 7.98685853)
+ assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
# Test no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05, m_nu=u.Quantity(0, u.eV))
- assert np.allclose(tcos.Neff, 4.05)
+ assert allclose(tcos.Neff, 4.05)
assert not tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 4
assert mnu.unit == u.eV
- assert np.allclose(mnu.value, [0.0, 0.0, 0.0, 0.0])
- assert np.allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
+ assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
+ assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
rtol=1e-6)
- assert np.allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
+ assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
rtol=1e-6)
# Test basic setting, retrieval of values
@@ -1071,17 +1087,17 @@ def test_massivenu_basic():
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
- assert np.allclose(mnu.value, [0.0, 0.01, 0.02])
+ assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
# All massive neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, m_nu=u.Quantity(0.1, u.eV),
Neff=3.1)
- assert np.allclose(tcos.Neff, 3.1)
+ assert allclose(tcos.Neff, 3.1)
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
- assert np.allclose(mnu.value, [0.1, 0.1, 0.1])
+ assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -1103,21 +1119,21 @@ def test_massivenu_density():
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
- assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
- assert np.allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
+ assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
+ assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
- assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
+ assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
- assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
+ assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
@@ -1125,14 +1141,14 @@ def test_massivenu_density():
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
- assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
+ assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
- assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
- assert np.allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
+ assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
+ assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
- assert np.allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
+ assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
@@ -1140,17 +1156,17 @@ def test_massivenu_density():
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
- assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
+ assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
- assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
+ assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(np.int)
- assert np.allclose(tcos.nu_relative_density(ztest), nurel_exp,
+ assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
- assert np.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
+ assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
@@ -1164,19 +1180,19 @@ def test_z_at_value():
z_at_value = funcs.z_at_value
cosmo = core.Planck13
d = cosmo.luminosity_distance(3)
- assert np.allclose(z_at_value(cosmo.luminosity_distance, d), 3,
+ assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
rtol=1e-8)
- assert np.allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
+ assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
rtol=1e-6)
- assert np.allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
+ assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
1.3685790653802761, rtol=1e-6)
- assert np.allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
+ assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
0.7951983674601507, rtol=1e-6)
- assert np.allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
+ assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmax=2), 0.68127769625288614, rtol=1e-6)
- assert np.allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
+ assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmin=2.5), 3.7914908028272083, rtol=1e-6)
- assert np.allclose(z_at_value(cosmo.distmod, 46 * u.mag),
+ assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
1.9913891680278133, rtol=1e-6)
# test behaviour when the solution is outside z limits (should
@@ -1215,12 +1231,12 @@ def test_z_at_value_roundtrip():
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
- assert np.allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
+ assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
# Test angular_diameter_distance_z1z2
z2 = 2.0
func = lambda z1: core.Planck13.angular_diameter_distance_z1z2(z1, z2)
fval = func(z)
- assert np.allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
+ assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
diff --git a/astropy/io/ascii/core.py b/astropy/io/ascii/core.py
index eb82757..35afbac 100644
--- a/astropy/io/ascii/core.py
+++ b/astropy/io/ascii/core.py
@@ -363,7 +363,22 @@ class DefaultSplitter(BaseSplitter):
def _replace_tab_with_space(line, escapechar, quotechar):
- """Replace tab with space within ``line`` while respecting quoted substrings"""
+ """Replace tabs with spaces in given string, preserving quoted substrings
+
+ Parameters
+ ----------
+ line : str
+ String containing tabs to be replaced with spaces.
+ escapechar : str
+ Character in ``line`` used to escape special characters.
+ quotechar: str
+ Character in ``line`` indicating the start/end of a substring.
+
+ Returns
+ -------
+ line : str
+ A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
+ """
newline = []
in_quote = False
lastchar = 'NONE'
@@ -719,13 +734,30 @@ class BaseData(object):
def convert_numpy(numpy_type):
- """Return a tuple ``(converter_func, converter_type)``. The converter
- function converts a list into a numpy array of the given ``numpy_type``.
- This type must be a valid `numpy type
- <http://docs.scipy.org/doc/numpy/user/basics.types.html>`_, e.g.
- numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float, numpy.float64,
- numpy.str. The converter type is used to track the generic data type (int,
- float, str) that is produced by the converter function.
+ """Return a tuple containing a function which converts a list into a numpy
+ array and the type produced by the converter function.
+
+ Parameters
+ ----------
+ numpy_type : numpy data-type
+ The numpy type required of an array returned by ``converter``. Must be a
+ valid `numpy type <http://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
+ e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
+ numpy.float64, numpy.str.
+
+ Returns
+ -------
+ (converter, converter_type) : (function, generic data-type)
+ ``converter`` is a function which accepts a list and converts it to a
+ numpy array of type ``numpy_type``.
+ ``converter_type`` tracks the generic data type produced by the converter
+ function.
+
+ Raises
+ ------
+ ValueError
+ Raised by ``converter`` if the list elements could not be converted to
+ the required type.
"""
# Infer converter type from an instance of numpy_type.
diff --git a/astropy/io/ascii/src/tokenizer.h b/astropy/io/ascii/src/tokenizer.h
index ba6dd91..4325072 100644
--- a/astropy/io/ascii/src/tokenizer.h
+++ b/astropy/io/ascii/src/tokenizer.h
@@ -12,7 +12,7 @@
#include <ctype.h>
#ifdef _MSC_VER
-#define inline __inline
+ #define inline __inline
#ifndef NAN
static const unsigned long __nan[2] = {0xffffffff, 0x7fffffff};
#define NAN (*(const double *) __nan)
@@ -21,6 +21,13 @@
static const unsigned long __infinity[2] = {0x00000000, 0x7ff00000};
#define INFINITY (*(const double *) __infinity)
#endif
+#else
+ #ifndef INFINITY
+ #define INFINITY (1.0/0.0)
+ #endif
+ #ifndef NAN
+ #define NAN (INFINITY-INFINITY)
+ #endif
#endif
typedef enum
diff --git a/astropy/io/fits/column.py b/astropy/io/fits/column.py
index 8390b59..77378e3 100644
--- a/astropy/io/fits/column.py
+++ b/astropy/io/fits/column.py
@@ -12,12 +12,13 @@ from functools import reduce
import numpy as np
from numpy import char as chararray
-from .card import Card
-from .util import pairwise, _is_int, _convert_array, encode_ascii, cmp
+from .card import Card, CARD_LENGTH
+from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
+ NotifierMixin)
from .verify import VerifyError, VerifyWarning
from ...extern.six import string_types, iteritems
-from ...utils import lazyproperty, isiterable, indent
+from ...utils import lazyproperty, isiterable, indent, OrderedDict
from ...utils.compat import ignored
@@ -84,6 +85,19 @@ KEYWORD_ATTRIBUTES = ['name', 'format', 'unit', 'null', 'bscale', 'bzero',
'disp', 'start', 'dim']
"""This is a list of the attributes that can be set on `Column` objects."""
+
+KEYWORD_TO_ATTRIBUTE = \
+ OrderedDict((keyword, attr)
+ for keyword, attr in zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
+
+
+ATTRIBUTE_TO_KEYWORD = \
+ OrderedDict((value, key)
+ for key, value in KEYWORD_TO_ATTRIBUTE.items())
+
+
+# TODO: Define a list of default comments to associate with each table keyword
+
# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
r'(?P<option>[!-~]*)', re.I)
@@ -96,6 +110,12 @@ TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
r'(?:(?P<widthf>[0-9]+)\.'
r'(?P<precision>[0-9]+))?)')
+TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
+"""
+Regular exprssion for valid table column names. See FITS Standard v3.0 section
+7.2.2.
+"""
+
# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
@@ -150,6 +170,14 @@ class _BaseColumnFormat(str):
def __hash__(self):
return hash(self.canonical)
+ @lazyproperty
+ def dtype(self):
+ """
+ The Numpy dtype object created from the format's associated recformat.
+ """
+
+ return np.dtype(self.recformat)
+
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
@@ -350,7 +378,78 @@ class _FormatQ(_FormatP):
_descriptor_format = '2i8'
-class Column(object):
+class ColumnAttribute(object):
+ """
+ Descriptor for attributes of `Column` that are associated with keywords
+ in the FITS header and describe properties of the column as specified in
+ the FITS standard.
+
+ Each `ColumnAttribute` may have a ``validator`` method defined on it.
+ This validates values set on this attribute to ensure that they meet the
+ FITS standard. Invalid values will raise a warning and will not be used in
+ formatting the column. The validator should take two arguments--the
+ `Column` it is being assigned to, and the new value for the attribute, and
+ it must raise an `AssertionError` if the value is invalid.
+
+ The `ColumnAttribute` itself is a decorator that can be used to define the
+ ``validator`` for each column attribute. For example::
+
+ @ColumnAttribute('TTYPE')
+ def name(col, name):
+ assert isinstance(name, str)
+
+ The actual object returned by this decorator is the `ColumnAttribute`
+ instance though, not the ``name`` function. As such ``name`` is not a
+ method of the class it is defined in.
+
+ The setter for `ColumnAttribute` also updates the header of any table
+ HDU this column is attached to in order to reflect the change. The
+ ``validator`` should ensure that the value is valid for inclusion in a FITS
+ header.
+ """
+
+ def __init__(self, keyword):
+ self._keyword = keyword
+ self._validator = None
+
+ # The name of the attribute associated with this keyword is currently
+ # determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
+ # make more flexible in the future, for example, to support custom
+ # column attributes.
+ self._attr = KEYWORD_TO_ATTRIBUTE[self._keyword]
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+ else:
+ return getattr(obj, '_' + self._attr)
+
+ def __set__(self, obj, value):
+ if self._validator is not None:
+ self._validator(obj, value)
+
+ old_value = getattr(obj, '_' + self._attr, None)
+ setattr(obj, '_' + self._attr, value)
+ obj._notify('column_attribute_changed', obj, self._attr, old_value,
+ value)
+
+ def __call__(self, func):
+ """
+ Set the validator for this column attribute.
+
+ Returns ``self`` so that this can be used as a decorator, as described
+ in the docs for this class.
+ """
+
+ self._validator = func
+
+ return self
+
+ def __repr__(self):
+ return "{0}('{1}')".format(self.__class__.__name__, self._keyword)
+
+
+class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
@@ -368,7 +467,7 @@ class Column(object):
name : str, optional
column name, corresponding to ``TTYPE`` keyword
- format : str, optional
+ format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
@@ -432,7 +531,6 @@ class Column(object):
raise VerifyError('\n'.join(msg))
-
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
@@ -447,17 +545,6 @@ class Column(object):
self._dims = self.dim
self.dim = dim
- # Zero-length formats are legal in the FITS format, but since they
- # are not supported by numpy we mark columns that use them as
- # "phantom" columns, that are not considered when reading the data
- # as a record array.
- if self.format[0] == '0' or \
- (self.format[-1] == '0' and self.format[-2].isalpha()):
- self._phantom = True
- array = None
- else:
- self._phantom = False
-
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
@@ -524,9 +611,49 @@ class Column(object):
return hash((self.name.lower(), self.format))
+ @ColumnAttribute('TTYPE')
+ def name(col, name):
+ if name is None:
+ # Allow None to indicate deleting the name, or to just indicate an
+ # unspecified name (when creating a new Column).
+ return
+
+ # Check that the name meets the recommended standard--other column
+ # names are *allowed*, but will be discouraged
+ if isinstance(name, string_types) and not TTYPE_RE.match(name):
+ warnings.warn(
+ 'It is strongly recommended that column names contain only '
+ 'upper and lower-case ASCII letters, digits, or underscores '
+ 'for maximum compatibility with other software '
+ '(got {0!r}).'.format(name), VerifyWarning)
+
+ # This ensures that the new name can fit into a single FITS card
+ # without any special extension like CONTINUE cards or the like.
+ assert (isinstance(name, string_types) and
+ len(str(Card('TTYPE', name))) == CARD_LENGTH), \
+ ('Column name must be a string able to fit in a single '
+ 'FITS card--typically this means a maximum of 68 '
+ 'characters, though it may be fewer if the string '
+ 'contains special characters like quotes.')
+
+ format = ColumnAttribute('TFORM')
+ unit = ColumnAttribute('TUNIT')
+ null = ColumnAttribute('TNULL')
+ bscale = ColumnAttribute('TSCAL')
+ bzero = ColumnAttribute('TZERO')
+ disp = ColumnAttribute('TDISP')
+ start = ColumnAttribute('TBCOL')
+ dim = ColumnAttribute('TDIM')
+
+ @lazyproperty
+ def ascii(self):
+ """Whether this `Column` represents a column in an ASCII table."""
+
+ return isinstance(self.format, _AsciiColumnFormat)
+
@lazyproperty
def dtype(self):
- return np.dtype(_convert_format(self.format))
+ return self.format.dtype
def copy(self):
"""
@@ -868,7 +995,7 @@ class Column(object):
return _convert_array(array, dtype)
-class ColDefs(object):
+class ColDefs(NotifierMixin):
"""
Column definitions class.
@@ -959,6 +1086,10 @@ class ColDefs(object):
raise TypeError('Input to ColDefs must be a table HDU, a list '
'of Columns, or a record/field array.')
+ # Listen for changes on all columns
+ for col in self.columns:
+ col._add_listener(self)
+
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
@@ -1010,8 +1141,6 @@ class ColDefs(object):
def _init_from_table(self, table):
hdr = table._header
nfields = hdr['TFIELDS']
- self._width = hdr['NAXIS1']
- self._shape = hdr['NAXIS2']
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
@@ -1025,8 +1154,7 @@ class ColDefs(object):
if keyword in KEYWORD_NAMES:
col = int(key.group('num'))
if col <= nfields and col > 0:
- idx = KEYWORD_NAMES.index(keyword)
- attr = KEYWORD_ATTRIBUTES[idx]
+ attr = KEYWORD_TO_ATTRIBUTE[keyword]
if attr == 'format':
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
@@ -1054,7 +1182,11 @@ class ColDefs(object):
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
- self._listener = weakref.proxy(table)
+
+ # Add the table HDU as a listener to changes to the columns
+ # (either changes to individual columns, or changes to the set of
+ # columns (add/remove/etc.))
+ self._add_listener(table)
def __copy__(self):
return self.__class__(self)
@@ -1132,12 +1264,9 @@ class ColDefs(object):
@lazyproperty
def dtype(self):
- recformats = [f for idx, f in enumerate(self._recformats)
- if not self[idx]._phantom]
- formats = ','.join(recformats)
- names = [n for idx, n in enumerate(self.names)
- if not self[idx]._phantom]
- return np.rec.format_parser(formats, names, None).dtype
+ dtypes = [f.dtype for idx, f in enumerate(self.formats)]
+ names = [n for idx, n in enumerate(self.names)]
+ return np.dtype(list(zip(names, dtypes)))
@lazyproperty
def _arrays(self):
@@ -1154,6 +1283,9 @@ class ColDefs(object):
return [col._dims for col in self.columns]
def __getitem__(self, key):
+ if isinstance(key, string_types):
+ key = _get_index(self.names, key)
+
x = self.columns[key]
if _is_int(key):
return x
@@ -1200,14 +1332,25 @@ class ColDefs(object):
tmp = [self[i] for i in indx]
return ColDefs(tmp)
- def _update_listener(self):
- if hasattr(self, '_listener'):
- try:
- if self._listener._data_loaded:
- del self._listener.data
- self._listener.columns = self
- except ReferenceError:
- del self._listener
+ def _update_column_attribute_changed(self, column, attr, old_value,
+ new_value):
+ """
+ Handle column attribute changed notifications from columns that are
+ members of this `ColDefs`.
+
+ `ColDefs` itself does not currently do anything with this, and just
+ bubbles the notification up to any listening table HDUs that may need
+ to update their headers, etc. However, this also informs the table of
+ the numerical index of the column that changed.
+ """
+
+ idx = 0
+ for idx, col in enumerate(self.columns):
+ if col is column:
+ break
+
+ self._notify('column_attribute_changed', column, idx, attr, old_value,
+ new_value)
def add_col(self, column):
"""
@@ -1216,10 +1359,6 @@ class ColDefs(object):
assert isinstance(column, Column)
- for cname in KEYWORD_ATTRIBUTES:
- attr = getattr(self, cname + 's')
- attr.append(getattr(column, cname))
-
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
@@ -1228,9 +1367,12 @@ class ColDefs(object):
self.columns.append(column)
+ # Listen for changes on the new column
+ column._add_listener(self)
+
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
- self._update_listener()
+ self._notify('column_added', self, column)
return self
def del_col(self, col_name):
@@ -1242,10 +1384,7 @@ class ColDefs(object):
"""
indx = _get_index(self.names, col_name)
-
- for cname in KEYWORD_ATTRIBUTES:
- attr = getattr(self, cname + 's')
- del attr[indx]
+ col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
@@ -1255,9 +1394,13 @@ class ColDefs(object):
del self.columns[indx]
- # If this ColDefs is being tracked by a Table, inform the
- # table that its data is now invalid.
- self._update_listener()
+ col._remove_listener(self)
+
+ # If this ColDefs is being tracked by a table HDU, inform the HDU (or
+ # any other listeners) that the column has been removed
+ # Just send a reference to self, and the index of the column that was
+ # removed
+ self._notify('column_removed', self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
@@ -1276,12 +1419,7 @@ class ColDefs(object):
The new value for the attribute
"""
- indx = _get_index(self.names, col_name)
- getattr(self, attrib + 's')[indx] = new_value
-
- # If this ColDefs is being tracked by a Table, inform the
- # table that its data is now invalid.
- self._update_listener()
+ setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
@@ -1301,10 +1439,6 @@ class ColDefs(object):
else:
self.change_attrib(col_name, 'name', new_name)
- # If this ColDefs is being tracked by a Table, inform the
- # table that its data is now invalid.
- self._update_listener()
-
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
@@ -1320,10 +1454,6 @@ class ColDefs(object):
self.change_attrib(col_name, 'unit', new_unit)
- # If this ColDefs is being tracked by a Table, inform the
- # table that its data is now invalid.
- self._update_listener()
-
def info(self, attrib='all', output=None):
"""
Get attribute(s) information of the column definition.
@@ -1456,6 +1586,9 @@ class _AsciiColDefs(ColDefs):
self._width = end_col
+# Utilities
+
+
class _VLF(np.ndarray):
"""Variable length field object."""
@@ -1509,9 +1642,9 @@ class _VLF(np.ndarray):
def _get_index(names, key):
"""
- Get the index of the `key` in the `names` list.
+ Get the index of the ``key`` in the ``names`` list.
- The `key` can be an integer or string. If integer, it is the index
+ The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
diff --git a/astropy/io/fits/file.py b/astropy/io/fits/file.py
index 83c03f2..9cb99da 100644
--- a/astropy/io/fits/file.py
+++ b/astropy/io/fits/file.py
@@ -2,9 +2,6 @@
from __future__ import division, with_statement
-from ...utils.compat import gzip as _astropy_gzip
-from ...utils.data import download_file, _is_url
-import gzip as _system_gzip
import mmap
import os
import tempfile
@@ -18,9 +15,11 @@ from numpy import memmap as Memmap
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
- _array_to_file, _write_string)
+ _array_to_file, _write_string, _GZIP_FILE_TYPES)
from ...extern.six import b, string_types
from ...extern.six.moves import urllib
+from ...utils.compat import gzip
+from ...utils.data import download_file, _is_url
from ...utils.exceptions import AstropyUserWarning
@@ -72,8 +71,6 @@ MEMMAP_MODES = {'readonly': 'c', 'copyonwrite': 'c', 'update': 'r+',
GZIP_MAGIC = b('\x1f\x8b\x08')
PKZIP_MAGIC = b('\x50\x4b\x03\x04')
-_GZIP_FILE_TYPES = (_astropy_gzip, _system_gzip)
-
class _File(object):
"""
Represents a FITS file on disk (or in some other file-like object).
@@ -88,7 +85,7 @@ class _File(object):
memmap = True if memmap is None else memmap
if fileobj is None:
- self.__file = None
+ self._file = None
self.closed = False
self.binary = True
self.mode = mode
@@ -133,7 +130,7 @@ class _File(object):
self.readonly = False
self.writeonly = False
- # Initialize the internal self.__file object
+ # Initialize the internal self._file object
if _is_random_access_file_backed(fileobj):
self._open_fileobj(fileobj, mode, clobber)
elif isinstance(fileobj, string_types):
@@ -141,9 +138,9 @@ class _File(object):
else:
self._open_filelike(fileobj, mode, clobber)
- self.fileobj_mode = fileobj_mode(self.__file)
+ self.fileobj_mode = fileobj_mode(self._file)
- if isinstance(fileobj, (_astropy_gzip.GzipFile, _system_gzip.GzipFile)):
+ if isinstance(fileobj, _GZIP_FILE_TYPES):
self.compression = 'gzip'
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
@@ -159,18 +156,18 @@ class _File(object):
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if (mode == 'ostream' or self.compression or
- not hasattr(self.__file, 'seek')):
+ not hasattr(self._file, 'seek')):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
- pos = self.__file.tell()
- self.__file.seek(0, 2)
- self.size = self.__file.tell()
- self.__file.seek(pos)
+ pos = self._file.tell()
+ self._file.seek(0, 2)
+ self.size = self._file.tell()
+ self._file.seek(pos)
if self.memmap:
- if not isfile(self.__file):
+ if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._test_mmap():
# Test mmap.flush--see
@@ -179,7 +176,7 @@ class _File(object):
def __repr__(self):
return '<%s.%s %s>' % (self.__module__, self.__class__.__name__,
- self.__file)
+ self._file)
# Support the 'with' statement
def __enter__(self):
@@ -191,13 +188,13 @@ class _File(object):
def readable(self):
if self.writeonly:
return False
- return isreadable(self.__file)
+ return isreadable(self._file)
def read(self, size=None):
- if not hasattr(self.__file, 'read'):
+ if not hasattr(self._file, 'read'):
raise EOFError
try:
- return self.__file.read(size)
+ return self._file.read(size)
except IOError:
# On some versions of Python, it appears, GzipFile will raise an
# IOError if you try to read past its end (as opposed to just
@@ -216,7 +213,7 @@ class _File(object):
it's provided for compatibility.
"""
- if not hasattr(self.__file, 'read'):
+ if not hasattr(self._file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
@@ -246,26 +243,26 @@ class _File(object):
shape = (1,)
if self.memmap:
- return Memmap(self.__file, offset=offset,
+ return Memmap(self._file, offset=offset,
mode=MEMMAP_MODES[self.mode], dtype=dtype,
shape=shape).view(np.ndarray)
else:
count = reduce(lambda x, y: x * y, shape)
- pos = self.__file.tell()
- self.__file.seek(offset)
- data = _array_from_file(self.__file, dtype, count, '')
+ pos = self._file.tell()
+ self._file.seek(offset)
+ data = _array_from_file(self._file, dtype, count, '')
data.shape = shape
- self.__file.seek(pos)
+ self._file.seek(pos)
return data
def writable(self):
if self.readonly:
return False
- return iswritable(self.__file)
+ return iswritable(self._file)
def write(self, string):
- if hasattr(self.__file, 'write'):
- _write_string(self.__file, string)
+ if hasattr(self._file, 'write'):
+ _write_string(self._file, string)
def writearray(self, array):
"""
@@ -275,51 +272,51 @@ class _File(object):
the file on disk reflects the data written.
"""
- if hasattr(self.__file, 'write'):
- _array_to_file(array, self.__file)
+ if hasattr(self._file, 'write'):
+ _array_to_file(array, self._file)
def flush(self):
- if hasattr(self.__file, 'flush'):
- self.__file.flush()
+ if hasattr(self._file, 'flush'):
+ self._file.flush()
def seek(self, offset, whence=0):
# In newer Python versions, GzipFiles support the whence argument, but
# I don't think it was added until 2.6; instead of assuming it's
# present, we implement our own support for it here
- if not hasattr(self.__file, 'seek'):
+ if not hasattr(self._file, 'seek'):
return
- if isinstance(self.__file, (_astropy_gzip.GzipFile, _system_gzip.GzipFile)):
+ if isinstance(self._file, _GZIP_FILE_TYPES):
if whence:
if whence == 1:
- offset = self.__file.offset + offset
+ offset = self._file.offset + offset
else:
raise ValueError('Seek from end not supported')
- self.__file.seek(offset)
+ self._file.seek(offset)
else:
- self.__file.seek(offset, whence)
+ self._file.seek(offset, whence)
- pos = self.__file.tell()
+ pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn('File may have been truncated: actual file length '
'(%i) is smaller than the expected size (%i)' %
(self.size, pos), AstropyUserWarning)
def tell(self):
- if not hasattr(self.__file, 'tell'):
+ if not hasattr(self._file, 'tell'):
raise EOFError
- return self.__file.tell()
+ return self._file.tell()
def truncate(self, size=None):
- if hasattr(self.__file, 'truncate'):
- self.__file.truncate(size)
+ if hasattr(self._file, 'truncate'):
+ self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
- if hasattr(self.__file, 'close'):
- self.__file.close()
+ if hasattr(self._file, 'close'):
+ self._file.close()
self.closed = True
@@ -366,16 +363,16 @@ class _File(object):
raise ValueError(
"Mode argument '%s' does not match mode of the input "
"file (%s)." % (mode, fmode))
- self.__file = fileobj
+ self._file = fileobj
elif isfile(fileobj):
- self.__file = fileobj_open(self.name, PYFITS_MODES[mode])
+ self._file = fileobj_open(self.name, PYFITS_MODES[mode])
else:
- self.__file = _astropy_gzip.open(self.name, PYFITS_MODES[mode])
+ self._file = gzip.open(self.name, PYFITS_MODES[mode])
if fmode == 'ab+':
# Return to the beginning of the file--in Python 3 when opening in
# append mode the file pointer is at the end of the file
- self.__file.seek(0)
+ self._file.seek(0)
def _open_filelike(self, fileobj, mode, clobber):
"""Open a FITS file from a file-like object, i.e. one that has
@@ -383,7 +380,7 @@ class _File(object):
"""
self.file_like = True
- self.__file = fileobj
+ self._file = fileobj
if fileobj_closed(fileobj):
raise IOError("Cannot read from/write to a closed file-like "
@@ -391,15 +388,15 @@ class _File(object):
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
- self.__file.seek(0)
+ self._file.seek(0)
# We can bypass any additional checks at this point since now
- # self.__file points to the temp file extracted from the zip
+ # self._file points to the temp file extracted from the zip
return
# If there is not seek or tell methods then set the mode to
# output streaming.
- if (not hasattr(self.__file, 'seek') or
- not hasattr(self.__file, 'tell')):
+ if (not hasattr(self._file, 'seek') or
+ not hasattr(self._file, 'tell')):
self.mode = mode = 'ostream'
if mode == 'ostream':
@@ -407,13 +404,13 @@ class _File(object):
# Any "writeable" mode requires a write() method on the file object
if (self.mode in ('update', 'append', 'ostream') and
- not hasattr(self.__file, 'write')):
+ not hasattr(self._file, 'write')):
raise IOError("File-like object does not have a 'write' "
"method, required for mode '%s'."
% self.mode)
# Any mode except for 'ostream' requires readability
- if self.mode != 'ostream' and not hasattr(self.__file, 'read'):
+ if self.mode != 'ostream' and not hasattr(self._file, 'read'):
raise IOError("File-like object does not have a 'read' "
"method, required for mode %r."
% self.mode)
@@ -434,15 +431,15 @@ class _File(object):
if ext == '.gz' or magic.startswith(GZIP_MAGIC):
# Handle gzip files
- self.__file = _astropy_gzip.open(self.name, PYFITS_MODES[mode])
+ self._file = gzip.open(self.name, PYFITS_MODES[mode])
self.compression = 'gzip'
elif ext == '.zip' or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
else:
- self.__file = fileobj_open(self.name, PYFITS_MODES[mode])
+ self._file = fileobj_open(self.name, PYFITS_MODES[mode])
# Make certain we're back at the beginning of the file
- self.__file.seek(0)
+ self._file.seek(0)
def _test_mmap(self):
"""Tests that mmap, and specifically mmap.flush works. This may
@@ -508,8 +505,8 @@ class _File(object):
if len(namelist) != 1:
raise IOError(
"Zip files with multiple members are not supported.")
- self.__file = tempfile.NamedTemporaryFile(suffix='.fits')
- self.__file.write(zfile.read(namelist[0]))
+ self._file = tempfile.NamedTemporaryFile(suffix='.fits')
+ self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
@@ -525,4 +522,4 @@ def _is_random_access_file_backed(fileobj):
from an already opened `zipfile.ZipFile` object.
"""
- return isfile(fileobj) or isinstance(fileobj, (_astropy_gzip.GzipFile, _system_gzip.GzipFile))
+ return isfile(fileobj) or isinstance(fileobj, _GZIP_FILE_TYPES)
diff --git a/astropy/io/fits/fitsrec.py b/astropy/io/fits/fitsrec.py
index 6e995a0..771abd3 100644
--- a/astropy/io/fits/fitsrec.py
+++ b/astropy/io/fits/fitsrec.py
@@ -183,13 +183,12 @@ class FITS_rec(np.recarray):
buf=input.data, strides=input.strides)
self._nfields = len(self.dtype.names)
- self._convert = [None] * len(self.dtype.names)
+ self._converted = {}
self._heapoffset = 0
self._heapsize = 0
self._coldefs = None
self._gap = 0
self._uint = False
- self.formats = None
return self
def __setstate__(self, state):
@@ -216,8 +215,8 @@ class FITS_rec(np.recarray):
column_state = []
meta = []
- for attrs in ['_convert', '_heapoffset', '_heapsize', '_nfields',
- '_gap', '_uint', 'formats', 'parnames', '_coldefs']:
+ for attrs in ['_converted', '_heapoffset', '_heapsize', '_nfields',
+ '_gap', '_uint', 'parnames', '_coldefs']:
with ignored(AttributeError):
# _coldefs can be Delayed, and file objects cannot be
@@ -237,19 +236,18 @@ class FITS_rec(np.recarray):
return
if isinstance(obj, FITS_rec):
- self._convert = obj._convert
+ self._converted = obj._converted
self._heapoffset = obj._heapoffset
self._heapsize = obj._heapsize
self._coldefs = obj._coldefs
self._nfields = obj._nfields
self._gap = obj._gap
self._uint = obj._uint
- self.formats = obj.formats
else:
# This will allow regular ndarrays with fields, rather than
# just other FITS_rec objects
self._nfields = len(obj.dtype.names)
- self._convert = [None] * len(obj.dtype.names)
+ self._converted = {}
self._heapoffset = getattr(obj, '_heapoffset', 0)
self._heapsize = getattr(obj, '_heapsize', 0)
@@ -258,10 +256,7 @@ class FITS_rec(np.recarray):
self._gap = getattr(obj, '_gap', 0)
self._uint = getattr(obj, '_uint', False)
- # Bypass setattr-based assignment to fields; see #86
- self.formats = None
-
- attrs = ['_convert', '_coldefs', '_gap']
+ attrs = ['_converted', '_coldefs', '_gap']
for attr in attrs:
if hasattr(obj, attr):
value = getattr(obj, attr, None)
@@ -272,7 +267,6 @@ class FITS_rec(np.recarray):
if self._coldefs is None:
self._coldefs = ColDefs(self)
- self.formats = self._coldefs.formats
@classmethod
def from_columns(cls, columns, nrows=0, fill=False):
@@ -312,14 +306,17 @@ class FITS_rec(np.recarray):
columns = ColDefs(columns)
# read the delayed data
- for idx in range(len(columns)):
- arr = columns._arrays[idx]
+ for column in columns:
+ arr = column.array
if isinstance(arr, Delayed):
if arr.hdu.data is None:
- columns._arrays[idx] = None
+ column.array = None
else:
- columns._arrays[idx] = np.rec.recarray.field(arr.hdu.data,
- arr.field)
+ column.array = np.rec.recarray.field(arr.hdu.data,
+ arr.field)
+ # Reset columns._arrays (which we may want to just do away with
+ # altogether)
+ del columns._arrays
# use the largest column shape as the shape of the record
if nrows == 0:
@@ -335,6 +332,9 @@ class FITS_rec(np.recarray):
raw_data.fill(ord(columns._padding_byte))
data = np.recarray(nrows, dtype=columns.dtype, buf=raw_data).view(cls)
+ # Make sure the data is a listener for changes to the columns
+ columns._add_listener(data)
+
# Previously this assignment was made from hdu.columns, but that's a
# bug since if a _TableBaseHDU has a FITS_rec in its .data attribute
# the _TableBaseHDU.columns property is actually returned from
@@ -343,7 +343,6 @@ class FITS_rec(np.recarray):
# All of this is an artifact of the fragility of the FITS_rec class,
# and that it can't just be initialized by columns...
data._coldefs = columns
- data.formats = columns.formats
# If fill is True we don't copy anything from the column arrays. We're
# just using them as a template, and returning a table filled with
@@ -353,14 +352,14 @@ class FITS_rec(np.recarray):
# Otherwise we have to fill the recarray with data from the input
# columns
- for idx in range(len(columns)):
+ for idx, column in enumerate(columns):
# For each column in the ColDef object, determine the number of
# rows in that column. This will be either the number of rows in
# the ndarray associated with the column, or the number of rows
# given in the call to this function, which ever is smaller. If
# the input FILL argument is true, the number of rows is set to
# zero so that no data is copied from the original input data.
- arr = columns._arrays[idx]
+ arr = column.array
if arr is None:
array_size = 0
@@ -379,8 +378,9 @@ class FITS_rec(np.recarray):
continue
field = np.rec.recarray.field(data, idx)
- fitsformat = columns.formats[idx]
- recformat = columns._recformats[idx]
+ name = column.name
+ fitsformat = column.format
+ recformat = fitsformat.recformat
outarr = field[:n]
inarr = arr[:n]
@@ -391,8 +391,8 @@ class FITS_rec(np.recarray):
_wrapx(inarr, outarr, recformat.repeat)
continue
elif isinstance(recformat, _FormatP):
- data._convert[idx] = _makep(inarr, field, recformat,
- nrows=nrows)
+ data._converted[name] = _makep(inarr, field, recformat,
+ nrows=nrows)
continue
# TODO: Find a better way of determining that the column is meant
# to be FITS L formatted
@@ -403,17 +403,18 @@ class FITS_rec(np.recarray):
field[:] = ord('F')
# Also save the original boolean array in data._converted so
# that it doesn't have to be re-converted
- data._convert[idx] = np.zeros(field.shape, dtype=bool)
- data._convert[idx][:n] = inarr
+ data._converted[name] = np.zeros(field.shape, dtype=bool)
+ data._converted[name][:n] = inarr
# TODO: Maybe this step isn't necessary at all if _scale_back
# will handle it?
inarr = np.where(inarr == False, ord('F'), ord('T'))
elif (columns[idx]._physical_values and
columns[idx]._pseudo_unsigned_ints):
# Temporary hack...
- bzero = columns[idx].bzero
- data._convert[idx] = np.zeros(field.shape, dtype=inarr.dtype)
- data._convert[idx][:n] = inarr
+ bzero = column.bzero
+ data._converted[name] = np.zeros(field.shape,
+ dtype=inarr.dtype)
+ data._converted[name][:n] = inarr
if n < nrows:
# Pre-scale rows below the input data
field[n:] = -bzero
@@ -431,14 +432,16 @@ class FITS_rec(np.recarray):
outarr = field.view(np.uint8, np.ndarray)[:n]
elif not isinstance(arr, chararray.chararray):
# Fill with the appropriate blanks for the column format
- data._convert[idx] = np.zeros(nrows, dtype=arr.dtype)
- outarr = data._convert[idx][:n]
+ data._converted[name] = np.zeros(nrows, dtype=arr.dtype)
+ outarr = data._converted[name][:n]
outarr[:] = inarr
continue
if inarr.shape != outarr.shape:
- if inarr.dtype != outarr.dtype:
+ if (inarr.dtype.kind == outarr.dtype.kind and
+ inarr.dtype.kind in ('U', 'S') and
+ inarr.dtype != outarr.dtype):
inarr = inarr.view(outarr.dtype)
# This is a special case to handle input arrays with
@@ -458,28 +461,18 @@ class FITS_rec(np.recarray):
else:
outarr[:] = inarr
+ # Now replace the original column array references with the new
+ # fields
+ # This is required to prevent the issue reported in
+ # https://github.com/spacetelescope/PyFITS/issues/99
+ for idx in range(len(columns)):
+ columns._arrays[idx] = data.field(idx)
+
return data
def __repr__(self):
return np.recarray.__repr__(self)
- def __getattribute__(self, attr):
- # See the comment in __setattr__
- if attr in ('names', 'formats'):
- return object.__getattribute__(self, attr)
- else:
- return super(FITS_rec, self).__getattribute__(attr)
-
- def __setattr__(self, attr, value):
- # Overrides the silly attribute-based assignment to fields supported by
- # recarrays for our two built-in public attributes: names and formats
- # Otherwise, the default behavior, bad as it is, is preserved. See
- # ticket #86
- if attr in ('names', 'formats'):
- return object.__setattr__(self, attr, value)
- else:
- return super(FITS_rec, self).__setattr__(attr, value)
-
def __getitem__(self, key):
if isinstance(key, string_types):
return self.field(key)
@@ -491,25 +484,23 @@ class FITS_rec(np.recarray):
out = self.view(np.recarray).__getitem__(key).view(subtype)
out._coldefs = ColDefs(self._coldefs)
arrays = []
- out._convert = [None] * len(self.dtype.names)
- for idx in range(len(self.dtype.names)):
+ out._converted = {}
+ for idx, name in enumerate(self._coldefs.names):
#
# Store the new arrays for the _coldefs object
#
arrays.append(self._coldefs._arrays[idx][key])
- # touch all fields to expand the original ._convert list
+ # touch all fields to expand the original ._converted dict
# so the sliced FITS_rec will view the same scaled columns as
# the original
dummy = self.field(idx)
- if self._convert[idx] is not None:
- out._convert[idx] = \
- np.ndarray.__getitem__(self._convert[idx], key)
+ if name in self._converted:
+ out._converted[name] = \
+ np.ndarray.__getitem__(self._converted[name], key)
del dummy
out._coldefs._arrays = arrays
- out._coldefs._shape = len(arrays[0])
-
return out
# if not a slice, do this because Record has no __getstate__.
@@ -562,9 +553,9 @@ class FITS_rec(np.recarray):
`numpy.copy`. Differences include that it re-views the copied array as
self's ndarray subclass, as though it were taking a slice; this means
``__array_finalize__`` is called and the copy shares all the array
- attributes (including ``._convert``!). So we need to make a deep copy
- of all those attributes so that the two arrays truly do not share any
- data.
+ attributes (including ``._converted``!). So we need to make a deep
+ copy of all those attributes so that the two arrays truly do not share
+ any data.
"""
new = super(FITS_rec, self).copy(order=order)
@@ -591,6 +582,11 @@ class FITS_rec(np.recarray):
else:
return list(self.dtype.names)
+ @property
+ def formats(self):
+ """List of column FITS formats."""
+
+ return self._coldefs.formats
def field(self, key):
"""
@@ -599,18 +595,15 @@ class FITS_rec(np.recarray):
# NOTE: The *column* index may not be the same as the field index in
# the recarray, if the column is a phantom column
- col_indx = _get_index(self.columns.names, key)
- if self.columns[col_indx]._phantom:
+ column = self.columns[key]
+ name = column.name
+ format = column.format
+
+ if format.dtype.itemsize == 0:
warnings.warn(
'Field %r has a repeat count of 0 in its format code, '
'indicating an empty field.' % key)
- recformat = self.columns._recformats[col_indx].lstrip('0')
- return np.array([], dtype=recformat)
- # Ignore phantom columns in determining the physical field number
- n_phantom = len([c for c in self.columns[:col_indx] if c._phantom])
- field_indx = col_indx - n_phantom
-
- recformat = self._coldefs._recformats[col_indx]
+ return np.array([], dtype=format.dtype)
# If field's base is a FITS_rec, we can run into trouble because it
# contains a reference to the ._coldefs object of the original data;
@@ -622,21 +615,47 @@ class FITS_rec(np.recarray):
# base could still be a FITS_rec in some cases, so take care to
# use rec.recarray.field to avoid a potential infinite
# recursion
- field = np.recarray.field(base, field_indx)
+ field = np.recarray.field(base, name)
- if self._convert[field_indx] is None:
+ if name not in self._converted:
+ recformat = format.recformat
+ # TODO: If we're now passing the column to these subroutines, do we
+ # really need to pass them the recformat?
if isinstance(recformat, _FormatP):
# for P format
- converted = self._convert_p(col_indx, field, recformat)
+ converted = self._convert_p(column, field, recformat)
else:
# Handle all other column data types which are fixed-width
# fields
- converted = self._convert_other(col_indx, field, recformat)
+ converted = self._convert_other(column, field, recformat)
- self._convert[field_indx] = converted
+ self._converted[name] = converted
return converted
- return self._convert[field_indx]
+ return self._converted[name]
+
+ def _update_column_attribute_changed(self, column, idx, attr, old_value,
+ new_value):
+ """
+ Update how the data is formatted depending on changes to column
+ attributes initiated by the user through the `Column` interface.
+
+ Dispatches column attribute change notifications to individual methods
+ for each attribute ``_update_column_<attr>``
+ """
+
+ method_name = '_update_column_{0}'.format(attr)
+ if hasattr(self, method_name):
+ # Right now this is so we can be lazy and not implement updaters
+ # for every attribute yet--some we may not need at all, TBD
+ getattr(self, method_name)(column, idx, old_value, new_value)
+
+ def _update_column_name(self, column, idx, old_name, name):
+ """Update the dtype field names when a column name is changed."""
+
+ dtype = self.dtype
+ # Updating the names on the dtype should suffice
+ dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1:]
def _convert_x(self, field, recformat):
"""Convert a raw table column to a bit array as specified by the
@@ -647,7 +666,7 @@ class FITS_rec(np.recarray):
_unwrapx(field, dummy, recformat.repeat)
return dummy
- def _convert_p(self, indx, field, recformat):
+ def _convert_p(self, column, field, recformat):
"""Convert a raw table column of FITS P or Q format descriptors
to a VLA column with the array data returned from the heap.
"""
@@ -658,7 +677,7 @@ class FITS_rec(np.recarray):
if raw_data is None:
raise IOError(
"Could not find heap data for the %r variable-length "
- "array column." % self.columns.names[indx])
+ "array column." % column.name)
for idx in range(len(self)):
offset = field[idx, 1] + self._heapoffset
@@ -684,20 +703,21 @@ class FITS_rec(np.recarray):
# TODO: Test that this works for X format; I don't think
# that it does--the recformat variable only applies to the P
# format not the X format
- dummy[idx] = self._convert_other(indx, dummy[idx], recformat)
+ dummy[idx] = self._convert_other(column, dummy[idx],
+ recformat)
return dummy
- def _convert_ascii(self, indx, field):
+ def _convert_ascii(self, column, field):
"""
Special handling for ASCII table columns to convert columns containing
numeric types to actual numeric arrays from the string representation.
"""
- format = self._coldefs.formats[indx]
+ format = column.format
recformat = ASCII2NUMPY[format[0]]
# if the string = TNULL, return ASCIITNULL
- nullval = str(self._coldefs.nulls[indx]).strip().encode('ascii')
+ nullval = str(column.null).strip().encode('ascii')
if len(nullval) > format.width:
nullval = nullval[:format.width]
@@ -713,6 +733,7 @@ class FITS_rec(np.recarray):
try:
dummy = np.array(dummy, dtype=recformat)
except ValueError as exc:
+ indx = self._coldefs.names.index(column.name)
raise ValueError(
'%s; the header may be missing the necessary TNULL%d '
'keyword or the table contains invalid data' %
@@ -720,7 +741,7 @@ class FITS_rec(np.recarray):
return dummy
- def _convert_other(self, indx, field, recformat):
+ def _convert_other(self, column, field, recformat):
"""Perform conversions on any other fixed-width column data types.
This may not perform any conversion at all if it's not necessary, in
@@ -732,7 +753,9 @@ class FITS_rec(np.recarray):
return self._convert_x(field, recformat)
(_str, _bool, _number, _scale, _zero, bscale, bzero, dim) = \
- self._get_scale_factors(indx)
+ self._get_scale_factors(column)
+
+ indx = self._coldefs.names.index(column.name)
# ASCII table, convert strings to numbers
# TODO:
@@ -742,7 +765,7 @@ class FITS_rec(np.recarray):
# converting their data from FITS format to native format and vice
# versa...
if not _str and isinstance(self._coldefs, _AsciiColDefs):
- field = self._convert_ascii(indx, field)
+ field = self._convert_ascii(column, field)
# Test that the dimensions given in dim are sensible; otherwise
# display a warning and ignore them
@@ -764,6 +787,8 @@ class FITS_rec(np.recarray):
nitems = reduce(operator.mul, dim)
if _str:
actual_nitems = field.itemsize
+ elif len(field.shape) == 1: # No repeat count in TFORMn, equivalent to 1
+ actual_nitems = 1
else:
actual_nitems = field.shape[1]
if nitems > actual_nitems:
@@ -780,17 +805,25 @@ class FITS_rec(np.recarray):
# actually doing the scaling
# TODO: This also needs to be fixed in the effort to make Columns
# responsible for scaling their arrays to/from FITS native values
- column = self._coldefs[indx]
+ if not column.ascii and column.format.p_format:
+ format_code = column.format.p_format
+ else:
+ # TODO: Rather than having this if/else it might be nice if the
+ # ColumnFormat class had an attribute guaranteed to give the format
+ # of actual values in a column regardless of whether the true
+ # format is something like P or Q
+ format_code = column.format.format
+
if (_number and (_scale or _zero) and not column._physical_values):
# This is to handle pseudo unsigned ints in table columns
# TODO: For now this only really works correctly for binary tables
# Should it work for ASCII tables as well?
if self._uint:
- if bzero == 2**15 and 'I' in self._coldefs.formats[indx]:
+ if bzero == 2**15 and format_code == 'I':
field = np.array(field, dtype=np.uint16)
- elif bzero == 2**31 and 'J' in self._coldefs.formats[indx]:
+ elif bzero == 2**31 and format_code == 'J':
field = np.array(field, dtype=np.uint32)
- elif bzero == 2**63 and 'K' in self._coldefs.formats[indx]:
+ elif bzero == 2**63 and format_code == 'K':
field = np.array(field, dtype=np.uint64)
bzero64 = np.uint64(2 ** 63)
else:
@@ -801,7 +834,7 @@ class FITS_rec(np.recarray):
if _scale:
np.multiply(field, bscale, field)
if _zero:
- if self._uint and 'K' in self._coldefs.formats[indx]:
+ if self._uint and format_code == 'K':
# There is a chance of overflow, so be careful
test_overflow = field.copy()
try:
@@ -882,34 +915,30 @@ class FITS_rec(np.recarray):
if hasattr(base, 'nbytes') and base.nbytes >= raw_data_bytes:
return base
- def _get_scale_factors(self, indx):
- """
- Get the scaling flags and factors for one field.
+ def _get_scale_factors(self, column):
+ """Get all the scaling flags and factors for one column."""
- `indx` is the index of the field.
- """
-
- if isinstance(self._coldefs, _AsciiColDefs):
- _str = self._coldefs.formats[indx][0] == 'A'
- _bool = False # there is no boolean in ASCII table
- else:
- _str = 'a' in self._coldefs._recformats[indx]
- # TODO: Determine a better way to determine if the column is bool
- # formatted
- _bool = self._coldefs._recformats[indx][-2:] == FITS2NUMPY['L']
+ # TODO: Maybe this should be a method/property on Column? Or maybe
+ # it's not really needed at all...
+ _str = column.format.format == 'A'
+ _bool = column.format.format == 'L'
_number = not (_bool or _str)
- bscale = self._coldefs.bscales[indx]
- bzero = self._coldefs.bzeros[indx]
+ bscale = column.bscale
+ bzero = column.bzero
+
_scale = bscale not in ('', None, 1)
_zero = bzero not in ('', None, 0)
+
# ensure bscale/bzero are numbers
if not _scale:
bscale = 1
if not _zero:
bzero = 0
- dim = self._coldefs._dims[indx]
+ # column._dims gives a tuple, rather than column.dim which returns the
+ # original string format code from the FITS header...
+ dim = column._dims
return (_str, _bool, _number, _scale, _zero, bscale, bzero, dim)
@@ -928,8 +957,9 @@ class FITS_rec(np.recarray):
# Running total for the new heap size
heapsize = 0
- for indx in range(len(self.dtype.names)):
- recformat = self._coldefs._recformats[indx]
+ for indx, name in enumerate(self.dtype.names):
+ column = self._coldefs[indx]
+ recformat = column.format.recformat
field = super(FITS_rec, self).field(indx)
# add the location offset of the heap area for each
@@ -941,11 +971,11 @@ class FITS_rec(np.recarray):
# an array of characters.
dtype = np.array([], dtype=recformat.dtype).dtype
- if update_heap_pointers and self._convert[indx] is not None:
+ if update_heap_pointers and name in self._converted:
# The VLA has potentially been updated, so we need to
# update the array descriptors
field[:] = 0 # reset
- npts = [len(arr) for arr in self._convert[indx]]
+ npts = [len(arr) for arr in self._converted[name]]
field[:len(npts), 0] = npts
field[1:, 1] = (np.add.accumulate(field[:-1, 0]) *
@@ -957,21 +987,20 @@ class FITS_rec(np.recarray):
# include the size of its constituent arrays in the heap size
# total
- if self._convert[indx] is None:
+ if name not in self._converted:
continue
if isinstance(recformat, _FormatX):
- _wrapx(self._convert[indx], field, recformat.repeat)
+ _wrapx(self._converted[name], field, recformat.repeat)
continue
_str, _bool, _number, _scale, _zero, bscale, bzero, _ = \
- self._get_scale_factors(indx)
+ self._get_scale_factors(column)
# conversion for both ASCII and binary tables
if _number or _str:
- column = self._coldefs[indx]
if _number and (_scale or _zero) and column._physical_values:
- dummy = self._convert[indx].copy()
+ dummy = self._converted[name].copy()
if _zero:
dummy -= bzero
if _scale:
@@ -981,9 +1010,9 @@ class FITS_rec(np.recarray):
# be mark is not scaled
column._physical_values = False
elif _str:
- dummy = self._convert[indx]
+ dummy = self._converted[name]
elif isinstance(self._coldefs, _AsciiColDefs):
- dummy = self._convert[indx]
+ dummy = self._converted[name]
else:
continue
@@ -1026,7 +1055,7 @@ class FITS_rec(np.recarray):
# ASCII table does not have Boolean type
elif _bool:
- field[:] = np.choose(self._convert[indx],
+ field[:] = np.choose(self._converted[name],
(np.array([ord('F')], dtype=np.int8)[0],
np.array([ord('T')], dtype=np.int8)[0]))
diff --git a/astropy/io/fits/hdu/base.py b/astropy/io/fits/hdu/base.py
index c0b86d5..195ab86 100644
--- a/astropy/io/fits/hdu/base.py
+++ b/astropy/io/fits/hdu/base.py
@@ -756,6 +756,15 @@ class _BaseHDU(object):
self._data_size = datsize
self._data_replaced = False
+ def _close(self, closed=True):
+ # If the data was mmap'd, close the underlying mmap (this will
+ # prevent any future access to the .data attribute if there are
+ # not other references to it; if there are other references then
+ # it is up to the user to clean those up
+ if (closed and self._data_loaded and
+ _get_array_mmap(self.data) is not None):
+ del self.data
+
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
diff --git a/astropy/io/fits/hdu/compressed.py b/astropy/io/fits/hdu/compressed.py
index 0f1e0d7..3112927 100644
--- a/astropy/io/fits/hdu/compressed.py
+++ b/astropy/io/fits/hdu/compressed.py
@@ -1,6 +1,7 @@
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
+import gc
import math
import re
import time
@@ -16,7 +17,8 @@ from ..column import Column, ColDefs, TDEF_RE
from ..column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from ..fitsrec import FITS_rec
from ..header import Header
-from ..util import _is_pseudo_unsigned, _unsigned_zero, _is_int
+from ..util import (_is_pseudo_unsigned, _unsigned_zero, _is_int,
+ _get_array_mmap)
from ....extern.six import string_types, iteritems
from ....utils import lazyproperty, deprecated
@@ -1436,6 +1438,26 @@ class CompImageHDU(BinTableHDU):
return self.compressed_data
+ @compressed_data.deleter
+ def compressed_data(self):
+ # Deleting the compressed_data attribute has to be handled
+ # with a little care to prevent a reference leak
+ # First delete the ._coldefs attributes under it to break a possible
+ # reference cycle
+ if 'compressed_data' in self.__dict__:
+ del self.__dict__['compressed_data']._coldefs
+
+ # Now go ahead and delete from self.__dict__; normally
+ # lazyproperty.__delete__ does this for us, but we can preempt it to
+ # do some additional cleanup
+ del self.__dict__['compressed_data']
+
+ # If this file was mmap'd, numpy.memmap will hold open a file
+ # handle until the underlying mmap object is garbage-collected;
+ # since this reference leak can sometimes hang around longer than
+ # welcome go ahead and force a garbage collection
+ gc.collect()
+
@lazyproperty
@deprecated('0.3', alternative='the ``compressed_data`` attribute',
pending=True)
@@ -1641,6 +1663,10 @@ class CompImageHDU(BinTableHDU):
del self._header['THEAP']
self._theap = tbsize
+ # First delete the original compressed data, if it exists
+ del self.compressed_data
+
+
# Compress the data.
# The current implementation of compress_hdu assumes the empty
# compressed data table has already been initialized in
@@ -1663,7 +1689,6 @@ class CompImageHDU(BinTableHDU):
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
- self.compressed_data.formats = self.columns.formats
@deprecated('0.3', alternative='(refactor your code)')
def updateCompressedData(self):
@@ -1840,6 +1865,17 @@ class CompImageHDU(BinTableHDU):
else:
del self.data
+ def _close(self, closed=True):
+ super(CompImageHDU, self)._close(closed=closed)
+
+ # Also make sure to close access to the compressed data mmaps
+ if (closed and self._data_loaded and
+ _get_array_mmap(self.compressed_data) is not None):
+ del self.compressed_data
+ # Close off the deprecated compData attribute as well if it has been
+ # used
+ del self.compData
+
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
diff --git a/astropy/io/fits/hdu/groups.py b/astropy/io/fits/hdu/groups.py
index 5e3d8df..e4915f1 100644
--- a/astropy/io/fits/hdu/groups.py
+++ b/astropy/io/fits/hdu/groups.py
@@ -150,7 +150,7 @@ class GroupData(FITS_rec):
if bitpix is None:
bitpix = _ImageBaseHDU.ImgCode[input.dtype.name]
- fits_fmt = GroupsHDU._width2format[bitpix] # -32 -> 'E'
+ fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = '%s%s' % (str(input.shape[1:]), format)
formats = ','.join(([format] * npars) + [data_fmt])
@@ -169,19 +169,30 @@ class GroupData(FITS_rec):
formats=formats,
names=coldefs.names,
shape=gcount))
+
+ # By default the data field will just be 'DATA', but it may be
+ # uniquified if 'DATA' is already used by one of the group names
+ self._data_field = unique_parnames[-1]
+
self._coldefs = coldefs
self.parnames = parnames
- for idx in range(npars):
- scale, zero = self._get_scale_factors(idx)[3:5]
+ for idx, name in enumerate(unique_parnames[:-1]):
+ column = coldefs[idx]
+ # Note: _get_scale_factors is used here and in other cases
+ # below to determine whether the column has non-default
+ # scale/zero factors.
+ # TODO: Find a better way to do this than using this interface
+ scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
- self._convert[idx] = pardata[idx]
+ self._converted[name] = pardata[idx]
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
- scale, zero = self._get_scale_factors(npars)[3:5]
+ column = coldefs[self._data_field]
+ scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
- self._convert[npars] = input
+ self._converted[self._data_field] = input
else:
np.rec.recarray.field(self, npars)[:] = input
else:
@@ -245,17 +256,18 @@ class GroupsHDU(PrimaryHDU, _TableLikeHDU):
details on working with this type of HDU.
"""
- _width2format = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'}
+ _bitpix2tform = {8: 'B', 16: 'I', 32: 'J', 64: 'K', -32: 'E', -64: 'D'}
_data_type = GroupData
+ _data_field = 'DATA'
+ """
+ The name of the table record array field that will contain the group data
+ for each group; 'DATA' by default, but may be preceded by any number of
+ underscores if 'DATA' is already a parameter name
+ """
def __init__(self, data=None, header=None):
super(GroupsHDU, self).__init__(data=data, header=header)
- # The name of the table record array field that will contain the group
- # data for each group; 'data' by default, but may be preceded by any
- # number of underscores if 'data' is already a parameter name
- self._data_field = 'DATA'
-
# Update the axes; GROUPS HDUs should always have at least one axis
if len(self._axes) <= 0:
self._axes = [0]
@@ -277,7 +289,6 @@ class GroupsHDU(PrimaryHDU, _TableLikeHDU):
data = self._get_tbdata()
data._coldefs = self.columns
- data.formats = self.columns.formats
data.parnames = self.parnames
del self.columns
return data
@@ -296,39 +307,49 @@ class GroupsHDU(PrimaryHDU, _TableLikeHDU):
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
- format = self._width2format[self._header['BITPIX']]
+ format = self._bitpix2tform[self._header['BITPIX']]
pcount = self._header['PCOUNT']
parnames = []
bscales = []
bzeros = []
for idx in range(pcount):
- bscales.append(self._header.get('PSCAL' + str(idx + 1), 1))
- bzeros.append(self._header.get('PZERO' + str(idx + 1), 0))
+ bscales.append(self._header.get('PSCAL' + str(idx + 1), None))
+ bzeros.append(self._header.get('PZERO' + str(idx + 1), None))
parnames.append(self._header['PTYPE' + str(idx + 1)])
+ formats = [format] * len(parnames)
+ dim = [None] * len(parnames)
+
# Now create columns from collected parameters, but first add the DATA
# column too, to contain the group data.
- formats = [format] * len(parnames)
parnames.append('DATA')
- bscales.append(self._header.get('BSCALE', 1))
- bzeros.append(self._header.get('BZEROS', 0))
+ bscales.append(self._header.get('BSCALE'))
+ bzeros.append(self._header.get('BZEROS'))
data_shape = self.shape[:-1]
- formats.append(str(int(np.array(data_shape).sum())) + format)
+ formats.append(str(int(np.prod(data_shape))) + format)
+ dim.append(data_shape)
parnames = _unique_parnames(parnames)
+
self._data_field = parnames[-1]
- cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero)
- for name, fmt, bscale, bzero in
- zip(parnames, formats, bscales, bzeros)]
+ cols = [Column(name=name, format=fmt, bscale=bscale, bzero=bzero,
+ dim=dim)
+ for name, fmt, bscale, bzero, dim in
+ zip(parnames, formats, bscales, bzeros, dim)]
coldefs = ColDefs(cols)
- # TODO: Something has to be done about this spaghetti code of arbitrary
- # attributes getting tacked on to the coldefs here.
- coldefs._shape = self._header['GCOUNT']
- coldefs._dat_format = FITS2NUMPY[format]
return coldefs
+ @property
+ def _nrows(self):
+ if not self._data_loaded:
+ # The number of 'groups' equates to the number of rows in the table
+ # representation of the data
+ return self._header.get('GCOUNT', 0)
+ else:
+ return len(self.data)
+
@lazyproperty
def _theap(self):
# Only really a lazyproperty for symmetry with _TableBaseHDU
@@ -395,22 +416,22 @@ class GroupsHDU(PrimaryHDU, _TableLikeHDU):
after='NAXIS' + str(len(self._axes)))
self._header.set('PCOUNT', len(self.data.parnames), after='GROUPS')
self._header.set('GCOUNT', len(self.data), after='PCOUNT')
- npars = len(self.data.parnames)
- scale, zero = self.data._get_scale_factors(npars)[3:5]
+
+ column = self.data._coldefs[self.data._data_field]
+ scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
- self._header.set('BSCALE', self.data._coldefs.bscales[npars])
+ self._header.set('BSCALE', column.bscale)
if zero:
- self._header.set('BZERO', self.data._coldefs.bzeros[npars])
- for idx in range(npars):
- self._header.set('PTYPE' + str(idx + 1),
- self.data.parnames[idx])
- scale, zero = self.data._get_scale_factors(idx)[3:5]
+ self._header.set('BZERO', column.bzero)
+
+ for idx, name in enumerate(self.data.parnames):
+ self._header.set('PTYPE' + str(idx + 1), name)
+ column = self.data._coldefs[idx]
+ scale, zero = self.data._get_scale_factors(column)[3:5]
if scale:
- self._header.set('PSCAL' + str(idx + 1),
- self.data._coldefs.bscales[idx])
+ self._header.set('PSCAL' + str(idx + 1), column.bscale)
if zero:
- self._header.set('PZERO' + str(idx + 1),
- self.data._coldefs.bzeros[idx])
+ self._header.set('PZERO' + str(idx + 1), column.bzero)
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
@@ -420,14 +441,6 @@ class GroupsHDU(PrimaryHDU, _TableLikeHDU):
after = 'NAXIS'
self._header.set('EXTEND', after=after)
- def _get_tbdata(self):
- # get the right shape for the data part of the random group,
- # since binary table does not support ND yet
- self.columns._recformats[-1] = (repr(self.shape[:-1]) +
- self.columns._dat_format)
-
- return super(GroupsHDU, self)._get_tbdata()
-
def _writedata_internal(self, fileobj):
"""
Basically copy/pasted from `_ImageBaseHDU._writedata_internal()`, but
diff --git a/astropy/io/fits/hdu/hdulist.py b/astropy/io/fits/hdu/hdulist.py
index 6933b71..7d9acb8 100644
--- a/astropy/io/fits/hdu/hdulist.py
+++ b/astropy/io/fits/hdu/hdulist.py
@@ -149,7 +149,7 @@ class HDUList(list, _Verify):
The opened physical file associated with the `HDUList`.
"""
- self.__file = file
+ self._file = file
self._save_backup = False
if hdus is None:
@@ -340,7 +340,7 @@ class HDUList(list, _Verify):
"""
- if self.__file is not None:
+ if self._file is not None:
output = self[index].fileinfo()
if not output:
@@ -361,7 +361,7 @@ class HDUList(list, _Verify):
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
- output['filename'] = self.__file.name
+ output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
@@ -557,13 +557,13 @@ class HDUList(list, _Verify):
When `True`, print verbose messages
"""
- if self.__file.mode not in ('append', 'update', 'ostream'):
+ if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '%s' mode is not supported."
- % self.__file.mode, AstropyUserWarning)
+ % self._file.mode, AstropyUserWarning)
return
- if self._save_backup and self.__file.mode in ('append', 'update'):
- filename = self.__file.name
+ if self._save_backup and self._file.mode in ('append', 'update'):
+ filename = self._file.name
if os.path.exists(filename):
# The the file doesn't actually exist anymore for some reason
# then there's no point in trying to make a backup
@@ -582,7 +582,7 @@ class HDUList(list, _Verify):
self.verify(option=output_verify)
- if self.__file.mode in ('append', 'ostream'):
+ if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
@@ -594,13 +594,13 @@ class HDUList(list, _Verify):
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
try:
- hdu._writeto(self.__file)
+ hdu._writeto(self._file)
if verbose:
print('append HDU', hdu.name, extver)
hdu._new = False
finally:
hdu._postwriteto()
- elif self.__file.mode == 'update':
+ elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
@@ -679,7 +679,7 @@ class HDUList(list, _Verify):
for hdu in self:
hdu._prewriteto(checksum=checksum)
try:
- hdu._writeto(hdulist.__file)
+ hdu._writeto(hdulist._file)
finally:
hdu._postwriteto()
@@ -705,12 +705,16 @@ class HDUList(list, _Verify):
When `True`, close the underlying file object.
"""
- if self.__file:
- if self.__file.mode in ['append', 'update']:
+ if self._file:
+ if self._file.mode in ['append', 'update']:
self.flush(output_verify=output_verify, verbose=verbose)
- if closed and hasattr(self.__file, 'close'):
- self.__file.close()
+ if closed and hasattr(self._file, 'close'):
+ self._file.close()
+
+ # Give individual HDUs an opportunity to do on-close cleanup
+ for hdu in self:
+ hdu._close(closed=closed)
def info(self, output=None):
"""
@@ -730,10 +734,10 @@ class HDUList(list, _Verify):
if output is None:
output = sys.stdout
- if self.__file is None:
+ if self._file is None:
name = '(No file associated with this HDUList)'
else:
- name = self.__file.name
+ name = self._file.name
results = ['Filename: %s' % name,
'No. Name Type Cards Dimensions Format']
@@ -768,9 +772,9 @@ class HDUList(list, _Verify):
HDUList object if an association exists. Otherwise returns
None.
"""
- if self.__file is not None:
- if hasattr(self.__file, 'name'):
- return self.__file.name
+ if self._file is not None:
+ if hasattr(self._file, 'name'):
+ return self._file.name
return None
@classmethod
@@ -926,12 +930,12 @@ class HDUList(list, _Verify):
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
- if self._resize or self.__file.compression:
+ if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
- hdu._writeto(self.__file, inplace=True)
+ hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
@@ -946,16 +950,16 @@ class HDUList(list, _Verify):
need to be resized.
"""
- old_name = self.__file.name
- old_memmap = self.__file.memmap
+ old_name = self._file.name
+ old_memmap = self._file.memmap
name = _tmp_name(old_name)
- if not self.__file.file_like:
+ if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
- if self.__file.compression == 'gzip':
+ if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
else:
new_file = name
@@ -963,7 +967,7 @@ class HDUList(list, _Verify):
hdulist = self.fromfile(new_file, mode='append')
for hdu in self:
- hdu._writeto(hdulist.__file, inplace=True, copy=True)
+ hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
# Collect a list of open mmaps to the data; this well be used
@@ -971,8 +975,8 @@ class HDUList(list, _Verify):
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
- hdulist.__file.close()
- self.__file.close()
+ hdulist._file.close()
+ self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
@@ -982,7 +986,7 @@ class HDUList(list, _Verify):
if mmap is not None:
mmap.close()
- os.remove(self.__file.name)
+ os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
@@ -995,7 +999,7 @@ class HDUList(list, _Verify):
ffo = _File(old_file, mode='update', memmap=old_memmap)
- self.__file = ffo
+ self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
@@ -1029,7 +1033,7 @@ class HDUList(list, _Verify):
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
- ffo = self.__file
+ ffo = self._file
ffo.truncate(0)
ffo.seek(0)
@@ -1039,7 +1043,7 @@ class HDUList(list, _Verify):
# Close the temporary file and delete it.
hdulist.close()
- os.remove(hdulist.__file.name)
+ os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
@@ -1085,7 +1089,7 @@ class HDUList(list, _Verify):
if self._truncate:
try:
- self.__file.truncate(hdu._data_offset + hdu._data_size)
+ self._file.truncate(hdu._data_offset + hdu._data_size)
except IOError:
self._resize = True
self._truncate = False
diff --git a/astropy/io/fits/hdu/table.py b/astropy/io/fits/hdu/table.py
index 95d4586..0723bac 100644
--- a/astropy/io/fits/hdu/table.py
+++ b/astropy/io/fits/hdu/table.py
@@ -16,14 +16,16 @@ from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on pyfits.column, but pyfits.column
# has fewer dependencies overall, so it's easier to keep table/column-related
# utilities in pyfits.column
-from ..column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_ATTRIBUTES, TDEF_RE,
- Column, ColDefs, _AsciiColDefs, _FormatP, _FormatQ,
- _makep, _parse_tformat, _scalar_to_format,
- _convert_format, _cmp_recformats, _get_index)
+from ..column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
+ ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
+ _AsciiColDefs, _FormatP, _FormatQ, _makep,
+ _parse_tformat, _scalar_to_format, _convert_format,
+ _cmp_recformats, _get_index)
from ..fitsrec import FITS_rec
from ..header import Header, _pad_length
from ..util import _is_int, _str_to_num
+from ....extern import six
from ....extern.six import string_types
from ....utils import deprecated, lazyproperty
from ....utils.compat import ignored
@@ -119,7 +121,9 @@ class _TableLikeHDU(_ValidHDU):
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill)
- return cls(data=data, header=header, **kwargs)
+ hdu = cls(data=data, header=header, **kwargs)
+ coldefs._add_listener(hdu)
+ return hdu
@lazyproperty
def columns(self):
@@ -131,6 +135,17 @@ class _TableLikeHDU(_ValidHDU):
# definitions come from, so just return an empty ColDefs
return ColDefs([])
+ @property
+ def _nrows(self):
+ """
+ Table-like HDUs must provide an attribute that specifies the number of
+ rows in the HDU's table.
+
+ For now this is an internal-only attribute.
+ """
+
+ raise NotImplementedError
+
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
@@ -141,6 +156,7 @@ class _TableLikeHDU(_ValidHDU):
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
+ self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
@@ -148,12 +164,20 @@ class _TableLikeHDU(_ValidHDU):
data = raw_data[:self._theap].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
- raw_data = self._get_raw_data(columns._shape, columns.dtype,
+ raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
+ if raw_data is None:
+ # This can happen when a brand new table HDU is being created
+ # and no data has been assigned to the columns, which case just
+ # return an empty array
+ raw_data = np.array([], dtype=columns.dtype)
+
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
- return data.view(self._data_type)
+ data = data.view(self._data_type)
+ columns._add_listener(data)
+ return data
def _init_tbdata(self, data):
columns = self.columns
@@ -172,15 +196,40 @@ class _TableLikeHDU(_ValidHDU):
# pass the attributes
fidx = 0
for idx in range(len(columns)):
- if not columns[idx]._phantom:
- # get the data for each column object from the rec.recarray
- columns[idx].array = data.field(fidx)
- fidx += 1
+ # get the data for each column object from the rec.recarray
+ columns[idx].array = data.field(fidx)
+ fidx += 1
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
+ def _update_column_added(self, columns, column):
+ """
+ Update the data upon addition of a new column through the `ColDefs`
+ interface.
+ """
+
+ # TODO: It's not clear that this actually works--it probably does not.
+ # This is what the code used to do before introduction of the
+ # notifier interface, but I don't believe it actually worked (there are
+ # several bug reports related to this...)
+ if self._data_loaded:
+ del self.data
+
+ def _update_column_removed(self, columns, col_idx):
+ """
+ Update the data upon removal of a column through the `ColDefs`
+ interface.
+ """
+
+ # For now this doesn't do anything fancy--it just deletes the data
+ # attribute so that it is forced to be recreated again. It doesn't
+ # change anything on the existing data recarray (this is also how this
+ # worked before introducing the notifier interface)
+ if self._data_loaded:
+ del self.data
+
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
@@ -349,7 +398,6 @@ class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
- data.formats = self.columns.formats
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@@ -410,6 +458,13 @@ class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
# setting self.__dict__['data']
return data
+ @property
+ def _nrows(self):
+ if not self._data_loaded:
+ return self._header.get('NAXIS2', 0)
+ else:
+ return len(self.data)
+
@lazyproperty
def _theap(self):
size = self._header['NAXIS1'] * self._header['NAXIS2']
@@ -519,28 +574,109 @@ class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
return (self.name, class_name, ncards, dims, format)
- def _clear_table_keywords(self):
- """Wipe out any existing table definition keywords from the header."""
+ def _update_column_removed(self, columns, idx):
+ super(_TableBaseHDU, self)._update_column_removed(columns, idx)
- # Go in reverse so as to not confusing indexing while deleting.
- for idx, keyword in reversed(list(enumerate(self._header.keys()))):
- keyword = TDEF_RE.match(keyword)
+ # Fix the header to reflect the column removal
+ self._clear_table_keywords(index=idx)
+
+ def _update_column_attribute_changed(self, column, col_idx, attr,
+ old_value, new_value):
+ """
+ Update the header when one of the column objects is updated.
+ """
+
+ # base_keyword is the keyword without the index such as TDIM
+ # while keyword is like TDIM1
+ base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
+ keyword = base_keyword + str(col_idx + 1)
+
+ if keyword in self._header:
+ if new_value is None:
+ # If the new value is None, i.e. None was assigned to the
+ # column attribute, then treat this as equivalent to deleting
+ # that attribute
+ del self._header[keyword]
+ else:
+ self._header[keyword] = new_value
+ else:
+ keyword_idx = KEYWORD_NAMES.index(base_keyword)
+ # Determine the appropriate keyword to insert this one before/after
+ # if it did not already exist in the header
+ for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
+ before_keyword += str(col_idx + 1)
+ if before_keyword in self._header:
+ self._header.insert(before_keyword, (keyword, new_value),
+ after=True)
+ break
+ else:
+ for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
+ after_keyword += str(col_idx + 1)
+ if after_keyword in header:
+ self._header.insert(after_keyword,
+ (keyword, new_value))
+ break
+ else:
+ # Just append
+ self._header[keyword] = new_value
+
+ def _clear_table_keywords(self, index=None):
+ """
+ Wipe out any existing table definition keywords from the header.
+
+ If specified, only clear keywords for the given table index (shifting
+ up keywords for any other columns). The index is zero-based.
+ Otherwise keywords for all columns.
+ """
+
+ # First collect all the table structure related keyword in the header
+ # into a single list so we can then sort them by index, which will be
+ # useful later for updating the header in a sensible order (since the
+ # header *might* not already be written in a reasonable order)
+ table_keywords = []
+
+ for idx, keyword in enumerate(self._header.keys()):
+ match = TDEF_RE.match(keyword)
try:
- keyword = keyword.group('label')
+ base_keyword = match.group('label')
except:
continue # skip if there is no match
- if keyword in KEYWORD_NAMES:
+
+ if base_keyword in KEYWORD_TO_ATTRIBUTE:
+ num = int(match.group('num')) - 1 # convert to zero-base
+ table_keywords.append((idx, match.group(0), base_keyword,
+ num))
+
+ # First delete
+ for idx, keyword, _, num in sorted(table_keywords,
+ key=lambda k: k[0], reverse=True):
+ if index is None or index == num:
del self._header[idx]
+ # Now shift up remaining column keywords if only one column was cleared
+ if index is not None:
+ for _, keyword, base_keyword, num in sorted(table_keywords,
+ key=lambda k: k[3]):
+ if num <= index:
+ continue
+
+ old_card = self._header.cards[keyword]
+ new_card = (base_keyword + str(num), old_card.value,
+ old_card.comment)
+ self._header.insert(keyword, new_card)
+ del self._header[keyword]
+
+ # Also decrement TFIELDS
+ if 'TFIELDS' in self._header:
+ self._header['TFIELDS'] -= 1
+
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
- cols = self.columns
-
- for idx in range(len(cols)):
- for attr, keyword in zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES):
- val = getattr(cols, attr + 's')[idx]
- if val:
+ for idx, column in enumerate(self.columns):
+ for keyword, attr in six.iteritems(KEYWORD_TO_ATTRIBUTE):
+ val = getattr(column, attr)
+ if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
@@ -572,8 +708,7 @@ class TableHDU(_TableBaseHDU):
def _get_tbdata(self):
columns = self.columns
- names = [n for idx, n in enumerate(columns.names)
- if not columns[idx]._phantom]
+ names = [n for idx, n in enumerate(columns.names)]
# determine if there are duplicate field names and if there
# are throw an exception
@@ -598,7 +733,7 @@ class TableHDU(_TableBaseHDU):
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
- raw_data = self._get_raw_data(columns._shape, dtype, self._data_offset)
+ raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
@@ -1170,6 +1305,9 @@ class BinTableHDU(_TableBaseHDU):
# new_table() could use a similar feature.
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
+
+ # TODO: It seems to me a lot of this could/should be handled from
+ # within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
@@ -1182,7 +1320,8 @@ class BinTableHDU(_TableBaseHDU):
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
- data._convert[idx] = _makep(arr, arr, recformats[idx])
+ name = data.columns.names[idx]
+ data._converted[name] = _makep(arr, arr, recformats[idx])
def format_value(col, val):
# Special formatting for a couple particular data types
diff --git a/astropy/io/fits/src/compressionmodule.c b/astropy/io/fits/src/compressionmodule.c
index cd7ae9a..1014d12 100644
--- a/astropy/io/fits/src/compressionmodule.c
+++ b/astropy/io/fits/src/compressionmodule.c
@@ -403,23 +403,6 @@ int get_header_longlong(PyObject* header, char* keyword, long long* val,
}
-void* compression_realloc(void* ptr, size_t size) {
- // This realloc()-like function actually just mallocs the requested
- // size and copies from the original memory address into the new one, and
- // returns the newly malloc'd address.
- // This is generally less efficient than an actual realloc(), but the
- // problem with using realloc in this case is that when it succeeds it will
- // free() the original memory, which may still be in use by the ndarray
- // using that memory as its data buffer. This seems like the least hacky
- // way around that for now.
- // I'm open to other ideas though.
- void* tmp;
- tmp = malloc(size);
- memcpy(tmp, ptr, size);
- return tmp;
-}
-
-
void tcolumns_from_header(fitsfile* fileptr, PyObject* header,
tcolumn** columns) {
// Creates the array of tcolumn structures from the table column keywords
@@ -441,7 +424,10 @@ void tcolumns_from_header(fitsfile* fileptr, PyObject* header,
get_header_int(header, "TFIELDS", &tfields, 0);
- *columns = column = PyMem_New(tcolumn, (size_t) tfields);
+ // This used to use PyMem_New, but don't do that; CFITSIO will later
+ // free() this object when the file is closed, so just use malloc here
+ // *columns = column = PyMem_New(tcolumn, (size_t) tfields);
+ *columns = column = calloc((size_t) tfields, sizeof(tcolumn));
if (column == NULL) {
return;
}
@@ -598,6 +584,7 @@ void configure_compression(fitsfile* fileptr, PyObject* header) {
znaxis = MAX_COMPRESS_DIM;
}
+ Fptr->tilerow = NULL;
Fptr->maxtilelen = 1;
for (idx = 1; idx <= znaxis; idx++) {
snprintf(keyword, 9, "ZNAXIS%u", idx);
@@ -823,7 +810,7 @@ fail:
void open_from_hdu(fitsfile** fileptr, void** buf, size_t* bufsize,
- PyObject* hdu, tcolumn** columns) {
+ PyObject* hdu, tcolumn** columns, int mode) {
PyObject* header = NULL;
FITSfile* Fptr;
@@ -859,6 +846,7 @@ void open_from_hdu(fitsfile** fileptr, void** buf, size_t* bufsize,
Fptr = (*fileptr)->Fptr;
// Now we have some fun munging some of the elements in the fitsfile struct
+ Fptr->writemode = mode;
Fptr->open_count = 1;
Fptr->hdutype = BINARY_TBL; /* This is a binary table HDU */
Fptr->lasthdu = 1;
@@ -929,7 +917,7 @@ PyObject* compression_compress_hdu(PyObject* self, PyObject* args)
return NULL;
}
- open_from_hdu(&fileptr, &outbuf, &outbufsize, hdu, &columns);
+ open_from_hdu(&fileptr, &outbuf, &outbufsize, hdu, &columns, READWRITE);
if (PyErr_Occurred()) {
goto fail;
}
@@ -1026,7 +1014,7 @@ PyObject* compression_decompress_hdu(PyObject* self, PyObject* args)
long arrsize;
unsigned int idx;
- fitsfile* fileptr;
+ fitsfile* fileptr = NULL;
int anynul = 0;
int status = 0;
@@ -1048,7 +1036,7 @@ PyObject* compression_decompress_hdu(PyObject* self, PyObject* args)
return Py_None;
}
- open_from_hdu(&fileptr, &inbuf, &inbufsize, hdu, &columns);
+ open_from_hdu(&fileptr, &inbuf, &inbufsize, hdu, &columns, READONLY);
if (PyErr_Occurred()) {
return NULL;
}
@@ -1078,10 +1066,15 @@ PyObject* compression_decompress_hdu(PyObject* self, PyObject* args)
}
fail:
+ // CFITSIO will free this object in the ffchdu function by way of
+ // fits_close_file; we need to let CFITSIO handle this so that it also
+ // cleans up the compressed tile cache
+ /*
if (columns != NULL) {
PyMem_Free(columns);
fileptr->Fptr->tableptr = NULL;
}
+ */
if (fileptr != NULL) {
status = 1;// Disable header-related errors
diff --git a/astropy/io/fits/tests/data/tdim.fits b/astropy/io/fits/tests/data/tdim.fits
new file mode 100644
index 0000000..b70e48c
Binary files /dev/null and b/astropy/io/fits/tests/data/tdim.fits differ
diff --git a/astropy/io/fits/tests/test_checksum.py b/astropy/io/fits/tests/test_checksum.py
index 42f8066..d5b1561 100644
--- a/astropy/io/fits/tests/test_checksum.py
+++ b/astropy/io/fits/tests/test_checksum.py
@@ -353,7 +353,7 @@ class TestChecksumFunctions(FitsTestCase):
with fits.open(self.temp('checksum.fits')) as hdul:
assert 'CHECKSUM' in hdul[1].header
assert 'DATASUM' in hdul[1].header
- assert (data == hdul[1].data).all()
+ assert comparerecords(data, hdul[1].data)
def test_open_update_mode_update_checksum(self):
"""
diff --git a/astropy/io/fits/tests/test_image.py b/astropy/io/fits/tests/test_image.py
index 2462558..5c54264 100644
--- a/astropy/io/fits/tests/test_image.py
+++ b/astropy/io/fits/tests/test_image.py
@@ -859,7 +859,7 @@ class TestCompressedImage(FitsTestCase):
"""
hdu = fits.CompImageHDU()
- hdu.data is None
+ assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
diff --git a/astropy/io/fits/tests/test_table.py b/astropy/io/fits/tests/test_table.py
index 0bf6aca..99d6a29 100644
--- a/astropy/io/fits/tests/test_table.py
+++ b/astropy/io/fits/tests/test_table.py
@@ -1529,6 +1529,7 @@ class TestTableFunctions(FitsTestCase):
# The ORBPARM column should not be in the data, though the data should
# be readable
assert 'ORBPARM' in tbhdu.data.names
+ assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
@@ -1552,6 +1553,7 @@ class TestTableFunctions(FitsTestCase):
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
+ assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
@@ -1758,6 +1760,12 @@ class TestTableFunctions(FitsTestCase):
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
+ def test_tdim_of_size_one(self):
+ """Regression test for https://github.com/astropy/astropy/pull/3580"""
+
+ hdulist = fits.open(self.data('tdim.fits'))
+ assert hdulist[1].data['V_mag'].shape == (3,1,1)
+
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
@@ -1787,9 +1795,10 @@ class TestTableFunctions(FitsTestCase):
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
- assert (s1 == s2).all()
- assert (s2 == s3).all()
- assert (s3 == s4).all()
+
+ assert comparerecords(s1, s2)
+ assert comparerecords(s2, s3)
+ assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
@@ -1822,9 +1831,9 @@ class TestTableFunctions(FitsTestCase):
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
- assert (s1 == s2).all()
- assert (s2 == s3).all()
- assert (s3 == s4).all()
+ assert comparerecords(s1, s2)
+ assert comparerecords(s2, s3)
+ assert comparerecords(s3, s4)
def test_dump_load_round_trip(self):
"""
@@ -2149,6 +2158,56 @@ class TestTableFunctions(FitsTestCase):
field = hdu.data.field(1)
assert field.shape == (0,)
+ def test_dim_column_byte_order_mismatch(self):
+ """
+ When creating a table column with non-trivial TDIMn, and
+ big-endian array data read from an existing FITS file, the data
+ should not be unnecessarily byteswapped.
+
+ Regression test for https://github.com/astropy/astropy/issues/3561
+ """
+
+ data = fits.getdata(self.data('random_groups.fits'))['DATA']
+ col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
+ format='1152E')
+ thdu = fits.BinTableHDU.from_columns([col])
+ thdu.writeto(self.temp('test.fits'))
+
+ with fits.open(self.temp('test.fits')) as hdul:
+ assert np.all(hdul[1].data['TEST'] == data)
+
+ def test_fits_rec_from_existing(self):
+ """
+ Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
+ from an existing `FITS_rec` object read from a FITS file.
+
+ This ensures that the per-column arrays are updated properly.
+
+ Regression test for https://github.com/spacetelescope/PyFITS/issues/99
+ """
+
+ # The use case that revealed this problem was trying to create a new
+ # table from an existing table, but with additional rows so that we can
+ # append data from a second table (with the same column structure)
+
+ data1 = fits.getdata(self.data('tb.fits'))
+ data2 = fits.getdata(self.data('tb.fits'))
+ nrows = len(data1) + len(data2)
+
+ merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
+ merged[len(data1):] = data2
+ mask = merged['c1'] > 1
+ masked = merged[mask]
+
+ # The test table only has two rows, only the second of which is > 1 for
+ # the 'c1' column
+ assert comparerecords(data1[1:], masked[:1])
+ assert comparerecords(data1[1:], masked[1:])
+
+ # Double check that the original data1 table hasn't been affected by
+ # its use in creating the "merged" table
+ assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
+
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
@@ -2496,3 +2555,64 @@ class TestColumnFunctions(FitsTestCase):
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
assert comparerecords(zwc_pl, zwc[2].data)
+
+ def test_column_lookup_by_name(self):
+ """Tests that a `ColDefs` can be indexed by column name."""
+
+ a = fits.Column(name='a', format='D')
+ b = fits.Column(name='b', format='D')
+
+ cols = fits.ColDefs([a, b])
+
+ assert cols['a'] == cols[0]
+ assert cols['b'] == cols[1]
+
+ def test_column_attribute_change_after_removal(self):
+ """
+ This is a test of the column attribute change notification system.
+
+ After a column has been removed from a table (but other references
+ are kept to that same column) changes to that column's attributes
+ should not trigger a notification on the table it was removed from.
+ """
+
+ # One way we can check this is to ensure there are no further changes
+ # to the header
+ table = fits.BinTableHDU.from_columns([
+ fits.Column('a', format='D'),
+ fits.Column('b', format='D')])
+
+ b = table.columns['b']
+
+ table.columns.del_col('b')
+ assert table.data.dtype.names == ('a',)
+
+ b.name = 'HELLO'
+
+ assert b.name == 'HELLO'
+ assert 'TTYPE2' not in table.header
+ assert table.header['TTYPE1'] == 'a'
+ assert table.columns.names == ['a']
+
+ with pytest.raises(KeyError):
+ table.columns['b']
+
+ # Make sure updates to the remaining column still work
+ table.columns.change_name('a', 'GOODBYE')
+ with pytest.raises(KeyError):
+ table.columns['a']
+
+ assert table.columns['GOODBYE'].name == 'GOODBYE'
+ assert table.data.dtype.names == ('GOODBYE',)
+ assert table.columns.names == ['GOODBYE']
+ assert table.data.columns.names == ['GOODBYE']
+
+ table.columns['GOODBYE'].name = 'foo'
+ with pytest.raises(KeyError):
+ table.columns['GOODBYE']
+
+ assert table.columns['foo'].name == 'foo'
+ assert table.data.dtype.names == ('foo',)
+ assert table.columns.names == ['foo']
+ assert table.data.columns.names == ['foo']
+
diff --git a/astropy/io/fits/util.py b/astropy/io/fits/util.py
index cc84e5d..037a773 100644
--- a/astropy/io/fits/util.py
+++ b/astropy/io/fits/util.py
@@ -2,11 +2,12 @@
from __future__ import division
-import gzip
+import gzip as _system_gzip
import itertools
import io
import mmap
import os
+import platform
import signal
import string
import sys
@@ -14,7 +15,8 @@ import tempfile
import textwrap
import threading
import warnings
-import platform
+import weakref
+
from distutils.version import LooseVersion
import numpy as np
@@ -32,15 +34,142 @@ from ...extern.six import (string_types, integer_types, text_type,
binary_type, next)
from ...extern.six.moves import zip
from ...utils import wraps
+from ...utils.compat import gzip as _astropy_gzip
from ...utils.exceptions import AstropyUserWarning
+_GZIP_FILE_TYPES = (_astropy_gzip.GzipFile, _system_gzip.GzipFile)
+
+
if six.PY3:
cmp = lambda a, b: (a > b) - (a < b)
elif six.PY2:
cmp = cmp
+class NotifierMixin(object):
+ """
+ Mixin class that provides services by which objects can register
+ listeners to changes on that object.
+
+ All methods provided by this class are underscored, since this is intended
+ for internal use to communicate between classes in a generic way, and is
+ not machinery that should be exposed to users of the classes involved.
+
+ Use the ``_add_listener`` method to register a listener on an instance of
+ the notifier. This registers the listener with a weak reference, so if
+ no other references to the listener exist it is automatically dropped from
+ the list and does not need to be manually removed.
+
+ Call the ``_notify`` method on the notifier to update all listeners
+ upon changes. ``_notify('change_type', *args, **kwargs)`` results
+ in calling ``listener._update_change_type(*args, **kwargs)`` on all
+ listeners subscribed to that notifier.
+
+ If a particular listener does not have the appropriate update method
+ it is ignored.
+
+ Examples
+ --------
+
+ >>> class Widget(NotifierMixin):
+ ... state = 1
+ ... def __init__(self, name):
+ ... self.name = name
+ ... def update_state(self):
+ ... self.state += 1
+ ... self._notify('widget_state_changed', self)
+ ...
+ >>> class WidgetListener(object):
+ ... def _update_widget_state_changed(self, widget):
+ ... print('Widget {0} changed state to {1}'.format(
+ ... widget.name, widget.state))
+ ...
+ >>> widget = Widget('fred')
+ >>> listener = WidgetListener()
+ >>> widget._add_listener(listener)
+ >>> widget.update_state()
+ Widget fred changed state to 2
+ """
+
+ _listeners = None
+
+ def _add_listener(self, listener):
+ """
+ Add an object to the list of listeners to notify of changes to this
+ object. This adds a weakref to the list of listeners that is
+ removed from the listeners list when the listener has no other
+ references to it.
+ """
+
+ if self._listeners is None:
+ self._listeners = []
+
+ def remove(p):
+ self._listeners.remove(p)
+
+ # Make sure this object is not already in the listeners:
+ for ref in self._listeners:
+ if ref() is listener:
+ return
+
+ ref = weakref.ref(listener, remove)
+ self._listeners.append(ref)
+
+ def _remove_listener(self, listener):
+ """
+ Removes the specified listener from the listeners list. This relies
+ on object identity (i.e. the ``is`` operator).
+ """
+
+ if self._listeners is None:
+ return
+
+ for idx, ref in enumerate(self._listeners[:]):
+ if ref() is listener:
+ del self._listeners[idx]
+ break
+
+ def _notify(self, notification, *args, **kwargs):
+ """
+ Notify all listeners of some particular state change by calling their
+ ``_update_<notification>`` method with the given ``*args`` and
+ ``**kwargs``.
+
+ The notification does not by default include the object that actually
+ changed (``self``), but it certainly may if required.
+ """
+
+ if self._listeners is None:
+ return
+
+ method_name = '_update_{0}'.format(notification)
+ for listener in self._listeners:
+ listener = listener() # dereference weakref
+ if hasattr(listener, method_name):
+ method = getattr(listener, method_name)
+ if callable(method):
+ method(*args, **kwargs)
+
+ def __getstate__(self):
+ """
+ Exclude listeners when saving the listener's state, since they may be
+ ephemeral.
+ """
+
+ # TODO: This hasn't come up often, but if anyone needs to pickle HDU
+ # objects it will be necessary when HDU objects' states are restored to
+ # re-register themselves as listeners on their new column instances.
+ try:
+ state = super(NotifierMixin, self).__getstate__()
+ except AttributeError:
+ # Chances are the super object doesn't have a getstate
+ state = self.__dict__.copy()
+
+ state['_listeners'] = None
+ return state
+
+
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
@@ -299,6 +428,15 @@ def fileobj_name(f):
if isinstance(f, string_types):
return f
+ elif isinstance(f, _GZIP_FILE_TYPES):
+ # The .name attribute on GzipFiles does not always represent the name
+ # of the file being read/written--it can also represent the original
+ # name of the file being compressed
+ # See the documentation at
+ # https://docs.python.org/3/library/gzip.html#gzip.GzipFile
+ # As such, for gzip files only return the name of the underlying
+ # fileobj, if it exists
+ return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
@@ -358,11 +496,11 @@ def _fileobj_normalize_mode(f):
# normalize it for them:
mode = f.mode
- if isinstance(f, gzip.GzipFile):
+ if isinstance(f, _GZIP_FILE_TYPES):
# GzipFiles can be either readonly or writeonly
- if mode == gzip.READ:
+ if mode == _system_gzip.READ:
return 'rb'
- elif mode == gzip.WRITE:
+ elif mode == _system_gzip.WRITE:
return 'wb'
else:
# This shouldn't happen?
diff --git a/astropy/io/votable/tests/data/empty_table.xml b/astropy/io/votable/tests/data/empty_table.xml
new file mode 100644
index 0000000..d35cdef
--- /dev/null
+++ b/astropy/io/votable/tests/data/empty_table.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<VOTABLE version="1.2"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:noNamespaceSchemaLocation="xmlns:http://www.ivoa.net/xml/VOTable/VOTable-1.2.xsd"
+xmlns="http://www.ivoa.net/xml/VOTable/v1.2">
+ <RESOURCE>
+ <TABLE>
+ <FIELD ID="unsignedByte" name="unsignedByte" datatype="unsignedByte"/>
+ <FIELD ID="short" name="short" datatype="short"/>
+ </TABLE>
+ </RESOURCE>
+</VOTABLE>
diff --git a/astropy/io/votable/tests/table_test.py b/astropy/io/votable/tests/table_test.py
index 6b4fdfb..0cce7f3 100644
--- a/astropy/io/votable/tests/table_test.py
+++ b/astropy/io/votable/tests/table_test.py
@@ -143,3 +143,11 @@ def test_write_with_format():
t.write(output, format='votable', tabledata_format="binary2")
assert b'BINARY2' in output.getvalue()
assert b'TABLEDATA' not in output.getvalue()
+
+
+def test_empty_table():
+ votable = parse(
+ get_pkg_data_filename('data/empty_table.xml'),
+ pedantic=False)
+ table = votable.get_first_table()
+ astropy_table = table.to_table()
diff --git a/astropy/io/votable/tree.py b/astropy/io/votable/tree.py
index ee17b0f..447cb38 100644
--- a/astropy/io/votable/tree.py
+++ b/astropy/io/votable/tree.py
@@ -2341,6 +2341,9 @@ class Table(Element, _IDProperty, _NameProperty, _UcdProperty,
elif tag == 'TABLE':
# For error checking purposes
Field.uniqify_names(self.fields)
+ # We still need to create arrays, even if the file
+ # contains no DATA section
+ self.create_arrays(nrows=0, config=config)
return self
self.create_arrays(nrows=self._nrows, config=config)
diff --git a/astropy/modeling/core.py b/astropy/modeling/core.py
index b95d85b..5f52a03 100644
--- a/astropy/modeling/core.py
+++ b/astropy/modeling/core.py
@@ -913,7 +913,7 @@ class Model(object):
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
- self._constraints[constraint] = values
+ self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
@@ -1660,7 +1660,18 @@ class _CompoundModelMeta(_ModelMeta):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
- return new_cls()
+ instance = new_cls()
+
+ # Workaround for https://github.com/astropy/astropy/issues/3542
+ # TODO: Any effort to restructure the tree-like data structure for
+ # compound models should try to obviate this workaround--if
+ # intermediate compound models are stored in the tree as well then
+ # we can immediately check for custom inverses on sub-models when
+ # computing the inverse
+ instance._custom_inverse = mcls._make_custom_inverse(
+ operator, left, right)
+
+ return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@@ -1675,6 +1686,48 @@ class _CompoundModelMeta(_ModelMeta):
# _ModelMeta._handle_backwards_compat
return
+ @classmethod
+ def _make_custom_inverse(mcls, operator, left, right):
+ """
+ Generates an inverse `Model` for this `_CompoundModel` when either
+ model in the operation has a *custom inverse* that was manually
+ assigned by the user.
+
+ If either model has a custom inverse, and in particular if another
+ `_CompoundModel` has a custom inverse, then none of that model's
+ sub-models should be considered at all when computing the inverse.
+ So in that case we just compute the inverse ahead of time and set
+ it as the new compound model's custom inverse.
+
+ Note, this use case only applies when combining model instances,
+ since model classes don't currently have a notion of a "custom
+ inverse" (though it could probably be supported by overriding the
+ class's inverse property).
+
+ TODO: Consider fixing things so the aforementioned class-based case
+ works as well. However, for the present purposes this is good enough.
+ """
+
+ if not (operator in ('&', '|') and
+ (left._custom_inverse or right._custom_inverse)):
+ # These are the only operators that support an inverse right now
+ return None
+
+ try:
+ left_inv = left.inverse
+ right_inv = right.inverse
+ except NotImplementedError:
+ # If either inverse is undefined then just return False; this
+ # means the normal _CompoundModel.inverse routine will fail
+ # naturally anyways, since it requires all sub-models to have
+ # an inverse defined
+ return None
+
+ if operator == '&':
+ return left_inv & right_inv
+ else:
+ return right_inv | left_inv
+
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
@@ -2365,7 +2418,7 @@ def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
try:
input_broadcast = check_broadcast(*all_shapes)
- except IncompatibleShapesError as exc:
+ except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
diff --git a/astropy/modeling/fitting.py b/astropy/modeling/fitting.py
index 7be75a4..8b8dca7 100644
--- a/astropy/modeling/fitting.py
+++ b/astropy/modeling/fitting.py
@@ -491,6 +491,7 @@ class LevMarLSQFitter(object):
constraints, instead of using p directly, we set the parameter list in
this function.
"""
+
if any(model.fixed.values()) or any(model.tied.values()):
if z is None:
@@ -508,7 +509,7 @@ class LevMarLSQFitter(object):
if not model.col_fit_deriv:
full_deriv = np.asarray(full_deriv).T
- residues = np.asarray(full_deriv[np.nonzero(ind)])
+ residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
diff --git a/astropy/modeling/functional_models.py b/astropy/modeling/functional_models.py
index 4839967..dd5a1a3 100644
--- a/astropy/modeling/functional_models.py
+++ b/astropy/modeling/functional_models.py
@@ -170,16 +170,16 @@ class Gaussian2D(Fittable2DModel):
y_mean : float
Mean of the Gaussian in y.
x_stddev : float
- Standard deviation of the Gaussian in x.
+ Standard deviation of the Gaussian in x before rotating by theta.
``x_stddev`` and ``y_stddev`` must be specified unless a covariance
matrix (``cov_matrix``) is input.
y_stddev : float
- Standard deviation of the Gaussian in y.
+ Standard deviation of the Gaussian in y before rotating by theta.
``x_stddev`` and ``y_stddev`` must be specified unless a covariance
matrix (``cov_matrix``) is input.
theta : float, optional
Rotation angle in radians. The rotation angle increases
- counterclockwise.
+ counterclockwise, from the positive x-axis.
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` specification.
diff --git a/astropy/modeling/polynomial.py b/astropy/modeling/polynomial.py
index 9a07a8e..012abeb 100644
--- a/astropy/modeling/polynomial.py
+++ b/astropy/modeling/polynomial.py
@@ -12,7 +12,7 @@ import numpy as np
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
-from .utils import poly_map_domain, comb
+from .utils import poly_map_domain, comb, check_broadcast
from ..utils import lazyproperty, indent
@@ -78,14 +78,6 @@ class PolynomialBase(FittableModel):
else:
super(PolynomialBase, self).__setattr__(attr, value)
- def _validate_params(self, **params):
- valid_params = set(self._param_names)
- provided_params = set(params)
- intersection = valid_params.intersection(provided_params)
- if len(intersection) != len(provided_params):
- diff = list(provided_params.difference(valid_params))
- raise TypeError('Unrecognized input parameters: %s' % diff)
-
class PolynomialModel(PolynomialBase):
"""
@@ -102,9 +94,6 @@ class PolynomialModel(PolynomialBase):
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
- if params:
- self._validate_params(**params)
-
super(PolynomialModel, self).__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name,
meta=meta, **params)
@@ -126,8 +115,8 @@ class PolynomialModel(PolynomialBase):
Return the number of coefficients in one parameter set
"""
- if self.degree < 1 or self.degree > 16:
- raise ValueError("Degree of polynomial must be 1< deg < 16")
+ if self.degree < 0:
+ raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
@@ -209,9 +198,6 @@ class OrthoPolynomialBase(PolynomialBase):
self.y_window = y_window
self._param_names = self._generate_coeff_names()
- if params:
- self._validate_params(**params)
-
super(OrthoPolynomialBase, self).__init__(
n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
@@ -380,11 +366,12 @@ class Chebyshev1D(PolynomialModel):
x = np.array(x, dtype=np.float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
- v[0] = x * 0 + 1
- x2 = 2 * x
- v[1] = x
- for i in range(2, self.degree + 1):
- v[i] = v[i - 1] * x2 - v[i - 2]
+ v[0] = 1
+ if self.degree > 0:
+ x2 = 2 * x
+ v[1] = x
+ for i in range(2, self.degree + 1):
+ v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
@@ -486,10 +473,11 @@ class Legendre1D(PolynomialModel):
x = np.array(x, dtype=np.float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
- v[0] = x * 0 + 1
- v[1] = x
- for i in range(2, self.degree + 1):
- v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
+ v[0] = 1
+ if self.degree > 0:
+ v[1] = x
+ for i in range(2, self.degree + 1):
+ v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
@@ -574,10 +562,11 @@ class Polynomial1D(PolynomialModel):
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=np.float)
- v[0] = x * 0 + 1
- v[1] = x
- for i in range(2, self.degree + 1):
- v[i] = v[i - 1] * x
+ v[0] = 1
+ if self.degree > 0:
+ v[1] = x
+ for i in range(2, self.degree + 1):
+ v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
@@ -650,7 +639,19 @@ class Polynomial2D(PolynomialModel):
def evaluate(self, x, y, *coeffs):
invcoeff = self.invlex_coeff(coeffs)
- return self.multivariate_horner(x, y, invcoeff)
+ result = self.multivariate_horner(x, y, invcoeff)
+
+ # Special case for degree==0 to ensure that the shape of the output is
+ # still as expected by the broadcasting rules, even though the x and y
+ # inputs are not used in the evaluation
+ if self.degree == 0:
+ output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
+ if output_shape:
+ new_result = np.empty(output_shape)
+ new_result[:] = result
+ result = new_result
+
+ return result
def fit_deriv(self, x, y, *params):
"""
@@ -702,7 +703,7 @@ class Polynomial2D(PolynomialModel):
name = 'c{0}_{1}'.format(j, i)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
- return np.array(invlex_coeffs[::-1])
+ return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
@@ -714,7 +715,7 @@ class Polynomial2D(PolynomialModel):
coeff : array of coefficients in inverse lexical order
"""
- alpha = np.array(self._invlex())
+ alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
@@ -968,9 +969,6 @@ class _SIP1D(PolynomialBase):
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
- if params:
- self._validate_params(**params)
-
super(_SIP1D, self).__init__(n_models=n_models,
model_set_axis=model_set_axis,
name=name, meta=meta, **params)
diff --git a/astropy/modeling/projections.py b/astropy/modeling/projections.py
index 42111f8..2f5d52f 100644
--- a/astropy/modeling/projections.py
+++ b/astropy/modeling/projections.py
@@ -1,4 +1,5 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
+# -*- coding: utf-8 -*-
"""
Implements projections--particularly sky projections defined in WCS Paper II
@@ -30,6 +31,7 @@ projcodes = ['TAN', 'AZP', 'SZP', 'STG', 'SIN', 'ARC', 'ZPN', 'ZEA', 'AIR',
__all__ = ['Projection', 'Pix2SkyProjection', 'Sky2PixProjection',
+ 'Zenithal', 'Cylindrical',
'Pix2Sky_AZP', 'Sky2Pix_AZP', 'Pix2Sky_CAR', 'Sky2Pix_CAR',
'Pix2Sky_CEA', 'Sky2Pix_CEA', 'Pix2Sky_CYP', 'Sky2Pix_CYP',
'Pix2Sky_MER', 'Sky2Pix_MER',
@@ -66,12 +68,41 @@ class Sky2PixProjection(Projection):
class Zenithal(Projection):
- """Base class for all Zenithal projections."""
+ r"""Base class for all Zenithal projections.
+ Zenithal (or azimuthal) projections map the sphere directly onto a
+ plane. All zenithal projections are specified by defining the
+ radius as a function of native latitude, :math:`R_\theta`.
-class Pix2Sky_AZP(Pix2SkyProjection, Zenithal):
+ The pixel-to-sky transformation is defined as:
+
+ .. math::
+ \phi &= \arg(-y, x) \\
+ R_\theta &= \sqrt{x^2 + y^2}
+
+ and the inverse (sky-to-pixel) is defined as:
+
+ .. math::
+ x &= R_\theta \sin \phi \\
+ y &= R_\theta \cos \phi
"""
- AZP : Zenital perspective projection - pixel to sky.
+
+
+class Pix2Sky_AZP(Pix2SkyProjection, Zenithal):
+ r"""
+ AZP : Zenithal perspective projection - pixel to sky.
+
+ .. math::
+ \phi &= \arg(-y \cos \gamma, x) \\
+ \theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
+
+ where:
+
+ .. math::
+ \psi &= \arg(\rho, 1) \\
+ \omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
+ \rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
+ R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
--------------
@@ -138,14 +169,24 @@ class Pix2Sky_AZP(Pix2SkyProjection, Zenithal):
class Sky2Pix_AZP(Sky2PixProjection, Zenithal):
- """
+ r"""
AZP : Zenital perspective projection - sky to pixel.
+ .. math::
+ x &= R \sin \phi \\
+ y &= -R \sec \gamma \cos \theta
+
+ where:
+
+ .. math::
+ R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
+
Parameters
- --------------
+ ----------
mu : float
distance from point of projection to center of sphere
in spherical radii, default is 0.
+
gamma : float
look angle in deg, default is 0.
"""
@@ -185,8 +226,13 @@ class Sky2Pix_AZP(Sky2PixProjection, Zenithal):
class Pix2Sky_TAN(Pix2SkyProjection, Zenithal):
- """
+ r"""
TAN : Gnomonic projection - pixel to sky.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ \theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
@property
@@ -207,8 +253,13 @@ class Pix2Sky_TAN(Pix2SkyProjection, Zenithal):
class Sky2Pix_TAN(Sky2PixProjection, Zenithal):
- """
+ r"""
TAN : Gnomonic Projection - sky to pixel.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
@property
@@ -232,8 +283,13 @@ class Sky2Pix_TAN(Sky2PixProjection, Zenithal):
class Pix2Sky_STG(Pix2SkyProjection, Zenithal):
- """
+ r"""
STG : Stereographic Projection - pixel to sky.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ \theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
@property
@@ -254,8 +310,13 @@ class Pix2Sky_STG(Pix2SkyProjection, Zenithal):
class Sky2Pix_STG(Sky2PixProjection, Zenithal):
- """
+ r"""
STG : Stereographic Projection - sky to pixel.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
@property
@@ -279,8 +340,13 @@ class Sky2Pix_STG(Sky2PixProjection, Zenithal):
class Pix2Sky_SIN(Pix2SkyProjection, Zenithal):
- """
+ r"""
SIN : Slant orthographic projection - pixel to sky.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ \theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
"""
@property
@@ -301,8 +367,13 @@ class Pix2Sky_SIN(Pix2SkyProjection, Zenithal):
class Sky2Pix_SIN(Sky2PixProjection, Zenithal):
- """
+ r"""
SIN : Slant orthographic projection - sky to pixel.
+
+ See `Zenithal` for a definition of the full transformation.
+
+ .. math::
+ R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
"""
@property
@@ -325,12 +396,34 @@ class Sky2Pix_SIN(Sky2PixProjection, Zenithal):
class Cylindrical(Projection):
- """Base class for Cylindrical projections."""
+ r"""Base class for Cylindrical projections.
+
+ Cylindrical projections are so-named because the surface of
+ projection is a cylinder.
+ """
class Pix2Sky_CYP(Pix2SkyProjection, Cylindrical):
- """
+ r"""
CYP : Cylindrical perspective - pixel to sky.
+
+ .. math::
+ \phi &= \frac{x}{\lambda} \\
+ \theta &= \arg(1, \eta) + \sin{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
+
+ where:
+
+ .. math::
+ \eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
+
+ Parameters
+ ----------
+ mu : float
+ distance from center of sphere in the direction opposite the
+ projected surface, in spherical radii, default is 0.
+
+ lam : float
+ radius of the cylinder in spherical radii, default is 0.
"""
def _validate_mu(mu, model):
@@ -367,8 +460,21 @@ class Pix2Sky_CYP(Pix2SkyProjection, Cylindrical):
class Sky2Pix_CYP(Sky2PixProjection, Cylindrical):
- """
+ r"""
CYP : Cylindrical Perspective - sky to pixel.
+
+ .. math::
+ x &= \lambda \phi \\
+ y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
+
+ Parameters
+ ----------
+ mu : float
+ distance from center of sphere in the direction opposite the
+ projected surface, in spherical radii, default is 0.
+
+ lam : float
+ radius of the cylinder in spherical radii, default is 0.
"""
# TODO: Eliminate duplication on these
@@ -403,8 +509,17 @@ class Sky2Pix_CYP(Sky2PixProjection, Cylindrical):
class Pix2Sky_CEA(Pix2SkyProjection, Cylindrical):
- """
+ r"""
CEA : Cylindrical equal area projection - pixel to sky.
+
+ .. math::
+ \phi &= x \\
+ \theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
+
+ Parameters
+ ----------
+ lam : float
+ radius of the cylinder in spherical radii, default is 0.
"""
lam = Parameter(default=1)
@@ -422,8 +537,17 @@ class Pix2Sky_CEA(Pix2SkyProjection, Cylindrical):
class Sky2Pix_CEA(Sky2PixProjection, Cylindrical):
- """
+ r"""
CEA: Cylindrical equal area projection - sky to pixel.
+
+ .. math::
+ x &= \phi \\
+ y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
+
+ Parameters
+ ----------
+ lam : float
+ radius of the cylinder in spherical radii, default is 0.
"""
lam = Parameter(default=1)
@@ -442,8 +566,12 @@ class Sky2Pix_CEA(Sky2PixProjection, Cylindrical):
class Pix2Sky_CAR(Pix2SkyProjection, Cylindrical):
- """
- CAR: Plate carree projection - pixel to sky.
+ r"""
+ CAR: Plate carrée projection - pixel to sky.
+
+ .. math::
+ \phi &= x \\
+ \theta &= y
"""
@property
@@ -460,8 +588,12 @@ class Pix2Sky_CAR(Pix2SkyProjection, Cylindrical):
class Sky2Pix_CAR(Sky2PixProjection, Cylindrical):
- """
- CAR: Plate carree projection - sky to pixel.
+ r"""
+ CAR: Plate carrée projection - sky to pixel.
+
+ .. math::
+ x &= \phi \\
+ y &= \theta
"""
@property
@@ -478,8 +610,12 @@ class Sky2Pix_CAR(Sky2PixProjection, Cylindrical):
class Pix2Sky_MER(Pix2SkyProjection, Cylindrical):
- """
+ r"""
MER: Mercator - pixel to sky.
+
+ .. math::
+ \phi &= x \\
+ \theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
@property
@@ -495,8 +631,12 @@ class Pix2Sky_MER(Pix2SkyProjection, Cylindrical):
class Sky2Pix_MER(Sky2PixProjection, Cylindrical):
- """
+ r"""
MER: Mercator - sky to pixel.
+
+ .. math::
+ x &= \phi \\
+ y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
@property
diff --git a/astropy/modeling/tests/test_compound.py b/astropy/modeling/tests/test_compound.py
index 1be5745..d7918a5 100644
--- a/astropy/modeling/tests/test_compound.py
+++ b/astropy/modeling/tests/test_compound.py
@@ -789,3 +789,32 @@ def test_inherit_constraints(model):
assert model.mean_1.fixed is True
assert model[1].mean.fixed is True
assert model[1].fixed['mean'] is True
+
+
+def test_compound_custom_inverse():
+ """
+ Test that a compound model with a custom inverse has that inverse applied
+ when the inverse of another model, of which it is a component, is computed.
+ Regression test for https://github.com/astropy/astropy/issues/3542
+ """
+
+ poly = Polynomial1D(1, c0=1, c1=2)
+ scale = Scale(1)
+ shift = Shift(1)
+
+ model1 = poly | scale
+ model1.inverse = poly
+
+ # model1 now has a custom inverse (the polynomial itself, ignoring the
+ # trivial scale factor)
+ model2 = shift | model1
+
+ assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
+
+ # Make sure an inverse is not allowed if the models were combined with the
+ # wrong operator, or if one of the models doesn't have an inverse defined
+ with pytest.raises(NotImplementedError):
+ (shift + model1).inverse
+
+ with pytest.raises(NotImplementedError):
+ (model1 & poly).inverse
diff --git a/astropy/modeling/tests/test_polynomial.py b/astropy/modeling/tests/test_polynomial.py
index a52c289..44043bd 100644
--- a/astropy/modeling/tests/test_polynomial.py
+++ b/astropy/modeling/tests/test_polynomial.py
@@ -5,15 +5,20 @@
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
+
+from itertools import product
+
import numpy as np
-from numpy.testing import utils
+from numpy.testing.utils import assert_allclose
+
from .. import fitting
from ...tests.helper import pytest
from ... import wcs
from ...io import fits
from ..polynomial import (Chebyshev1D, Legendre1D, Polynomial1D,
- Chebyshev2D, Legendre2D, Polynomial2D, SIP)
+ Chebyshev2D, Legendre2D, Polynomial2D, SIP,
+ PolynomialBase, OrthoPolynomialBase)
from ..functional_models import Linear1D
from ...utils.data import get_pkg_data_filename
@@ -25,28 +30,52 @@ except ImportError:
linear1d = {
- Chebyshev1D: {'parameters': [3],
- 'kwargs': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2,
- 'domain': [1, 10]}},
- Legendre1D: {'parameters': [3],
- 'kwargs': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2,
- 'domain': [1, 10]}},
- Polynomial1D: {'parameters': [3],
- 'kwargs': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2}},
- Linear1D: {'parameters': [1.2, 23.1],
- 'kwargs': {}}
+ Chebyshev1D: {
+ 'args': (3,),
+ 'kwargs': {'domain': [1, 10]},
+ 'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
+ 'constraints': {'fixed': {'c0': 1.2}}
+ },
+ Legendre1D: {
+ 'args': (3,),
+ 'kwargs': {'domain': [1, 10]},
+ 'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
+ 'constraints': {'fixed': {'c0': 1.2}}
+ },
+ Polynomial1D: {
+ 'args': (3,),
+ 'kwargs': {'domain': [1, 10]},
+ 'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
+ 'constraints': {'fixed': {'c0': 1.2}}
+ },
+ Linear1D: {
+ 'args': (),
+ 'kwargs': {},
+ 'parameters': {'intercept': 1.2, 'slope': 23.1},
+ 'constraints': {'fixed': {'intercept': 1.2}}
+ }
}
linear2d = {
- Chebyshev2D: {'parameters': [1, 1],
- 'kwargs': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2,
- 'x_domain': [0, 99], 'y_domain': [0, 82]}},
- Legendre2D: {'parameters': [1, 1],
- 'kwargs': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2,
- 'x_domain': [0, 99], 'y_domain': [0, 82]}},
- Polynomial2D: {'parameters': [1],
- 'kwargs': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3}},
+ Chebyshev2D: {
+ 'args': (1, 1),
+ 'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
+ 'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
+ 'constraints': {'fixed': {'c0_0': 1.2}}
+ },
+ Legendre2D: {
+ 'args': (1, 1),
+ 'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
+ 'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
+ 'constraints': {'fixed': {'c0_0': 1.2}}
+ },
+ Polynomial2D: {
+ 'args': (1,),
+ 'kwargs': {},
+ 'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
+ 'constraints': {'fixed': {'c0_0': 1.2}}
+ }
}
@@ -66,54 +95,153 @@ class TestFitting(object):
self.linear_fitter = fitting.LinearLSQFitter()
self.non_linear_fitter = fitting.LevMarLSQFitter()
- @pytest.mark.parametrize(('model_class'), linear1d.keys())
- def test_linear_fitter_1D(self, model_class):
+ # TODO: Most of these test cases have some pretty repetitive setup that we
+ # could probably factor out
+
+ @pytest.mark.parametrize('model_class,constraints',
+ product(linear1d.keys(), (False, True)))
+ def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
- parameters = linear1d[model_class]['parameters']
- kwargs = linear1d[model_class]['kwargs']
- model = model_class(*parameters, **kwargs)
+ model_args = linear1d[model_class]
+ kwargs = {}
+ kwargs.update(model_args['kwargs'])
+ kwargs.update(model_args['parameters'])
+
+ if constraints:
+ kwargs.update(model_args['constraints'])
+
+ model = model_class(*model_args['args'], **kwargs)
+
y1 = model(self.x1)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
- utils.assert_allclose(model_lin.parameters, model.parameters,
- atol=0.2)
- @pytest.mark.parametrize(('model_class'), linear1d.keys())
- def test_non_linear_fitter_1D(self, model_class):
+ if constraints:
+ # For the constraints tests we're not checking the overall fit,
+ # just that the constraint was maintained
+ fixed = model_args['constraints'].get('fixed', None)
+ if fixed:
+ for param, value in fixed.items():
+ expected = model_args['parameters'][param]
+ assert getattr(model_lin, param).value == expected
+ else:
+ assert_allclose(model_lin.parameters, model.parameters,
+ atol=0.2)
+
+ @pytest.mark.parametrize('model_class,constraints',
+ product(linear1d.keys(), (False, True)))
+ def test_non_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
- parameters = linear1d[model_class]['parameters']
- kwargs = linear1d[model_class]['kwargs']
- model = model_class(*parameters, **kwargs)
+ model_args = linear1d[model_class]
+ kwargs = {}
+ kwargs.update(model_args['kwargs'])
+ kwargs.update(model_args['parameters'])
+
+ if constraints:
+ kwargs.update(model_args['constraints'])
+
+ model = model_class(*model_args['args'], **kwargs)
+
y1 = model(self.x1)
model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)
- utils.assert_allclose(model_nlin.parameters, model.parameters,
- atol=0.2)
- @pytest.mark.parametrize(('model_class'), linear2d.keys())
- def test_linear_fitter_2D(self, model_class):
+ if constraints:
+ fixed = model_args['constraints'].get('fixed', None)
+ if fixed:
+ for param, value in fixed.items():
+ expected = model_args['parameters'][param]
+ assert getattr(model_nlin, param).value == expected
+ else:
+ assert_allclose(model_nlin.parameters, model.parameters,
+ atol=0.2)
+
+ @pytest.mark.parametrize('model_class,constraints',
+ product(linear2d.keys(), (False, True)))
+ def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
- parameters = linear2d[model_class]['parameters']
- kwargs = linear2d[model_class]['kwargs']
- model = model_class(*parameters, **kwargs)
+ model_args = linear2d[model_class]
+ kwargs = {}
+ kwargs.update(model_args['kwargs'])
+ kwargs.update(model_args['parameters'])
+
+ if constraints:
+ kwargs.update(model_args['constraints'])
+
+ model = model_class(*model_args['args'], **kwargs)
+
z = model(self.x2, self.y2)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
- utils.assert_allclose(model_lin.parameters, model.parameters,
- atol=0.2)
- @pytest.mark.parametrize(('model_class'), linear2d.keys())
- def test_non_linear_fitter_2D(self, model_class):
+ if constraints:
+ fixed = model_args['constraints'].get('fixed', None)
+ if fixed:
+ for param, value in fixed.items():
+ expected = model_args['parameters'][param]
+ assert getattr(model_lin, param).value == expected
+ else:
+ assert_allclose(model_lin.parameters, model.parameters,
+ atol=0.2)
+
+ @pytest.mark.parametrize('model_class,constraints',
+ product(linear2d.keys(), (False, True)))
+ def test_non_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
- parameters = linear2d[model_class]['parameters']
- kwargs = linear2d[model_class]['kwargs']
- model = model_class(*parameters, **kwargs)
+ model_args = linear2d[model_class]
+ kwargs = {}
+ kwargs.update(model_args['kwargs'])
+ kwargs.update(model_args['parameters'])
+
+ if constraints:
+ kwargs.update(model_args['constraints'])
+
+ model = model_class(*model_args['args'], **kwargs)
+
z = model(self.x2, self.y2)
model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
z + self.n2)
- utils.assert_allclose(model_nlin.parameters, model.parameters,
- atol=0.2)
+
+ if constraints:
+ fixed = model_args['constraints'].get('fixed', None)
+ if fixed:
+ for param, value in fixed.items():
+ expected = model_args['parameters'][param]
+ assert getattr(model_nlin, param).value == expected
+ else:
+ assert_allclose(model_nlin.parameters, model.parameters,
+ atol=0.2)
+
+
+ at pytest.mark.parametrize('model_class',
+ [cls for cls in list(linear1d) + list(linear2d)
+ if isinstance(cls, PolynomialBase)])
+def test_polynomial_init_with_constraints(model_class):
+ """
+ Test that polynomial models can be instantiated with constraints, but no
+ parameters specified.
+
+ Regression test for https://github.com/astropy/astropy/issues/3606
+ """
+
+ # Just determine which parameter to place a constraint on; it doesn't
+ # matter which parameter it is to exhibit the problem so long as it's a
+ # valid parameter for the model
+ if '1D' in model_class.__name__:
+ param = 'c0'
+ else:
+ param = 'c0_0'
+
+ if issubclass(model_class, OrthoPolynomialBase):
+ degree = (2, 2)
+ else:
+ degree = (2,)
+
+ m = model_class(*degree, fixed={param: True})
+
+ assert m.fixed[param] is True
+ assert getattr(m, param).fixed is True
def test_sip_hst():
@@ -132,7 +260,7 @@ def test_sip_hst():
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
- utils.assert_allclose(sip(1, 1), astwcs_result)
+ assert_allclose(sip(1, 1), astwcs_result)
def test_sip_irac():
@@ -161,14 +289,61 @@ def test_sip_irac():
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
- utils.assert_allclose(sip(*pix), foc[0] - rel_pix)
- utils.assert_allclose(sip.inverse(*foc[0]) +
- foc[0] - rel_pix, newpix - pix)
+ assert_allclose(sip(*pix), foc[0] - rel_pix)
+ assert_allclose(sip.inverse(*foc[0]) +
+ foc[0] - rel_pix, newpix - pix)
def test_sip_no_coeff():
sip = SIP([10,12], 2, 2)
- utils.assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
- utils.assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
+ assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
+ assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
with pytest.raises(NotImplementedError):
sip.inverse
+
+
+ at pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
+ Polynomial2D, Chebyshev2D, Legendre2D))
+def test_zero_degree_polynomial(cls):
+ """
+ A few tests that degree=0 polynomials are correctly evaluated and
+ fitted.
+
+ Regression test for https://github.com/astropy/astropy/pull/3589
+ """
+
+ if cls.n_inputs == 1: # Test 1D polynomials
+ p1 = cls(degree=0, c0=1)
+ assert p1(0) == 1
+ assert np.all(p1(np.zeros(5)) == np.ones(5))
+
+ x = np.linspace(0, 1, 100)
+ # Add a little noise along a straight line
+ y = 1 + np.random.uniform(0, 0.1, len(x))
+
+ p1_init = cls(degree=0)
+ fitter = fitting.LinearLSQFitter()
+ p1_fit = fitter(p1_init, x, y)
+
+ # The fit won't be exact of course, but it should get close to within
+ # 1%
+ assert_allclose(p1_fit.c0, 1, atol=0.10)
+ elif cls.n_inputs == 2: # Test 2D polynomials
+ if issubclass(cls, OrthoPolynomialBase):
+ p2 = cls(x_degree=0, y_degree=0, c0_0=1)
+ else:
+ p2 = cls(degree=0, c0_0=1)
+ assert p2(0, 0) == 1
+ assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
+
+ y, x = np.mgrid[0:1:100j,0:1:100j]
+ z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
+
+ if issubclass(cls, OrthoPolynomialBase):
+ p2_init = cls(x_degree=0, y_degree=0)
+ else:
+ p2_init = cls(degree=0)
+ fitter = fitting.LinearLSQFitter()
+ p2_fit = fitter(p2_init, x, y, z)
+
+ assert_allclose(p2_fit.c0_0, 1, atol=0.10)
diff --git a/astropy/nddata/mixins/tests/__init__.py b/astropy/nddata/mixins/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/astropy/nddata/mixins/tests/test_ndarithmetic.py b/astropy/nddata/mixins/tests/test_ndarithmetic.py
new file mode 100644
index 0000000..94c6395
--- /dev/null
+++ b/astropy/nddata/mixins/tests/test_ndarithmetic.py
@@ -0,0 +1,468 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# TEST_UNICODE_LITERALS
+
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from ...compat import NDDataArray
+from ...nduncertainty import (StdDevUncertainty,
+ IncompatibleUncertaintiesException,
+ NDUncertainty)
+from ....tests.helper import pytest
+from .... import units as u
+from ....utils import NumpyRNGContext
+
+
+class FakeUncertainty(NDUncertainty):
+
+ def __init__(self, *arg, **kwd):
+ self._unit = None
+ pass
+
+ def propagate_add(self, data, final_data):
+ pass
+
+ def propagate_subtract(self, data, final_data):
+ pass
+
+ def propagate_multiply(self, data, final_data):
+ pass
+
+ def propagate_divide(self, data, final_data):
+ pass
+
+ def array(self):
+ pass
+
+
+# Uncertainty tests
+def test_nddata_uncertainty_init_invalid_shape_1():
+ u = StdDevUncertainty(array=np.ones((6, 6)))
+ with pytest.raises(ValueError) as exc:
+ NDDataArray(np.ones((5, 5)), uncertainty=u)
+ assert exc.value.args[0] == 'parent shape does not match array data shape'
+
+
+def test_nddata_uncertainty_init_invalid_shape_2():
+ u = StdDevUncertainty()
+ NDDataArray(np.ones((5, 5)), uncertainty=u)
+ with pytest.raises(ValueError) as exc:
+ u.array = np.ones((6, 6))
+ assert exc.value.args[0] == 'array shape does not match parent data shape'
+
+
+ at pytest.mark.parametrize(('uncertainty'), [1., 'spam', np.ones((5, 5))])
+def test_nddata_uncertainty_invalid_type(uncertainty):
+ with pytest.raises(TypeError) as exc:
+ NDDataArray(np.ones((5, 5)), uncertainty=uncertainty)
+ assert exc.value.args[0] == ('Uncertainty must be an instance of '
+ 'a NDUncertainty object')
+
+
+# slicing tests
+def test_simple_slicing():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ assert d1.data.shape == (5, 5)
+ d2 = d1[2:3, 2:3]
+ assert d2.data.shape == (1, 1)
+ d3 = d1[2, 2]
+ assert d3.data.shape == ()
+
+
+def test_slicing_reference():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = d1[2:3, 2:3]
+ # asserting that the new nddata contains references to the original nddata
+ assert d2.data.base is d1.data
+ assert d2.uncertainty.array.base is d1.uncertainty.array
+
+
+def test_slicing_with_mask_or_flag():
+ # Regression test for #2170
+ ndd = NDDataArray(np.array([1, 2, 3]),
+ mask=np.array([False, False, False]))
+ assert ndd[0].data.shape == ()
+ assert not ndd[0].mask
+
+
+def test_nddata_add():
+ d1 = NDDataArray(np.ones((5, 5)))
+ d2 = NDDataArray(np.ones((5, 5)))
+ d3 = d1.add(d2)
+ assert np.all(d3.data == 2.)
+
+
+def test_nddata_add_mismatch_wcs():
+ d1 = NDDataArray(np.ones((5, 5)), wcs=1.)
+ d2 = NDDataArray(np.ones((5, 5)), wcs=2.)
+ with pytest.raises(ValueError) as exc:
+ d1.add(d2)
+ assert exc.value.args[0] == "WCS properties do not match"
+
+
+def test_nddata_add_mismatch_units():
+ d1 = NDDataArray(np.ones((5, 5)), unit='Jy')
+ d2 = NDDataArray(np.ones((5, 5)), unit='erg/s')
+ with pytest.raises(ValueError) as exc:
+ d1.add(d2)
+ assert exc.value.args[0] == "operand units do not match"
+
+
+def test_nddata_add_mismatch_shape():
+ d1 = NDDataArray(np.ones((5, 5)))
+ d2 = NDDataArray(np.ones((6, 6)))
+ with pytest.raises(ValueError) as exc:
+ d1.add(d2)
+ assert exc.value.args[0] == "operand shapes do not match"
+
+
+def test_nddata_add_with_masks():
+ # numpy masked arrays mask the result of binary operations if the
+ # mask of either operand is set.
+ # Does NDData?
+ ndd1 = NDDataArray(np.array([1, 2]), mask=np.array([True, False]))
+ other_mask = ~ ndd1.mask
+ ndd2 = NDDataArray(np.array([1, 2]), mask=other_mask)
+ result = ndd1.add(ndd2)
+ # The result should have all entries masked...
+ assert result.mask.all()
+
+
+def test_nddata_add_uncertainties():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = StdDevUncertainty(array=np.ones((5, 5)))
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)), uncertainty=u2)
+ d3 = d1.add(d2)
+ assert np.all(d3.data == 2.)
+ assert_array_equal(d3.uncertainty.array, np.sqrt(10.))
+
+
+def test_nddata_add_uncertainties_mismatch():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = FakeUncertainty()
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)), uncertainty=u2)
+ with pytest.raises(IncompatibleUncertaintiesException) as exc:
+ d3 = d1.add(d2)
+ assert exc.value.args[0] == ('Cannot propagate uncertainties of type '
+ 'StdDevUncertainty with uncertainties of '
+ 'type FakeUncertainty for addition')
+
+
+def test_initializing_nduncertainty_from_quantity():
+ # Until nddata and quantity are integrated initializing with a quantity
+ # should raise an error.
+ input_ndd_unit = u.kg
+ ndd = NDDataArray(np.array([1, 2, 3]), unit=input_ndd_unit)
+ std_data = np.array([1, 2, 3])
+
+ # Unit of the uncertainty not convertible to unit of ndd, should raise
+ # an error.
+ std_error = StdDevUncertainty(u.adu * std_data)
+ assert std_error._unit is u.adu
+ with pytest.raises(u.UnitsError):
+ ndd.uncertainty = std_error
+
+ # Uncertainty should be settable without any change in its values
+ # because uncertainty unit is same as data unit.
+ std_error = StdDevUncertainty(u.kg * std_data)
+ ndd.uncertainty = std_error
+ assert_array_equal(std_data, ndd.uncertainty.array)
+
+ # If the uncertainty unit is grams there should be no error, but the
+ # values of the uncertainty should be scaled.
+ std_error = StdDevUncertainty(u.g * std_data)
+ ndd.uncertainty = std_error
+ assert_array_equal(std_data, 1000 * ndd.uncertainty.array)
+
+ # If ndd has no unit but the uncertainty does an error should be raised.
+ ndd = NDDataArray(np.array([1, 2, 3]), unit=None)
+ with pytest.raises(ValueError):
+ ndd.uncertainty = std_error
+
+
+# Mask tests
+def test_unmasked_masked_array_input():
+ # Test for #2784
+ marr = np.ma.array([1, 2, 5]) # Masked array with no masked entries
+ nd = NDDataArray(marr) # Before fix this raised a ValueError
+
+ # Check that masks are correct
+ assert marr.mask is np.ma.nomask
+ # Internal representation is np.ma.nomask but getter returns None.
+ assert nd.mask is None
+
+
+def test_nddata_unmasked_in_operation_with_masked_numpy_array():
+ # test for #2417
+ ndd = NDDataArray(np.array([1, 2, 3]))
+ np_data = -np.ones_like(ndd)
+ np_mask = np.array([True, False, True])
+ np_arr_masked = np.ma.masked_array(np_data, mask=np_mask, copy=True)
+ # check multiplication in both orders as in test above
+ result1 = ndd * np_arr_masked
+ result2 = np_arr_masked * ndd
+ for result in [result1, result2]:
+ # multiplying by a masked numpy array should return a masked array
+ assert isinstance(result, np.ma.MaskedArray)
+ assert np.all(result.mask == np_mask)
+ assert np.all(result[~result.mask] == -ndd.data[~np_mask])
+
+
+ at pytest.mark.parametrize(('shape'), [(10,), (5, 5), (3, 10, 10)])
+def test_nddata_mask_invalid_shape(shape):
+ with pytest.raises(ValueError) as exc:
+ with NumpyRNGContext(789):
+ NDDataArray(np.random.random((10, 10)),
+ mask=np.random.random(shape) > 0.5)
+ assert exc.value.args[0] == 'dimensions of mask do not match data'
+
+
+ at pytest.mark.parametrize('mask_in', [
+ np.array([True, False]),
+ np.array([1, 0]),
+ [True, False],
+ [1, 0]])
+def test_nddata_mask_init_without_np_array(mask_in):
+ ndd = NDDataArray(np.array([1, 1]), mask=mask_in)
+ assert (ndd.mask == mask_in).all()
+
+
+def test_ndddata_with_mask_acts_like_masked_array():
+ # test for #2414
+ input_mask = np.array([True, False, False])
+ input_data = np.array([1, 2, 3])
+ ndd_masked = NDDataArray(input_data.copy(), mask=input_mask.copy())
+ other = - np.ones_like(input_data)
+ result1 = ndd_masked * other
+ result2 = other * ndd_masked
+ # Test for both orders of multiplication -- if multiplication is
+ # eventually overridden for NDData the result can depend on order.
+ for result in [result1, result2]:
+ # Result should be a masked array because input NDData was masked
+ assert isinstance(result, np.ma.MaskedArray)
+ # Result mask should match input mask because other has no mask
+ assert np.all(result.mask == input_mask)
+ assert np.all(result[~result.mask] == - input_data[~input_mask])
+
+
+# Arithmetic tests
+
+def test_nddata_subtract():
+ d1 = NDDataArray(np.ones((5, 5)))
+ d2 = NDDataArray(np.ones((5, 5)) * 2.)
+ d3 = d1.subtract(d2)
+ assert np.all(d3.data == -1.)
+
+
+def test_nddata_subtract_mismatch_wcs():
+ d1 = NDDataArray(np.ones((5, 5)), wcs=1.)
+ d2 = NDDataArray(np.ones((5, 5)) * 2., wcs=2.)
+ with pytest.raises(ValueError) as exc:
+ d1.subtract(d2)
+ assert exc.value.args[0] == "WCS properties do not match"
+
+
+def test_nddata_subtract_mismatch_units():
+ d1 = NDDataArray(np.ones((5, 5)), unit='Jy')
+ d2 = NDDataArray(np.ones((5, 5)) * 2., unit='erg/s')
+ with pytest.raises(ValueError) as exc:
+ d1.subtract(d2)
+ assert exc.value.args[0] == "operand units do not match"
+
+
+def test_nddata_subtract_mismatch_shape():
+ d1 = NDDataArray(np.ones((5, 5)))
+ d2 = NDDataArray(np.ones((6, 6)) * 2.)
+ with pytest.raises(ValueError) as exc:
+ d1.subtract(d2)
+ assert exc.value.args[0] == "operand shapes do not match"
+
+
+def test_nddata_subtract_uncertainties():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = StdDevUncertainty(array=np.ones((5, 5)))
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)) * 2., uncertainty=u2)
+ d3 = d1.subtract(d2)
+ assert np.all(d3.data == -1.)
+ assert_array_equal(d3.uncertainty.array, np.sqrt(10.))
+
+
+def test_nddata_multiply_uncertainties():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = StdDevUncertainty(array=np.ones((5, 5)))
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)) * 2., uncertainty=u2)
+ d3 = d1.multiply(d2)
+ assert np.all(d3.data == 2.)
+ assert_array_equal(d3.uncertainty.array, 2 * np.sqrt(9.25))
+
+
+def test_nddata_divide_uncertainties():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = StdDevUncertainty(array=np.ones((5, 5)))
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)) * 2., uncertainty=u2)
+ d3 = d1.divide(d2)
+ assert np.all(d3.data == 0.5)
+ assert_array_equal(d3.uncertainty.array, 0.5 * np.sqrt(9.25))
+
+
+def test_nddata_subtract_uncertainties_mismatch():
+ u1 = StdDevUncertainty(array=np.ones((5, 5)) * 3)
+ u2 = FakeUncertainty()
+ d1 = NDDataArray(np.ones((5, 5)), uncertainty=u1)
+ d2 = NDDataArray(np.ones((5, 5)) * 2., uncertainty=u2)
+ with pytest.raises(IncompatibleUncertaintiesException) as exc:
+ d3 = d1.subtract(d2)
+ assert exc.value.args[0] == ('Cannot propagate uncertainties of type '
+ 'StdDevUncertainty with uncertainties of '
+ 'type FakeUncertainty for subtraction')
+
+
+@pytest.mark.parametrize('op1_unc,op2_unc', [
+ (None, None),
+ (StdDevUncertainty([1]), None),
+ (None, StdDevUncertainty([1])),
+ (StdDevUncertainty([1]), StdDevUncertainty([1]))
+ ])
+def test_arithmetic_result_not_tied_to_operands_uncertainty(op1_unc, op2_unc):
+ # Expectation is that the result of an arithmetic operation should be a
+ # new object whose members are not tied to the members of the operand.
+ # The only reliable test of this is to change elements of the result and
+ # see if the corresponding elements of the operands change.
+ # All four of the cases parametrized in this test do need to be checked
+ # because each of the four cases is handled separately in the code (well,
+ # except for the None, None case).
+ # Only one of the arithmetic operations needs to be checked because the
+ # logic for propagating the uncertainties is common to all of the
+ # operations.
+ op1 = NDDataArray(np.array([1]), uncertainty=op1_unc)
+ op2 = NDDataArray(np.array([1]), uncertainty=op2_unc)
+
+ result = op1.add(op2)
+ if result.uncertainty:
+ result.uncertainty.array[0] = 0
+
+ if op1_unc:
+ assert op1.uncertainty.array[0] == 1
+ if op2_unc:
+ assert op2.uncertainty.array[0] == 1
+
+ result.data[0] = np.pi
+ assert op1.data[0] == 1
+ assert op2.data[0] == 1
+
+
+@pytest.mark.parametrize('op1_mask,op2_mask', [
+ (None, None),
+ (None, np.array([False])),
+ (np.array([False]), None),
+ (np.array([False]), np.array([False]))])
+def test_arithmetic_result_not_tied_to_operands_mask(op1_mask, op2_mask):
+ # See test_arithmetic_result_not_tied_to_operands_uncertainty for comments
+ op1 = NDDataArray(np.array([1]), mask=op1_mask)
+ op2 = NDDataArray(np.array([1]), mask=op2_mask)
+ result = op1.add(op2)
+
+ if result.mask is not None:
+ result.mask[0] = True
+
+ if op1_mask is not None:
+ assert op1.mask[0] == (not result.mask[0])
+
+ if op2_mask is not None:
+ assert op2.mask[0] == (not result.mask[0])
+
+
+def test_arithmetic_result_not_tied_to_operands_wcs():
+ # unit is no longer settable, so test that result unit is different object
+ # than operands was removed.
+
+ # Unlike the previous two tests, we only need to check a case where both
+ # operands have the same wcs because operands with different wcs is not
+ # supported
+ op1 = NDDataArray(np.array([1]), wcs=np.array([1]), unit='m')
+ op2 = NDDataArray(np.array([1]), wcs=np.array([1]), unit='m')
+ result = op1.add(op2)
+ result.wcs[0] = 12345
+ assert op1.wcs[0] != result.wcs[0]
+ assert op2.wcs[0] != result.wcs[0]
+
+
+# first operand has unit km, second has unit m
+@pytest.mark.parametrize('operation,result_unit', [
+ ('add', u.km),
+ ('subtract', u.km),
+ ('multiply', u.km * u.m),
+ ('divide', u.km / u.m)])
+def test_uncertainty_unit_conversion_add_subtract(operation, result_unit):
+ in_km = NDDataArray(np.array([1, 1]), unit=u.km,
+ uncertainty=StdDevUncertainty([.1, .1]))
+ in_m = NDDataArray(in_km.data * 1000, unit=u.m)
+ in_m.uncertainty = StdDevUncertainty(in_km.uncertainty.array * 1000)
+ operator_km = in_km.__getattribute__(operation)
+ combined = operator_km(in_m)
+ assert combined.unit == result_unit
+ if operation in ['add', 'subtract']:
+ # uncertainty is not scaled by result values
+ assert_array_equal(combined.uncertainty.array,
+ np.sqrt(2) * in_km.uncertainty.array)
+ else:
+ # uncertainty is scaled by result
+ assert_array_equal(combined.uncertainty.array,
+ np.sqrt(2) * in_km.uncertainty.array * combined.data)
+
+
+@pytest.mark.parametrize('unit1,unit2,op,result_unit', [
+ (None, None, 'add', None),
+ (None, None, 'multiply', None),
+ (None, u.m, 'multiply', u.m),
+ (u.dimensionless_unscaled, None, 'multiply',
+ u.dimensionless_unscaled),
+ (u.adu, u.adu, 'add', u.adu),
+ (u.adu, u.adu, 'subtract', u.adu),
+ (u.adu, u.adu, 'divide', u.dimensionless_unscaled),
+ (u.adu, u.m, 'multiply', u.m * u.adu)
+ ])
+def test_arithmetic_unit_calculation(unit1, unit2, op, result_unit):
+ # Test for #2413
+ ndd1 = NDDataArray(np.array([1]), unit=unit1)
+ ndd2 = NDDataArray(np.array([1]), unit=unit2)
+ ndd1_method = ndd1.__getattribute__(op)
+ result = ndd1_method(ndd2)
+ assert result.unit == result_unit
+
+
+# check that subclasses can require wcs and/or unit to be present and use
+# _arithmetic and convert_unit_to
+class SubNDData(NDDataArray):
+ """
+ Subclass for test initialization of subclasses in NDData._arithmetic and
+ NDData.convert_unit_to
+ """
+ def __init__(self, *arg, **kwd):
+ super(SubNDData, self).__init__(*arg, **kwd)
+ if self.unit is None:
+ raise ValueError("Unit for subclass must be specified")
+ if self.wcs is None:
+ raise ValueError("WCS for subclass must be specified")
+
+
+def test_init_of_subclasses_in_arithmetic():
+ with NumpyRNGContext(12345):
+ data = np.ones([10, 10])
+ # The wcs only needs to be not None for this test to succeed
+ arr1 = SubNDData(data, unit='adu', wcs=5)
+ arr2 = SubNDData(data, unit='adu', wcs=5)
+ result = arr1.add(arr2)
+ assert result.unit == arr1.unit
+ assert result.wcs == arr1.wcs
diff --git a/astropy/nddata/mixins/tests/test_ndio.py b/astropy/nddata/mixins/tests/test_ndio.py
new file mode 100644
index 0000000..6b7faba
--- /dev/null
+++ b/astropy/nddata/mixins/tests/test_ndio.py
@@ -0,0 +1,16 @@
+from __future__ import (absolute_import, division, print_function,
+ unicode_literals)
+
+from ...nddata import NDData
+from ...mixins.ndio import NDIOMixin
+
+
+# Define minimal class that uses the I/O mixin
+class NDDataIO(NDIOMixin, NDData):
+ pass
+
+
+def test_simple_write_read(tmpdir):
+ ndd = NDDataIO([1, 2, 3])
+ assert hasattr(ndd, 'read')
+ assert hasattr(ndd, 'write')
diff --git a/astropy/stats/funcs.py b/astropy/stats/funcs.py
index 6744e50..ef23e30 100644
--- a/astropy/stats/funcs.py
+++ b/astropy/stats/funcs.py
@@ -537,7 +537,7 @@ def biweight_location(a, c=6.0, M=None):
--------
This will generate random variates from a Gaussian distribution and return
- the median absolute deviation for that distribution::
+ the biweight location of the distribution::
>>> from astropy.stats.funcs import biweight_location
>>> from numpy.random import randn
@@ -622,7 +622,7 @@ def biweight_midvariance(a, c=9.0, M=None):
--------
This will generate random variates from a Gaussian distribution and return
- the median absolute deviation for that distribution::
+ the biweight midvariance of the distribution::
>>> from astropy.stats.funcs import biweight_midvariance
>>> from numpy.random import randn
diff --git a/astropy/stats/sigma_clipping.py b/astropy/stats/sigma_clipping.py
index e3dd42a..44a93fe 100644
--- a/astropy/stats/sigma_clipping.py
+++ b/astropy/stats/sigma_clipping.py
@@ -9,7 +9,7 @@ import numpy as np
__all__ = ['sigma_clip', 'sigma_clipped_stats']
-def sigma_clip(data, sig=3, iters=1, cenfunc=np.ma.median, varfunc=np.var,
+def sigma_clip(data, sig=3.0, iters=1, cenfunc=np.ma.median, varfunc=np.var,
axis=None, copy=True):
"""Perform sigma-clipping on the provided data.
diff --git a/astropy/table/table.py b/astropy/table/table.py
index 656ef85..1790a51 100644
--- a/astropy/table/table.py
+++ b/astropy/table/table.py
@@ -247,6 +247,11 @@ class Table(object):
default_names = None
+ if (isinstance(data, np.ndarray) and
+ data.shape == (0,) and
+ not data.dtype.names):
+ data = None
+
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
@@ -264,6 +269,10 @@ class Table(object):
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
+ if data.shape == ():
+ raise ValueError('Can not initialize a Table with a scalar')
+ elif len(data.shape) == 1:
+ data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, dict):
@@ -2062,3 +2071,8 @@ class QTable(Table):
If ``col`` is a string then it refers to a column name in this table.
"""
return super(QTable, self)._is_mixin_column(col, quantity_is_mixin=True)
+
+ def __getstate__(self):
+ columns = dict((key, col if isinstance(col, BaseColumn) else col_copy(col))
+ for key, col in self.columns.items())
+ return (columns, self.meta)
diff --git a/astropy/table/tests/test_pickle.py b/astropy/table/tests/test_pickle.py
index 4a81920..2c3fd07 100644
--- a/astropy/table/tests/test_pickle.py
+++ b/astropy/table/tests/test_pickle.py
@@ -3,7 +3,10 @@ from ...extern.six.moves import cPickle as pickle
import numpy as np
import pytest
-from ...table import Table, Column, MaskedColumn
+from ...table import Table, Column, MaskedColumn, QTable
+from ...units import Quantity, deg
+from ...time import Time
+from ...coordinates import SkyCoord
def test_pickle_column(protocol):
@@ -43,6 +46,29 @@ def test_pickle_table(protocol):
assert tp['b'].attrs_equal(t['b'])
assert tp.meta == t.meta
+def test_pickle_qtable(protocol):
+ a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
+ b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
+ meta={'b': 1})
+ t = QTable([a, b], meta={'a': 1, 'b':Quantity(10,unit='s')})
+ t['c'] = Quantity([1, 2], unit='m')
+ t['d'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
+ t['e'] = SkyCoord([125.0,180.0]*deg, [-45.0,36.5]*deg)
+ ts = pickle.dumps(t)
+ tp = pickle.loads(ts)
+
+ assert np.all(tp['a'] == t['a'])
+ assert np.all(tp['b'] == t['b'])
+ # test mixin columns
+ assert np.all(tp['c'] == t['c'])
+ assert np.all(tp['d'] == t['d'])
+ assert np.all(tp['e'].ra == t['e'].ra)
+ assert np.all(tp['e'].dec == t['e'].dec)
+ assert type(tp['c']) == type(t['c'])
+ assert type(tp['d']) == type(t['d'])
+ assert type(tp['e']) == type(t['e'])
+ assert tp.meta == t.meta
+ assert type(tp) == type(t)
def test_pickle_masked_table(protocol):
a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
diff --git a/astropy/table/tests/test_table.py b/astropy/table/tests/test_table.py
index 76141f2..0ca04a6 100644
--- a/astropy/table/tests/test_table.py
+++ b/astropy/table/tests/test_table.py
@@ -1364,3 +1364,14 @@ def test_nested_iteration():
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
+
+
+def test_table_init_from_degenerate_arrays(table_types):
+ t = table_types.Table(np.array([]))
+ assert len(t.columns) == 0
+
+ with pytest.raises(ValueError):
+ t = table_types.Table(np.array(0))
+
+ t = table_types.Table(np.array([1, 2, 3]))
+ assert len(t.columns) == 3
diff --git a/astropy/tests/helper.py b/astropy/tests/helper.py
index 3136056..a8a08ca 100644
--- a/astropy/tests/helper.py
+++ b/astropy/tests/helper.py
@@ -107,13 +107,6 @@ class TestRunner(object):
"""
The docstring for this method lives in astropy/__init__.py:test
"""
- try:
- get_ipython()
- except NameError:
- pass
- else:
- raise RuntimeError(
- "Running astropy tests inside of IPython is not supported.")
if coverage:
warnings.warn(
@@ -615,15 +608,33 @@ def check_pickling_recovery(original, protocol):
class_history)
-def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, err_msg='', verbose=True):
+def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
+ **kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
+ import numpy as np
+ np.testing.assert_allclose(*_unquantify_allclose_arguments(actual, desired,
+ rtol, atol),
+ **kwargs)
+
+
+def quantity_allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
+ """
+ Returns True if two arrays are element-wise equal within a tolerance.
+ This is a :class:`~astropy.units.Quantity`-aware version of
+ :func:`numpy.allclose`.
+ """
import numpy as np
+ return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
+ **kwargs)
+
+
+def _unquantify_allclose_arguments(actual, desired, rtol, atol):
from .. import units as u
actual = u.Quantity(actual, subok=True, copy=False)
@@ -632,7 +643,9 @@ def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, err_msg='',
try:
desired = desired.to(actual.unit)
except u.UnitsError:
- raise u.UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) are not convertible".format(desired.unit, actual.unit))
+ raise u.UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
+ "are not convertible"
+ .format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
@@ -642,7 +655,9 @@ def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, err_msg='',
try:
atol = atol.to(actual.unit)
except u.UnitsError:
- raise u.UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) are not convertible".format(atol.unit, actual.unit))
+ raise u.UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
+ "are not convertible"
+ .format(atol.unit, actual.unit))
rtol = u.Quantity(rtol, subok=True, copy=False)
try:
@@ -650,6 +665,4 @@ def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None, err_msg='',
except:
raise u.UnitsError("`rtol` should be dimensionless")
- np.testing.assert_allclose(actual.value, desired.value,
- rtol=rtol.value, atol=atol.value,
- err_msg=err_msg, verbose=verbose)
+ return actual.value, desired.value, rtol.value, atol.value
diff --git a/astropy/tests/output_checker.py b/astropy/tests/output_checker.py
index 4751d9f..58f0fac 100644
--- a/astropy/tests/output_checker.py
+++ b/astropy/tests/output_checker.py
@@ -45,7 +45,9 @@ class AstropyOutputChecker(doctest.OutputChecker):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
- got_floats = r'(\d+\.\d*|\.\d+)(?:e[+-]?\d+)?'
+ exp = r'(?:e[+-]?\d+)'
+
+ got_floats = r'(\d+\.\d*%s?|\.\d+%s?|\d+%s)' % (exp, exp, exp)
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
diff --git a/astropy/tests/pytest_plugins.py b/astropy/tests/pytest_plugins.py
index 7342fb5..87201be 100644
--- a/astropy/tests/pytest_plugins.py
+++ b/astropy/tests/pytest_plugins.py
@@ -462,17 +462,31 @@ PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py')])
+# This always returns with Astropy's version
+from .. import __version__
+
+TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
+
def pytest_report_header(config):
- from .. import __version__
stdoutencoding = getattr(sys.stdout, 'encoding') or 'ascii'
- s = "\nRunning tests with Astropy version {0}.\n".format(__version__)
if six.PY2:
args = [x.decode('utf-8') for x in config.args]
elif six.PY3:
args = config.args
+
+ # TESTED_VERSIONS can contain the affiliated package version, too
+ if len(TESTED_VERSIONS) > 1:
+ for pkg, version in TESTED_VERSIONS.items():
+ if pkg != 'Astropy':
+ s = "\nRunning tests with {0} version {1}.\n".format(
+ pkg, version)
+ else:
+ s = "\nRunning tests with Astropy version {0}.\n".format(
+ TESTED_VERSIONS['Astropy'])
+
s += "Running tests in {0}.\n\n".format(" ".join(args))
from platform import platform
@@ -675,3 +689,26 @@ def pytest_unconfigure():
# turn_off_internet previously called)
# this is harmless / does nothing if socket connections were never disabled
turn_on_internet()
+
+
+def pytest_terminal_summary(terminalreporter):
+ """Output a warning to IPython users in case any tests failed."""
+
+ try:
+ get_ipython()
+ except NameError:
+ return
+
+ if not terminalreporter.stats.get('failed'):
+ # Only issue the warning when there are actually failures
+ return
+
+ terminalreporter.ensure_newline()
+ terminalreporter.write_line(
+ 'Some tests are known to fail when run from the IPython prompt; '
+ 'especially, but not limited to tests involving logging and warning '
+ 'handling. Unless you are certain as to the cause of the failure, '
+ 'please check that the failure occurs outside IPython as well. See '
+ 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
+ 'tests-when-running-the-tests-in-ipython for more information.',
+ yellow=True, bold=True)
diff --git a/astropy/time/core.py b/astropy/time/core.py
index 7b29645..ff42934 100644
--- a/astropy/time/core.py
+++ b/astropy/time/core.py
@@ -1329,15 +1329,14 @@ class TimeFormat(object):
raise TypeError('Input values for {0} class must be finite doubles'
.format(self.name))
- if hasattr(val1, 'to'):
+ if getattr(val1, 'unit', None) is not None:
# set possibly scaled unit any quantities should be converted to
_unit = u.CompositeUnit(getattr(self, 'unit', 1.), [u.day], [1])
val1 = val1.to(_unit).value
if val2 is not None:
val2 = val2.to(_unit).value
- else:
- if hasattr(val2, 'to'):
- raise TypeError('Cannot mix float and Quantity inputs')
+ elif getattr(val2, 'unit', None) is not None:
+ raise TypeError('Cannot mix float and Quantity inputs')
if val2 is None:
val2 = np.zeros_like(val1)
diff --git a/astropy/time/tests/test_quantity_interaction.py b/astropy/time/tests/test_quantity_interaction.py
index 6f73eca..aae5b26 100644
--- a/astropy/time/tests/test_quantity_interaction.py
+++ b/astropy/time/tests/test_quantity_interaction.py
@@ -6,6 +6,7 @@ import numpy as np
from ...tests.helper import pytest
from .. import Time, TimeDelta, OperandTypeError
from ... import units as u
+from ...table import Column
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
@@ -53,6 +54,20 @@ class TestTimeQuantity():
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
+ def test_column_with_and_without_units(self):
+ """Ensure a Column without a unit is treated as an array [#3648]"""
+ a = np.arange(50000., 50010.)
+ ta = Time(a, format='mjd')
+ c1 = Column(np.arange(50000., 50010.), name='mjd')
+ tc1 = Time(c1, format='mjd')
+ assert np.all(ta == tc1)
+ c2 = Column(np.arange(50000., 50010.), name='mjd', unit='day')
+ tc2 = Time(c2, format='mjd')
+ assert np.all(ta == tc2)
+ c3 = Column(np.arange(50000., 50010.), name='mjd', unit='m')
+ with pytest.raises(u.UnitsError):
+ Time(c3, format='mjd')
+
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.*u.yr
diff --git a/astropy/units/core.py b/astropy/units/core.py
index 3186847..973dac2 100644
--- a/astropy/units/core.py
+++ b/astropy/units/core.py
@@ -785,7 +785,7 @@ class UnitBase(object):
return False
- def _apply_equivalences(self, unit, other, equivalencies):
+ def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
@@ -845,7 +845,7 @@ class UnitBase(object):
try:
scale = self._to(other)
except UnitsError:
- return self._apply_equivalences(
+ return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
return lambda val: scale * _condition_arg(val)
diff --git a/astropy/units/quantity.py b/astropy/units/quantity.py
index c4b098b..ab6c17e 100644
--- a/astropy/units/quantity.py
+++ b/astropy/units/quantity.py
@@ -292,23 +292,23 @@ class Quantity(np.ndarray):
# should be multiplied before being passed to the ufunc, as well as
# the unit the output from the ufunc will have.
if function in UFUNC_HELPERS:
- scales, result_unit = UFUNC_HELPERS[function](function, *units)
+ converters, result_unit = UFUNC_HELPERS[function](function, *units)
else:
raise TypeError("Unknown ufunc {0}. Please raise issue on "
"https://github.com/astropy/astropy"
.format(function.__name__))
- if any(scale == 0. for scale in scales):
+ if any(converter is False for converter in converters):
# for two-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
- maybe_arbitrary_arg = args[scales.index(0.)]
+ maybe_arbitrary_arg = args[converters.index(False)]
try:
if _can_have_arbitrary_unit(maybe_arbitrary_arg):
- scales = [1., 1.]
+ converters = [None, None]
else:
raise UnitsError("Can only apply '{0}' function to "
"dimensionless quantities when other "
@@ -363,8 +363,10 @@ class Quantity(np.ndarray):
# decomposed, which involves being scaled by a float, but since
# the array is an integer the output then gets converted to an int
# and truncated.
- if(any(not np.can_cast(arg, obj.dtype) for arg in args) or
- np.any(np.array(scales, dtype=obj.dtype) != np.array(scales))):
+ result_dtype = np.result_type(*(args + tuple(
+ (float if converter and converter(1.) % 1. != 0. else int)
+ for converter in converters)))
+ if not np.can_cast(result_dtype, obj.dtype):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(self.dtype))
@@ -382,7 +384,7 @@ class Quantity(np.ndarray):
# the issue is that we can't actually scale the inputs since that
# would be changing the objects passed to the ufunc, which would not
# be expected by the user.
- if any(scale != 1. for scale in scales):
+ if any(converters):
# If self is both output and input (which happens for in-place
# operations), input will get overwritten with junk. To avoid
@@ -409,7 +411,7 @@ class Quantity(np.ndarray):
result._contiguous = self.copy()
# ensure we remember the scales we need
- result._scales = scales
+ result._converters = converters
# unit output will get (setting _unit could prematurely change input
# if obj is self, which happens for in-place operations; see above)
@@ -432,10 +434,10 @@ class Quantity(np.ndarray):
# We now need to re-calculate quantities for which the input
# needed to be scaled.
- if hasattr(obj, '_scales'):
+ if hasattr(obj, '_converters'):
- scales = obj._scales
- del obj._scales
+ converters = obj._converters
+ del obj._converters
# For in-place operations, input will get overwritten with
# junk. To avoid that, we hid it in a new object in
@@ -456,19 +458,18 @@ class Quantity(np.ndarray):
# Set the inputs, rescaling as necessary
inputs = []
- for arg, scale in zip(args, scales):
- if scale != 1.:
- inputs.append(arg.value * scale)
- else: # for scale==1, input is not necessarily a Quantity
+ for arg, converter in zip(args, converters):
+ if converter:
+ inputs.append(converter(arg.value))
+ else: # with no conversion, input can be non-Quantity.
inputs.append(getattr(arg, 'value', arg))
# For output arrays that require scaling, we can reuse the
# output array to perform the scaling in place, as long as the
# array is not integral. Here, we set the obj_array to `None`
# when it can not be used to store the scaled result.
- if(result_unit is not None and
- any(not np.can_cast(scaled_arg, obj_array.dtype)
- for scaled_arg in inputs)):
+ if not (result_unit is None or
+ np.can_cast(np.result_type(*inputs), obj_array.dtype)):
obj_array = None
# Re-compute the output using the ufunc
diff --git a/astropy/units/quantity_helper.py b/astropy/units/quantity_helper.py
index 405fd88..04e959a 100644
--- a/astropy/units/quantity_helper.py
+++ b/astropy/units/quantity_helper.py
@@ -2,7 +2,8 @@
# quantities (http://pythonhosted.org/quantities/) package.
import numpy as np
-from .core import UnitsError, dimensionless_unscaled
+from .core import (UnitsError, dimensionless_unscaled,
+ get_current_unit_registry)
from ..utils.compat.fractions import Fraction
@@ -12,6 +13,21 @@ def _d(unit):
else:
return unit
+
+def get_converter(from_unit, to_unit):
+ """Like Unit._get_converter, except returns None if no scaling is needed,
+ i.e., if the inferred scale is unity."""
+ try:
+ scale = from_unit._to(to_unit)
+ except UnitsError:
+ return from_unit._apply_equivalencies(
+ from_unit, to_unit, get_current_unit_registry().equivalencies)
+ if scale == 1.:
+ return None
+ else:
+ return lambda val: scale * val
+
+
UFUNC_HELPERS = {}
# In this file, we implement the logic that determines for a given ufunc and
@@ -33,7 +49,7 @@ UNSUPPORTED_UFUNCS = set([np.bitwise_and, np.bitwise_or,
# ufunc, and the unit the output will be in.
# ufuncs that return a boolean and do not care about the unit
-helper_onearg_test = lambda f, unit: ([1.], None)
+helper_onearg_test = lambda f, unit: ([None], None)
UFUNC_HELPERS[np.isfinite] = helper_onearg_test
UFUNC_HELPERS[np.isinf] = helper_onearg_test
@@ -43,7 +59,7 @@ UFUNC_HELPERS[np.signbit] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
-helper_invariant = lambda f, unit: ([1.], _d(unit))
+helper_invariant = lambda f, unit: ([None], _d(unit))
UFUNC_HELPERS[np.absolute] = helper_invariant
UFUNC_HELPERS[np.fabs] = helper_invariant
@@ -58,36 +74,37 @@ UFUNC_HELPERS[np.trunc] = helper_invariant
# ufuncs handled as special cases
-UFUNC_HELPERS[np.sqrt] = lambda f, unit: ([1.], unit ** 0.5 if unit is not None
- else dimensionless_unscaled)
-UFUNC_HELPERS[np.square] = lambda f, unit: ([1.], unit ** 2 if unit is not None
- else dimensionless_unscaled)
-UFUNC_HELPERS[np.reciprocal] = lambda f, unit: ([1.], unit ** -1
- if unit is not None
- else dimensionless_unscaled)
+UFUNC_HELPERS[np.sqrt] = lambda f, unit: (
+ [None], unit ** 0.5 if unit is not None else dimensionless_unscaled)
+UFUNC_HELPERS[np.square] = lambda f, unit: (
+ [None], unit ** 2 if unit is not None else dimensionless_unscaled)
+UFUNC_HELPERS[np.reciprocal] = lambda f, unit: (
+ [None], unit ** -1 if unit is not None else dimensionless_unscaled)
# cbrt only was added in numpy 1.10
if isinstance(getattr(np, 'cbrt', None), np.ufunc):
- UFUNC_HELPERS[np.cbrt] = lambda f, unit: ([1.], unit ** Fraction(1, 3)
- if unit is not None
- else dimensionless_unscaled)
+ UFUNC_HELPERS[np.cbrt] = lambda f, unit: (
+ [None], (unit ** Fraction(1, 3) if unit is not None
+ else dimensionless_unscaled))
# ones_like was not private in numpy <= 1.6
if isinstance(getattr(np.core.umath, 'ones_like', None), np.ufunc):
UFUNC_HELPERS[np.core.umath.ones_like] = (lambda f, unit:
- ([1.], dimensionless_unscaled))
+ ([None], dimensionless_unscaled))
if isinstance(getattr(np.core.umath, '_ones_like', None), np.ufunc):
UFUNC_HELPERS[np.core.umath._ones_like] = (lambda f, unit:
- ([1.], dimensionless_unscaled))
-
+ ([None], dimensionless_unscaled))
# ufuncs that require dimensionless input and and give dimensionless output
def helper_dimensionless_to_dimensionless(f, unit):
+ if unit is None:
+ return [None], dimensionless_unscaled
+
try:
- scale = unit.to(dimensionless_unscaled) if unit is not None else 1.
+ return ([get_converter(unit, dimensionless_unscaled)],
+ dimensionless_unscaled)
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
- return [scale], dimensionless_unscaled
UFUNC_HELPERS[np.exp] = helper_dimensionless_to_dimensionless
UFUNC_HELPERS[np.expm1] = helper_dimensionless_to_dimensionless
@@ -102,13 +119,15 @@ UFUNC_HELPERS[np.modf] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
def helper_dimensionless_to_radian(f, unit):
from .si import radian
+ if unit is None:
+ return [None], radian
+
try:
- scale = unit.to(dimensionless_unscaled) if unit is not None else 1.
+ return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
- return [scale], radian
UFUNC_HELPERS[np.arccos] = helper_dimensionless_to_radian
UFUNC_HELPERS[np.arcsin] = helper_dimensionless_to_radian
@@ -122,12 +141,11 @@ UFUNC_HELPERS[np.arctanh] = helper_dimensionless_to_radian
def helper_degree_to_radian(f, unit):
from .si import degree, radian
try:
- scale = unit.to(degree)
+ return [get_converter(unit, degree)], radian
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
- return [scale], radian
UFUNC_HELPERS[np.radians] = helper_degree_to_radian
UFUNC_HELPERS[np.deg2rad] = helper_degree_to_radian
@@ -137,12 +155,11 @@ UFUNC_HELPERS[np.deg2rad] = helper_degree_to_radian
def helper_radian_to_degree(f, unit):
from .si import degree, radian
try:
- scale = unit.to(radian)
+ return [get_converter(unit, radian)], degree
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
- return [scale], degree
UFUNC_HELPERS[np.degrees] = helper_radian_to_degree
UFUNC_HELPERS[np.rad2deg] = helper_radian_to_degree
@@ -152,12 +169,11 @@ UFUNC_HELPERS[np.rad2deg] = helper_radian_to_degree
def helper_radian_to_dimensionless(f, unit):
from .si import radian
try:
- scale = unit.to(radian)
+ return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
- return [scale], dimensionless_unscaled
UFUNC_HELPERS[np.cos] = helper_radian_to_dimensionless
UFUNC_HELPERS[np.sin] = helper_radian_to_dimensionless
@@ -173,16 +189,16 @@ def helper_dimensionless_to_none(f, unit):
raise TypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
- return [1.], None
+ return [None], None
UFUNC_HELPERS[np.frexp] = helper_dimensionless_to_none
# TWO ARGUMENT UFUNCS
UFUNC_HELPERS[np.multiply] = lambda f, unit1, unit2: (
- [1., 1.], _d(unit1) * _d(unit2))
+ [None, None], _d(unit1) * _d(unit2))
-helper_division = lambda f, unit1, unit2: ([1., 1.], _d(unit1) / _d(unit2))
+helper_division = lambda f, unit1, unit2: ([None, None], _d(unit1) / _d(unit2))
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
@@ -190,18 +206,15 @@ UFUNC_HELPERS[np.floor_divide] = helper_division
def helper_power(f, unit1, unit2):
- if unit2 is not None:
- try:
- scale2 = unit2.to(dimensionless_unscaled)
- except UnitsError:
- raise TypeError("Can only raise something to a "
- "dimensionless quantity")
- else:
- scale2 = 1.
+ if unit2 is None:
+ return [None, None], _d(unit1)
# TODO: find a better way to do this, currently
# need to raise power of unit1 in main code
- return [1., scale2], _d(unit1)
+ try:
+ return [None, get_converter(unit2, dimensionless_unscaled)], _d(unit1)
+ except UnitsError:
+ raise TypeError("Can only raise something to a dimensionless quantity")
UFUNC_HELPERS[np.power] = helper_power
@@ -211,7 +224,7 @@ def helper_ldexp(f, unit1, unit2):
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
- return [1., 1.], _d(unit1)
+ return [None, None], _d(unit1)
UFUNC_HELPERS[np.ldexp] = helper_ldexp
@@ -219,59 +232,63 @@ UFUNC_HELPERS[np.ldexp] = helper_ldexp
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
- return [1., 1.], None
+ return [None, None], None
else:
- return [1., 1.], unit1
+ return [None, None], unit1
UFUNC_HELPERS[np.copysign] = helper_copysign
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
- scale1 = unit1.to(dimensionless_unscaled) if unit1 is not None else 1.
- scale2 = unit2.to(dimensionless_unscaled) if unit2 is not None else 1.
+ converter1 = (get_converter(unit1, dimensionless_unscaled)
+ if unit1 is not None else None)
+ converter2 = (get_converter(unit2, dimensionless_unscaled)
+ if unit2 is not None else None)
except UnitsError:
raise TypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
- return [scale1, scale2], dimensionless_unscaled
+ return ([converter1, converter2], dimensionless_unscaled)
UFUNC_HELPERS[np.logaddexp] = helper_two_arg_dimensionless
UFUNC_HELPERS[np.logaddexp2] = helper_two_arg_dimensionless
-def find_scales(f, *units):
+def get_converters_and_unit(f, *units):
- scales = [1., 1.]
+ converters = [None, None]
# no units for any input -- e.g., np.add(a1, a2, out=q)
if all(unit is None for unit in units):
- return scales, dimensionless_unscaled
+ return converters, dimensionless_unscaled
fixed, changeable = (1, 0) if units[1] is None else (0, 1)
if units[fixed] is None:
try:
- scales[changeable] = units[changeable].to(dimensionless_unscaled)
+ converters[changeable] = get_converter(units[changeable],
+ dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
- scales[fixed] = 0.
- return scales, units[changeable]
+ converters[fixed] = False
+ return converters, units[changeable]
else:
- return scales, dimensionless_unscaled
+ return converters, dimensionless_unscaled
else:
try:
- scales[changeable] = units[changeable].to(units[fixed])
+ converters[changeable] = get_converter(units[changeable],
+ units[fixed])
except UnitsError:
raise UnitsError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
- return scales, units[fixed]
+ return converters, units[fixed]
def helper_twoarg_invariant(f, unit1, unit2):
- return find_scales(f, unit1, unit2)
+ return get_converters_and_unit(f, unit1, unit2)
UFUNC_HELPERS[np.add] = helper_twoarg_invariant
UFUNC_HELPERS[np.subtract] = helper_twoarg_invariant
@@ -287,8 +304,8 @@ UFUNC_HELPERS[np.fmod] = helper_twoarg_invariant
def helper_twoarg_comparison(f, unit1, unit2):
- scales, _ = find_scales(f, unit1, unit2)
- return scales, None
+ converters, _ = get_converters_and_unit(f, unit1, unit2)
+ return converters, None
UFUNC_HELPERS[np.greater] = helper_twoarg_comparison
UFUNC_HELPERS[np.greater_equal] = helper_twoarg_comparison
@@ -300,7 +317,7 @@ UFUNC_HELPERS[np.equal] = helper_twoarg_comparison
def helper_twoarg_invtrig(f, unit1, unit2):
from .si import radian
- scales, _ = find_scales(f, unit1, unit2)
+ scales, _ = get_converters_and_unit(f, unit1, unit2)
return scales, radian
UFUNC_HELPERS[np.arctan2] = helper_twoarg_invtrig
diff --git a/astropy/units/si.py b/astropy/units/si.py
index 039e0e0..acea76f 100644
--- a/astropy/units/si.py
+++ b/astropy/units/si.py
@@ -36,7 +36,7 @@ def_unit(['micron'], um, namespace=_ns,
def_unit(['Angstrom', 'AA', 'angstrom'], 0.1 * nm, namespace=_ns,
doc="ångström: 10 ** -10 m",
- format={'latex': r'\overset{\circ}{A}', 'unicode': 'Å',
+ format={'latex': r'\mathring{A}', 'unicode': 'Å',
'vounit': 'Angstrom'})
diff --git a/astropy/units/tests/test_equivalencies.py b/astropy/units/tests/test_equivalencies.py
index f75b078..f796339 100644
--- a/astropy/units/tests/test_equivalencies.py
+++ b/astropy/units/tests/test_equivalencies.py
@@ -476,6 +476,14 @@ def test_equivalency_context():
assert all(eq in set(eq_on) for eq in eq_off)
assert set(eq_off) < set(eq_on)
+ # Check the equivalency manager also works in ufunc evaluations,
+ # not just using (wrong) scaling. [#2496]
+ l2v = u.doppler_optical(6000 * u.angstrom)
+ l1 = 6010 * u.angstrom
+ assert l1.to(u.km/u.s, equivalencies=l2v) > 100. * u.km / u.s
+ with u.set_enabled_equivalencies(l2v):
+ assert l1 > 100. * u.km / u.s
+ assert abs((l1 - 500. * u.km / u.s).to(u.angstrom)) < 1. * u.km/u.s
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
@@ -519,11 +527,10 @@ def test_temperature():
def test_temperature_energy():
- from ... import constants
x = 1000 * u.K
y = (x * constants.k_B).to(u.keV)
- assert_allclose(x.to(u.keV, u.temperature_energy()).value, y)
- assert_allclose(y.to(u.K, u.temperature_energy()).value, x)
+ assert_allclose(x.to(u.keV, u.temperature_energy()).value, y.value)
+ assert_allclose(y.to(u.K, u.temperature_energy()).value, x.value)
def test_compose_equivalencies():
diff --git a/astropy/units/tests/test_quantity_ufuncs.py b/astropy/units/tests/test_quantity_ufuncs.py
index cb89e84..7e5b9bb 100644
--- a/astropy/units/tests/test_quantity_ufuncs.py
+++ b/astropy/units/tests/test_quantity_ufuncs.py
@@ -134,7 +134,7 @@ class TestQuantityTrigonometricFuncs(object):
q3 = q1 / q2
q4 = 1.
at2 = np.arctan2(q3, q4)
- assert_allclose(at2, np.arctan2(q3.to(1).value, q4))
+ assert_allclose(at2.value, np.arctan2(q3.to(1).value, q4))
def test_arctan2_invalid(self):
with pytest.raises(u.UnitsError) as exc:
diff --git a/astropy/utils/compat/numpycompat.py b/astropy/utils/compat/numpycompat.py
index 4486c14..c3447ed 100644
--- a/astropy/utils/compat/numpycompat.py
+++ b/astropy/utils/compat/numpycompat.py
@@ -45,5 +45,37 @@ def _monkeypatch_unicode_mask_fill_values():
ma_core._check_fill_value = _check_fill_value
+def _register_patched_dtype_reduce():
+ """
+ Numpy < 1.7 has a bug when copying/pickling dtype objects with a
+ zero-width void type--i.e. ``np.dtype('V0')``. Specifically, although
+ creating a void type is perfectly valid, it crashes when instantiating
+ a dtype using a format string of 'V0', which is what is normally returned
+ by dtype.__reduce__() for these dtypes.
+
+ See https://github.com/astropy/astropy/pull/3283#issuecomment-81667461
+ """
+
+ if NUMPY_LT_1_7:
+ import numpy as np
+ import copy_reg
+
+ # Originally this created an alternate constructor that fixed this
+ # issue, and returned that constructor from the new reduce_dtype;
+ # however that broke pickling since functions can't be pickled, so now
+ # we fix the issue directly within the custom __reduce__
+
+ def reduce_dtype(obj):
+ info = obj.__reduce__()
+ args = info[1]
+ if args[0] == 'V0':
+ args = ('V',) + args[1:]
+ info = (info[0], args) + info[2:]
+ return info
+
+ copy_reg.pickle(np.dtype, reduce_dtype)
+
+
if not _ASTROPY_SETUP_:
_monkeypatch_unicode_mask_fill_values()
+ _register_patched_dtype_reduce()
diff --git a/astropy/version.py b/astropy/version.py
index ec2098e..5fde8fc 100644
--- a/astropy/version.py
+++ b/astropy/version.py
@@ -1,17 +1,17 @@
-# Autogenerated by Astropy's setup.py on 2015-03-06 18:40:04.115278
+# Autogenerated by Astropy's setup.py on 2015-04-16 12:07:06.013861
from __future__ import unicode_literals
import datetime
-version = "1.0.1"
-githash = "9ceec21e4c3755f52932940a35af613858e14eb9"
+version = "1.0.2"
+githash = "c36b85aedcf1f3375c6a6a32daf912a0196d8b4a"
major = 1
minor = 0
-bugfix = 1
+bugfix = 2
release = True
-timestamp = datetime.datetime(2015, 3, 6, 18, 40, 4, 115278)
+timestamp = datetime.datetime(2015, 4, 16, 12, 7, 6, 13861)
debug = False
try:
diff --git a/astropy/vo/samp/__init__.py b/astropy/vo/samp/__init__.py
index a87a25f..fef22c6 100644
--- a/astropy/vo/samp/__init__.py
+++ b/astropy/vo/samp/__init__.py
@@ -31,4 +31,8 @@ class Conf(_config.ConfigNamespace):
"Whether to allow `astropy.vo.samp` to use "
"the internet, if available.",
aliases=['astropy.vo.samp.utils.use_internet'])
+
+ n_retries = _config.ConfigItem(10,
+ "How many times to retry communications when they fail")
+
conf = Conf()
diff --git a/astropy/vo/samp/client.py b/astropy/vo/samp/client.py
index 9ba9150..2c47012 100644
--- a/astropy/vo/samp/client.py
+++ b/astropy/vo/samp/client.py
@@ -238,6 +238,8 @@ class SAMPClient(object):
if read_ready:
self.client.handle_request()
+ self.client.server_close()
+
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params,
message):
diff --git a/astropy/vo/samp/hub.py b/astropy/vo/samp/hub.py
index f0d7366..40c35b5 100644
--- a/astropy/vo/samp/hub.py
+++ b/astropy/vo/samp/hub.py
@@ -30,7 +30,8 @@ from .constants import SSL_SUPPORT
if SSL_SUPPORT:
import ssl
- from .ssl_utils import SafeTransport, SecureXMLRPCServer
+ from .ssl_utils import (SafeTransport, SecureXMLRPCServer,
+ get_ssl_version_name)
__all__ = ['SAMPHubServer', 'WebProfileDialog']
@@ -507,8 +508,8 @@ class SAMPHubServer(object):
params['hub.ssl.certificate'] = cert_reqs_types[self._cert_reqs]
# SSL protocol version
- ssl_protocol_types = ["SSLv2", "SSLv3", "SSLv23", "TLSv1"]
- params['hub.ssl.protocol'] = ssl_protocol_types[self._ssl_version]
+ params['hub.ssl.protocol'] = get_ssl_version_name(
+ self._ssl_version)
return params
@@ -620,6 +621,8 @@ class SAMPHubServer(object):
if read_ready:
self._web_profile_server.handle_request()
+ self._server.server_close()
+
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
@@ -1050,50 +1053,16 @@ class SAMPHubServer(object):
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
+ arg_params = (sender_public_id, message)
+ samp_method_name = "receiveNotification"
- if recipient_private_key is None:
- raise SAMPHubError("Invalid client ID")
-
- for attempt in range(10):
-
- if not self._is_running:
- time.sleep(0.01)
- continue
-
- try:
-
- if (self._web_profile and
- recipient_private_key in self._web_profile_callbacks):
-
- # Web Profile
- callback = {"samp.methodName": "receiveNotification",
- "samp.params": [sender_public_id, message]}
- self._web_profile_callbacks[recipient_private_key].put(callback)
-
- else:
-
- # Standard Profile
- hub = self._xmlrpc_endpoints[recipient_public_id][1]
- hub.samp.client.receiveNotification(recipient_private_key,
- sender_public_id,
- message)
-
- except xmlrpc.Fault as exc:
- log.debug("%s XML-RPC endpoint error (attempt %d): %s"
- % (recipient_public_id, attempt + 1,
- exc.faultString))
- time.sleep(0.01)
- else:
- return
-
- # If we are here, then the above attempts failed
- raise SAMPHubError("notification failed after 10 attempts")
+ self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("%s notification from client %s to client %s failed [%s]"
% (message["samp.mtype"], sender_public_id,
recipient_public_id, exc),
- SAMPWarning)
+ SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
@@ -1156,44 +1125,10 @@ class SAMPHubServer(object):
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
+ arg_params = (sender_public_id, msg_id, message)
+ samp_methodName = "receiveCall"
- if recipient_private_key is None:
- raise SAMPHubError("Invalid client ID")
-
- for attempt in range(10):
-
- if not self._is_running:
- time.sleep(0.01)
- continue
-
- try:
-
- if (self._web_profile and
- recipient_private_key in self._web_profile_callbacks):
-
- # Web Profile
- callback = {"samp.methodName": "receiveCall",
- "samp.params": [sender_public_id, msg_id, message]}
- self._web_profile_callbacks[recipient_private_key].put(callback)
-
- else:
-
- # Standard Profile
- hub = self._xmlrpc_endpoints[recipient_public_id][1]
- hub.samp.client.receiveCall(recipient_private_key,
- sender_public_id, msg_id,
- message)
-
- except xmlrpc.Fault as exc:
- log.debug("%s XML-RPC endpoint error (attempt %d): %s"
- % (recipient_public_id, attempt + 1,
- exc.faultString))
- time.sleep(0.01)
- else:
- return
-
- # If we are here, then the above attempts failed
- raise SAMPHubError("call failed after 10 attempts")
+ self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params)
except Exception as exc:
warnings.warn("%s call %s from client %s to client %s failed [%s,%s]"
@@ -1302,47 +1237,10 @@ class SAMPHubServer(object):
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
+ arg_params = (responder_public_id, recipient_msg_tag, response)
+ samp_method_name = "receiveResponse"
- if recipient_private_key is None:
- raise SAMPHubError("Invalid client ID")
-
- for attempt in range(10):
-
- if not self._is_running:
- time.sleep(0.01)
- continue
-
- try:
-
- if (self._web_profile and
- recipient_private_key in self._web_profile_callbacks):
-
- # Web Profile
- callback = {"samp.methodName": "receiveResponse",
- "samp.params": [responder_public_id,
- recipient_msg_tag,
- response]}
- self._web_profile_callbacks[recipient_private_key].put(callback)
-
- else:
-
- # Standard Profile
- hub = self._xmlrpc_endpoints[recipient_public_id][1]
- hub.samp.client.receiveResponse(recipient_private_key,
- responder_public_id,
- recipient_msg_tag,
- response)
-
- except xmlrpc.Fault as exc:
- log.debug("%s XML-RPC endpoint error (attempt %d): %s"
- % (recipient_public_id, attempt + 1,
- exc.faultString))
- time.sleep(0.01)
- else:
- return
-
- # If we are here, then the above attempts failed
- raise SAMPHubError("reply failed after 10 attempts")
+ self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("%s reply from client %s to client %s failed [%s]"
@@ -1350,6 +1248,62 @@ class SAMPHubServer(object):
recipient_public_id, exc),
SAMPWarning)
+ def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
+ """
+ This method is used to retry a SAMP call several times.
+
+ Parameters
+ ----------
+ recipient_private_key
+ The private key of the receiver of the call
+ recipient_public_id
+ The public key of the receiver of the call
+ samp_method_name : str
+ The name of the SAMP method to call
+ arg_params : tuple
+ Any additional arguments to be passed to the SAMP method
+ """
+
+ if recipient_private_key is None:
+ raise SAMPHubError("Invalid client ID")
+
+ from . import conf
+
+ for attempt in range(conf.n_retries):
+
+ if not self._is_running:
+ time.sleep(0.01)
+ continue
+
+ try:
+
+ if (self._web_profile and
+ recipient_private_key in self._web_profile_callbacks):
+
+ # Web Profile
+ callback = {"samp.methodName": samp_method_name,
+ "samp.params": arg_params}
+ self._web_profile_callbacks[recipient_private_key].put(callback)
+
+ else:
+
+ # Standard Profile
+ hub = self._xmlrpc_endpoints[recipient_public_id][1]
+ getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
+
+ except xmlrpc.Fault as exc:
+ log.debug("%s XML-RPC endpoint error (attempt %d): %s"
+ % (recipient_public_id, attempt + 1,
+ exc.faultString))
+ time.sleep(0.01)
+ else:
+ return
+
+ # If we are here, then the above attempts failed
+ error_message = samp_method_name + " failed after " + str(conf.n_retries) + " attempts"
+ raise SAMPHubError(error_message)
+
+
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
diff --git a/astropy/vo/samp/hub_proxy.py b/astropy/vo/samp/hub_proxy.py
index d055b50..9d29a7b 100644
--- a/astropy/vo/samp/hub_proxy.py
+++ b/astropy/vo/samp/hub_proxy.py
@@ -148,6 +148,9 @@ class SAMPHubProxy(object):
self._connected = False
self.lockfile = {}
+ def server_close(self):
+ self.proxy.server_close()
+
@property
def _samp_hub(self):
"""
diff --git a/astropy/vo/samp/ssl_utils.py b/astropy/vo/samp/ssl_utils.py
index 94acc16..05e5f10 100644
--- a/astropy/vo/samp/ssl_utils.py
+++ b/astropy/vo/samp/ssl_utils.py
@@ -19,47 +19,52 @@ from .standard_profile import ThreadingXMLRPCServer
__all__ = []
-if six.PY2:
+from ...extern.six.moves.http_client import HTTPConnection, HTTPS_PORT
- from ...extern.six.moves.http_client import HTTPConnection, HTTP, HTTPS_PORT
- class HTTPSConnection(HTTPConnection):
- """
- This class allows communication via SSL.
- """
+class HTTPSConnection(HTTPConnection):
+ """
+ This class allows communication via SSL.
+ """
- default_port = HTTPS_PORT
+ default_port = HTTPS_PORT
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- cert_reqs=ssl.CERT_NONE, ca_certs=None,
- ssl_version=None, strict=None):
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ cert_reqs=ssl.CERT_NONE, ca_certs=None,
+ ssl_version=None):
- HTTPConnection.__init__(self, host, port, strict)
+ HTTPConnection.__init__(self, host, port)
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.ca_certs = ca_certs
- self.ssl_version = ssl_version
-
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect((self.host, self.port))
- # We have to explicitly not pass the ssl_version to
- # `ssl.wrap_socket` if it's None.
- kwargs = {
- 'server_size': False,
- 'certfile': self.cert_file,
- 'keyfile': self.key_file,
- 'cert_reqs': self.cert_reqs,
- 'ca_certs': self.ca_certs,
- }
- if self.ssl_version is not None:
- kwargs['ssl_version'] = self.ssl_version
- sslconn = ssl.wrap_socket(sock, **args)
- self.sock = sslconn
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.ca_certs = ca_certs
+ self.ssl_version = ssl_version
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((self.host, self.port))
+ # We have to explicitly not pass the ssl_version to
+ # `ssl.wrap_socket` if it's None.
+ kwargs = {
+ 'server_side': False,
+ 'certfile': self.cert_file,
+ 'keyfile': self.key_file,
+ 'cert_reqs': self.cert_reqs,
+ 'ca_certs': self.ca_certs,
+ }
+ if self.ssl_version is not None:
+ kwargs['ssl_version'] = self.ssl_version
+ else:
+ kwargs['ssl_version'] = ssl.PROTOCOL_TLSv1
+ sslconn = ssl.wrap_socket(sock, **kwargs)
+ self.sock = sslconn
+
+
+if six.PY2:
+ from ...extern.six.moves.http_client import HTTP
class HTTPS(HTTP):
"""
@@ -80,7 +85,7 @@ if six.PY2:
self._setup(self._connection_class(host, port, key_file,
cert_file, cert_reqs,
- ca_certs, ssl_version, None))
+ ca_certs, ssl_version))
# we never actually use these for anything, but we keep them
# here for compatibility with post-1.5.2 CVS.
@@ -91,10 +96,6 @@ if six.PY2:
"Get the response from the server."
return self._conn.getresponse(buffering)
-else:
-
- from ...extern.six.moves.http_client import HTTPSConnection
-
class SafeTransport(xmlrpc.Transport):
"""
@@ -168,5 +169,33 @@ class SecureXMLRPCServer(ThreadingXMLRPCServer):
}
if self.ssl_version is not None:
kwargs['ssl_version'] = self.ssl_version
+ else:
+ kwargs['ssl_version'] = ssl.PROTOCOL_TLSv1
sslconn = ssl.wrap_socket(sock, **kwargs)
return sslconn, addr
+
+
+def get_ssl_version_name(ssl_version):
+ if ssl_version is None:
+ # create_default_context added after the OpenSSL bugfix in
+ # Python 2.7.9 etc. It's the best way to get the default SSL
+ # protocol from Python ... otherwise, we just assume it's the
+ # old default of PROTOCOL_SSLv23.
+ if hasattr(ssl, 'create_default_context'):
+ context = ssl.create_default_context()
+ ssl_version = context.protocol
+ else:
+ ssl_version = ssl.PROTOCOL_TLSv1
+
+ # get_protocol_name is an undocumented method
+ if hasattr(ssl, 'get_protocol_name'):
+ return ssl.get_protocol_name(ssl_version)
+ else:
+ # Not all versions of Python support all protocols,
+ # so we have to only accept those that are present.
+ for protocol in ['SSLv2', 'SSLv23', 'SSLv3', 'TLSv1', 'TLSv1_1',
+ 'TLSv1_2']:
+ value = getattr(ssl, 'PROTOCOL_' + protocol, None)
+ if ssl_version == value:
+ return protocol
+ return '<unknown>'
diff --git a/astropy/vo/samp/tests/test_standard_profile.py b/astropy/vo/samp/tests/test_standard_profile.py
index 21507bb..86cec81 100644
--- a/astropy/vo/samp/tests/test_standard_profile.py
+++ b/astropy/vo/samp/tests/test_standard_profile.py
@@ -252,7 +252,6 @@ class TestStandardProfile(object):
# https://github.com/astropy/astropy/issues/2126
# https://github.com/astropy/astropy/issues/2321
- at pytest.mark.xfail
class TestStandardProfileHTTPSHub(TestStandardProfile):
conf = 'https_hub'
@@ -266,7 +265,6 @@ class TestStandardProfileHTTPSHub(TestStandardProfile):
}
- at pytest.mark.xfail
class TestStandardProfileHTTPSHubClient(TestStandardProfile):
conf = 'https_hub_client'
diff --git a/astropy/vo/samp/tests/web_profile_test_helpers.py b/astropy/vo/samp/tests/web_profile_test_helpers.py
index a5fce3d..2ce47e5 100644
--- a/astropy/vo/samp/tests/web_profile_test_helpers.py
+++ b/astropy/vo/samp/tests/web_profile_test_helpers.py
@@ -173,6 +173,8 @@ class SAMPWebClient(SAMPClient):
self.receive_response(self._private_key,
*result['samp.params'])
+ self.hub.server_close()
+
def register(self):
"""
Register the client to the SAMP Hub.
diff --git a/astropy/wcs/tests/test_wcs.py b/astropy/wcs/tests/test_wcs.py
index 1217b24..b919ed1 100644
--- a/astropy/wcs/tests/test_wcs.py
+++ b/astropy/wcs/tests/test_wcs.py
@@ -368,6 +368,15 @@ def test_to_fits():
assert header_string == wfits[0].header[-8:]
+def test_to_header_warning():
+ fits_name = get_pkg_data_filename('data/sip.fits')
+ x = wcs.WCS(fits_name)
+ with catch_warnings() as w:
+ x.to_header()
+ assert len(w) == 1
+ assert 'A_ORDER' in str(w[0])
+
+
@raises(wcs.InvalidTransformError)
def test_find_all_wcs_crash():
"""
diff --git a/astropy/wcs/wcs.py b/astropy/wcs/wcs.py
index b6184f2..70b16be 100644
--- a/astropy/wcs/wcs.py
+++ b/astropy/wcs/wcs.py
@@ -2292,12 +2292,11 @@ naxis kwarg.
return hdulist
- def to_header(self, relax=False, key=None):
- """
- Generate an `astropy.io.fits.Header` object with the basic WCS and SIP
- information stored in this object. This should be logically
- identical to the input FITS file, but it will be normalized in
- a number of ways.
+ def to_header(self, relax=None, key=None):
+ """Generate an `astropy.io.fits.Header` object with the basic WCS
+ and SIP information stored in this object. This should be
+ logically identical to the input FITS file, but it will be
+ normalized in a number of ways.
.. warning::
@@ -2319,6 +2318,12 @@ naxis kwarg.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
+
+ If the ``relax`` keyword argument is not given and any
+ keywords were omitted from the output, an
+ `~astropy.utils.exceptions.AstropyWarning` is displayed.
+ To override this, explicitly pass a value to ``relax``.
+
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
@@ -2360,8 +2365,12 @@ naxis kwarg.
8. Keyword order may be changed.
-
"""
+ display_warning = False
+ if relax is None:
+ display_warning = True
+ relax = False
+
if key is not None:
self.wcs.alt = key
@@ -2381,9 +2390,23 @@ naxis kwarg.
for key, val in self._write_sip_kw().items():
header[key] = val
+ if display_warning:
+ full_header = self.to_header(relax=True, key=key)
+ missing_keys = []
+ for key, val in full_header.items():
+ if key not in header:
+ missing_keys.append(key)
+
+ if len(missing_keys):
+ warnings.warn(
+ "Some non-standard WCS keywords were excluded: {0} "
+ "Use the ``relax`` kwarg to control this.".format(
+ ', '.join(missing_keys)),
+ AstropyWarning)
+
return header
- def to_header_string(self, relax=False):
+ def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
diff --git a/astropy_helpers/CHANGES.rst b/astropy_helpers/CHANGES.rst
index fa5b062..b91565b 100644
--- a/astropy_helpers/CHANGES.rst
+++ b/astropy_helpers/CHANGES.rst
@@ -1,6 +1,25 @@
astropy-helpers Changelog
=========================
+1.0.2 (2015-04-02)
+------------------
+
+- Various fixes enabling the astropy-helpers Sphinx build command and
+ Sphinx extensions to work with Sphinx 1.3. [#148]
+
+- More improvement to the ability to handle multiple versions of
+ astropy-helpers being imported in the same Python interpreter session
+ in the (somewhat rare) case of nested installs. [#147]
+
+- To better support high resolution displays, use SVG for the astropy
+ logo and linkout image, falling back to PNGs for browsers that
+ support it. [#150, #151]
+
+- Improve ``setup_helpers.get_compiler_version`` to work with more compilers,
+ and to return more info. This will help fix builds of Astropy on less
+ common compilers, like Sun C. [#153]
+
+
1.0.1 (2015-03-04)
------------------
diff --git a/astropy_helpers/ah_bootstrap.py b/astropy_helpers/ah_bootstrap.py
index e9521ea..7e145e3 100644
--- a/astropy_helpers/ah_bootstrap.py
+++ b/astropy_helpers/ah_bootstrap.py
@@ -282,6 +282,19 @@ class _Bootstrapper(object):
strategies = ['local_directory', 'local_file', 'index']
dist = None
+ # First, remove any previously imported versions of astropy_helpers;
+ # this is necessary for nested installs where one package's installer
+ # is installing another package via setuptools.sandbox.run_setup, as in
+ # the case of setup_requires
+ for key in list(sys.modules):
+ try:
+ if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
+ del sys.modules[key]
+ except AttributeError:
+ # Sometimes mysterious non-string things can turn up in
+ # sys.modules
+ continue
+
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
@@ -311,19 +324,6 @@ class _Bootstrapper(object):
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
- # But first, remove any previously imported versions of
- # astropy_helpers; this is necessary for nested installs where one
- # package's installer is installing another package via
- # setuptools.sandbox.run_set, as in the case of setup_requires
- for key in list(sys.modules):
- try:
- if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
- del sys.modules[key]
- except AttributeError:
- # Sometimes mysterious non-string things can turn up in
- # sys.modules
- continue
-
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
diff --git a/astropy_helpers/astropy_helpers.egg-info/PKG-INFO b/astropy_helpers/astropy_helpers.egg-info/PKG-INFO
index 2076753..59537e5 100644
--- a/astropy_helpers/astropy_helpers.egg-info/PKG-INFO
+++ b/astropy_helpers/astropy_helpers.egg-info/PKG-INFO
@@ -1,12 +1,12 @@
Metadata-Version: 1.1
Name: astropy-helpers
-Version: 1.0.1
+Version: 1.0.2
Summary: Utilities for building and installing Astropy, Astropy affiliated packages, and their respective documentation.
Home-page: http://astropy.org
Author: The Astropy Developers
Author-email: astropy.team at gmail.com
License: BSD
-Download-URL: http://pypi.python.org/packages/source/a/astropy-helpers/astropy-helpers-1.0.1.tar.gz
+Download-URL: http://pypi.python.org/packages/source/a/astropy-helpers/astropy-helpers-1.0.2.tar.gz
Description: astropy-helpers
===============
diff --git a/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt b/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt
index 77066e0..0fcd151 100644
--- a/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt
+++ b/astropy_helpers/astropy_helpers.egg-info/SOURCES.txt
@@ -27,6 +27,7 @@ astropy_helpers/commands/install.py
astropy_helpers/commands/install_lib.py
astropy_helpers/commands/register.py
astropy_helpers/commands/setup_package.py
+astropy_helpers/commands/src/compiler.c
astropy_helpers/compat/__init__.py
astropy_helpers/compat/subprocess.py
astropy_helpers/compat/_subprocess_py2/__init__.py
@@ -52,9 +53,26 @@ astropy_helpers/sphinx/ext/tocdepthfix.py
astropy_helpers/sphinx/ext/traitsdoc.py
astropy_helpers/sphinx/ext/utils.py
astropy_helpers/sphinx/ext/viewcode.py
+astropy_helpers/sphinx/ext/templates/autosummary_core/base.rst
+astropy_helpers/sphinx/ext/templates/autosummary_core/class.rst
+astropy_helpers/sphinx/ext/templates/autosummary_core/module.rst
astropy_helpers/sphinx/ext/tests/__init__.py
astropy_helpers/sphinx/ext/tests/test_autodoc_enhancements.py
astropy_helpers/sphinx/ext/tests/test_automodapi.py
astropy_helpers/sphinx/ext/tests/test_automodsumm.py
astropy_helpers/sphinx/ext/tests/test_docscrape.py
-astropy_helpers/sphinx/ext/tests/test_utils.py
\ No newline at end of file
+astropy_helpers/sphinx/ext/tests/test_utils.py
+astropy_helpers/sphinx/local/python3links.inv
+astropy_helpers/sphinx/themes/bootstrap-astropy/globaltoc.html
+astropy_helpers/sphinx/themes/bootstrap-astropy/layout.html
+astropy_helpers/sphinx/themes/bootstrap-astropy/localtoc.html
+astropy_helpers/sphinx/themes/bootstrap-astropy/searchbox.html
+astropy_helpers/sphinx/themes/bootstrap-astropy/theme.conf
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout_20.png
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo_32.png
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/copybutton.js
+astropy_helpers/sphinx/themes/bootstrap-astropy/static/sidebar.js
\ No newline at end of file
diff --git a/astropy_helpers/astropy_helpers/commands/build_sphinx.py b/astropy_helpers/astropy_helpers/commands/build_sphinx.py
index c52115a..4517e92 100644
--- a/astropy_helpers/astropy_helpers/commands/build_sphinx.py
+++ b/astropy_helpers/astropy_helpers/commands/build_sphinx.py
@@ -10,8 +10,11 @@ import textwrap
from distutils import log
from distutils.cmd import DistutilsOptionError
+import sphinx
from sphinx.setup_command import BuildDoc as SphinxBuildDoc
+from ..utils import minversion
+
PY3 = sys.version_info[0] >= 3
@@ -146,6 +149,12 @@ class AstropyBuildSphinx(SphinxBuildDoc):
subproccode[i] = repr(val)
subproccode = ''.join(subproccode)
+ # This is a quick gross hack, but it ensures that the code grabbed from
+ # SphinxBuildDoc.run will work in Python 2 if it uses the print
+ # function
+ if minversion(sphinx, '1.3'):
+ subproccode = 'from __future__ import print_function' + subproccode
+
if self.no_intersphinx:
# the confoverrides variable in sphinx.setup_command.BuildDoc can
# be used to override the conf.py ... but this could well break
diff --git a/astropy_helpers/astropy_helpers/setup_helpers.py b/astropy_helpers/astropy_helpers/setup_helpers.py
index 7e50d46..eec83f1 100644
--- a/astropy_helpers/astropy_helpers/setup_helpers.py
+++ b/astropy_helpers/astropy_helpers/setup_helpers.py
@@ -49,7 +49,8 @@ _module_state = {
'registered_commands': None,
'have_cython': False,
'have_sphinx': False,
- 'package_cache': None
+ 'package_cache': None,
+ 'compiler_version_cache': {}
}
try:
@@ -199,15 +200,43 @@ def adjust_compiler(package):
def get_compiler_version(compiler):
+ if compiler in _module_state['compiler_version_cache']:
+ return _module_state['compiler_version_cache'][compiler]
- process = subprocess.Popen(
- shlex.split(compiler) + ['--version'], stdout=subprocess.PIPE)
+ # Different flags to try to get the compiler version
+ # TODO: It might be worth making this configurable to support
+ # arbitrary odd compilers; though all bets may be off in such
+ # cases anyway
+ flags = ['--version', '--Version', '-version', '-Version',
+ '-v', '-V']
- output = process.communicate()[0].strip()
- try:
- version = output.split()[0]
- except IndexError:
- return 'unknown'
+ def try_get_version(flag):
+ process = subprocess.Popen(
+ shlex.split(compiler) + [flag],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout, stderr = process.communicate()
+
+ if process.returncode != 0:
+ return 'unknown'
+
+ output = stdout.strip()
+ if not output:
+ # Some compilers return their version info on stderr
+ output = stderr.strip()
+
+ if not output:
+ output = 'unknown'
+
+ return output
+
+ for flag in flags:
+ version = try_get_version(flag)
+ if version != 'unknown':
+ break
+
+ # Cache results to speed up future calls
+ _module_state['compiler_version_cache'][compiler] = version
return version
diff --git a/astropy_helpers/astropy_helpers/sphinx/conf.py b/astropy_helpers/astropy_helpers/sphinx/conf.py
index d6809b5..bca96b9 100644
--- a/astropy_helpers/astropy_helpers/sphinx/conf.py
+++ b/astropy_helpers/astropy_helpers/sphinx/conf.py
@@ -157,6 +157,9 @@ automodapi_toctreedirnm = 'api'
# the __init__ docstring
autoclass_content = "both"
+# Render inheritance diagrams in SVG
+graphviz_output_format = "svg"
+
# -- Options for HTML output -------------------------------------------------
diff --git a/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py b/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py
index ee63814..9a1b64f 100644
--- a/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py
+++ b/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py
@@ -43,13 +43,16 @@ def type_object_attrgetter(obj, attr, *defargs):
of autodoc.
"""
- if attr in obj.__dict__ and isinstance(obj.__dict__[attr], property):
- # Note, this should only be used for properties--for any other type of
- # descriptor (classmethod, for example) this can mess up existing
- # expectcations of what getattr(cls, ...) returns
- return obj.__dict__[attr]
- else:
- return getattr(obj, attr, *defargs)
+ for base in obj.__mro__:
+ if attr in base.__dict__:
+ if isinstance(base.__dict__[attr], property):
+ # Note, this should only be used for properties--for any other
+ # type of descriptor (classmethod, for example) this can mess
+ # up existing expectations of what getattr(cls, ...) returns
+ return base.__dict__[attr]
+ break
+
+ return getattr(obj, attr, *defargs)
def setup(app):
diff --git a/astropy_helpers/astropy_helpers/sphinx/ext/automodapi.py b/astropy_helpers/astropy_helpers/sphinx/ext/automodapi.py
index db7c4c0..19b628d 100644
--- a/astropy_helpers/astropy_helpers/sphinx/ext/automodapi.py
+++ b/astropy_helpers/astropy_helpers/sphinx/ext/automodapi.py
@@ -85,6 +85,11 @@ import sys
from .utils import find_mod_objs
+if sys.version_info[0] == 3:
+ text_type = str
+else:
+ text_type = unicode
+
automod_templ_modheader = """
{modname} {pkgormod}
@@ -296,7 +301,7 @@ def automodapi_replace(sourcestr, app, dotoctree=True, docname=None,
if app.config.automodapi_writereprocessed:
# sometimes they are unicode, sometimes not, depending on how
# sphinx has processed things
- if isinstance(newsourcestr, unicode):
+ if isinstance(newsourcestr, text_type):
ustr = newsourcestr
else:
ustr = newsourcestr.decode(app.config.source_encoding)
@@ -304,10 +309,16 @@ def automodapi_replace(sourcestr, app, dotoctree=True, docname=None,
if docname is None:
with open(os.path.join(app.srcdir, 'unknown.automodapi'), 'a') as f:
f.write('\n**NEW DOC**\n\n')
- f.write(ustr.encode('utf8'))
+ f.write(ustr)
else:
- with open(os.path.join(app.srcdir, docname + '.automodapi'), 'w') as f:
- f.write(ustr.encode('utf8'))
+ env = app.builder.env
+ # Determine the filename associated with this doc (specifically
+ # the extension)
+ filename = docname + os.path.splitext(env.doc2path(docname))[1]
+ filename += '.automodapi'
+
+ with open(os.path.join(app.srcdir, filename), 'w') as f:
+ f.write(ustr)
return newsourcestr
else:
@@ -330,8 +341,11 @@ def _mod_info(modname, toskip=[], onlylocals=True):
break
# find_mod_objs has already imported modname
+ # TODO: There is probably a cleaner way to do this, though this is pretty
+ # reliable for all Python versions for most cases that we care about.
pkg = sys.modules[modname]
- ispkg = '__init__.' in os.path.split(pkg.__name__)[1]
+ ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and
+ os.path.split(pkg.__file__)[1].startswith('__init__.py'))
return ispkg, hascls, hasfunc
diff --git a/astropy_helpers/astropy_helpers/sphinx/ext/automodsumm.py b/astropy_helpers/astropy_helpers/sphinx/ext/automodsumm.py
index 9a43680..4d0e596 100644
--- a/astropy_helpers/astropy_helpers/sphinx/ext/automodsumm.py
+++ b/astropy_helpers/astropy_helpers/sphinx/ext/automodsumm.py
@@ -172,18 +172,23 @@ class Automodsumm(BaseAutosummary):
self.content = cont
- #for some reason, even though ``currentmodule`` is substituted in, sphinx
- #doesn't necessarily recognize this fact. So we just force it
- #internally, and that seems to fix things
+ # for some reason, even though ``currentmodule`` is substituted in,
+ # sphinx doesn't necessarily recognize this fact. So we just force
+ # it internally, and that seems to fix things
env.temp_data['py:module'] = modname
- #can't use super because Sphinx/docutils has trouble
- #return super(Autosummary,self).run()
+ # can't use super because Sphinx/docutils has trouble return
+ # super(Autosummary,self).run()
nodelist.extend(Autosummary.run(self))
+
return self.warnings + nodelist
finally: # has_content = False for the Automodsumm
self.content = []
+ def get_items(self, names):
+ self.genopt['imported-members'] = True
+ return Autosummary.get_items(self, names)
+
#<-------------------automod-diagram stuff------------------------------------>
class Automoddiagram(InheritanceDiagram):
@@ -220,10 +225,12 @@ class Automoddiagram(InheritanceDiagram):
#<---------------------automodsumm generation stuff--------------------------->
def process_automodsumm_generation(app):
env = app.builder.env
- ext = app.config.source_suffix
- filestosearch = [x + ext for x in env.found_docs
- if os.path.isfile(env.doc2path(x))]\
+ filestosearch = []
+ for docname in env.found_docs:
+ filename = env.doc2path(docname)
+ if os.path.isfile(filename):
+ filestosearch.append(docname + os.path.splitext(filename)[1])
liness = []
for sfn in filestosearch:
@@ -238,10 +245,11 @@ def process_automodsumm_generation(app):
f.write('\n')
for sfn, lines in zip(filestosearch, liness):
+ suffix = os.path.splitext(sfn)[1]
if len(lines) > 0:
generate_automodsumm_docs(lines, sfn, builder=app.builder,
warn=app.warn, info=app.info,
- suffix=app.config.source_suffix,
+ suffix=suffix,
base_path=app.srcdir)
#_automodsummrex = re.compile(r'^(\s*)\.\. automodsumm::\s*([A-Za-z0-9_.]+)\s*'
@@ -281,6 +289,7 @@ def automodsumm_to_autosummary_lines(fn, app):
"""
+
fullfn = os.path.join(app.builder.env.srcdir, fn)
with open(fullfn) as fr:
@@ -288,7 +297,8 @@ def automodsumm_to_autosummary_lines(fn, app):
from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
# Must do the automodapi on the source to get the automodsumm
# that might be in there
- filestr = automodapi_replace(fr.read(), app, True, fn, False)
+ docname = os.path.splitext(fn)[0]
+ filestr = automodapi_replace(fr.read(), app, True, docname, False)
else:
filestr = fr.read()
@@ -353,6 +363,9 @@ def automodsumm_to_autosummary_lines(fn, app):
continue
newlines.append(allindent + nm)
+ # add one newline at the end of the autosummary block
+ newlines.append('')
+
return newlines
diff --git a/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodsumm.py b/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodsumm.py
index cd8afa3..aec7039 100644
--- a/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodsumm.py
+++ b/astropy_helpers/astropy_helpers/sphinx/ext/tests/test_automodsumm.py
@@ -66,7 +66,8 @@ ams_to_asmry_expected = """\
automodsumm_to_autosummary_lines
generate_automodsumm_docs
process_automodsumm_generation
- setup"""
+ setup
+"""
def test_ams_to_asmry(tmpdir):
@@ -97,7 +98,8 @@ ams_cython_expected = """\
.. autosummary::
:p:
- pilot"""
+ pilot
+"""
def test_ams_cython(tmpdir, cython_testpackage):
diff --git a/astropy_helpers/astropy_helpers/sphinx/ext/viewcode.py b/astropy_helpers/astropy_helpers/sphinx/ext/viewcode.py
index dc428a1..d9fdc61 100644
--- a/astropy_helpers/astropy_helpers/sphinx/ext/viewcode.py
+++ b/astropy_helpers/astropy_helpers/sphinx/ext/viewcode.py
@@ -16,6 +16,7 @@ from docutils import nodes
from sphinx import addnodes
from sphinx.locale import _
from sphinx.pycode import ModuleAnalyzer
+from sphinx.util.inspect import safe_getattr
from sphinx.util.nodes import make_refnode
import sys
@@ -51,12 +52,12 @@ def doctree_read(app, doctree):
value = module
for attr in attribute.split('.'):
if attr:
- value = getattr(value, attr)
+ value = safe_getattr(value, attr)
except AttributeError:
app.warn('Didn\'t find %s in %s' % (attribute, module.__name__))
return None
else:
- return getattr(value, '__module__', None)
+ return safe_getattr(value, '__module__', None)
def has_tag(modname, fullname, docname, refname):
diff --git a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg
new file mode 100644
index 0000000..4832496
--- /dev/null
+++ b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_linkout.svg
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ version="1.1"
+ id="svg2"
+ height="167.52676"
+ width="771.12372">
+ <defs
+ id="defs4">
+ <linearGradient
+ id="linearGradient3767">
+ <stop
+ style="stop-color:#ffa900;stop-opacity:1;"
+ offset="0"
+ id="stop3800" />
+ <stop
+ style="stop-color:#fd0000;stop-opacity:1;"
+ offset="1"
+ id="stop3775" />
+ </linearGradient>
+ </defs>
+ <g
+ transform="translate(6.1070519,22.476169)"
+ id="layer1">
+ <g
+ id="text3047"
+ style="fill:#000000;fill-opacity:1;stroke:none">
+ <path
+ id="path4164"
+ style="fill:#ffffff"
+ d="m 5.5129443,73.773459 c 0,15.91 11.0999997,26.640001 27.0099997,26.640001 10.73,0 19.98,-5.365001 28.12,-12.210001 l 0.555,0 1.665,9.99 17.39,0 0,-53.835 c 0,-25.345 -11.1,-39.2200004 -33.855,-39.2200004 -14.43,0 -27.195,5.5500004 -37.1849997,11.8400004 l 7.7699997,14.245 c 7.955,-4.81 16.465,-8.88 25.53,-8.88 12.21,0 16.28,8.14 16.465,17.76 -37.185,4.07 -53.4649997,14.245 -53.4649997,33.67 z m 20.7199997,-1.665 c 0,-9.065 8.14,-15.54 32.745,-18.685 l 0,20.35 c -6.66,6.29 -12 [...]
+ <path
+ id="path4166"
+ style="fill:#ffffff"
+ d="m 97.168882,87.833459 c 8.879998,7.215 22.014998,12.580001 34.409998,12.580001 22.94,0 35.335,-12.580001 35.335,-28.120001 0,-17.02 -13.69,-22.94 -26.085,-27.565 -9.805,-3.515 -18.87,-6.29 -18.87,-13.505 0,-5.735 4.255,-9.99 13.32,-9.99 7.215,0 13.69,3.145 19.98,7.77 l 9.805,-12.95 c -7.4,-5.735 -17.575,-10.9150004 -30.155,-10.9150004 -20.35,0 -33.115,11.2850004 -33.115,27.1950004 0,15.17 13.505,22.015 25.53,26.455 9.62,3.7 19.425,7.03 19.425,14.615 0,6.29 -4.625,10.915 -14.4 [...]
+ <path
+ id="path4168"
+ style="fill:#ffffff"
+ d="m 189.2324,67.483459 c 0,19.61 7.77,32.930001 28.86,32.930001 7.215,0 13.505,-1.665001 18.5,-3.330001 l -3.7,-15.725 c -2.59,1.11 -6.29,2.035 -9.435,2.035 -8.695,0 -12.765,-5.18 -12.765,-15.91 l 0,-43.29 23.125,0 0,-16.8350004 -23.125,0 0,-24.7899996 -17.76,0 -2.59,24.7899996 -14.06,0.925 0,15.9100004 12.95,0 0,43.29 z" />
+ <path
+ id="path4170"
+ style="fill:#ffffff"
+ d="m 252.9331,98.193459 21.275,0 0,-55.5 c 5.55,-13.69 14.245,-18.685 21.46,-18.685 3.885,0 6.29,0.555 9.435,1.48 l 3.7,-18.5000004 c -2.775,-1.295 -5.735,-1.85 -10.36,-1.85 -9.62,0 -19.24,6.4750004 -25.715,18.3150004 l -0.74,0 -1.48,-16.0950004 -17.575,0 0,90.8350004 z" />
+ <path
+ id="path4172"
+ style="fill:#ffffff"
+ d="m 314.22013,52.868459 c 0,30.155 20.535,47.545001 43.105,47.545001 22.755,0 43.29,-17.390001 43.29,-47.545001 0,-30.34 -20.535,-47.7300004 -43.29,-47.7300004 -22.57,0 -43.105,17.3900004 -43.105,47.7300004 z m 21.83,0 c 0,-18.315 7.955,-30.34 21.275,-30.34 13.32,0 21.46,12.025 21.46,30.34 0,18.13 -8.14,30.155 -21.46,30.155 -13.32,0 -21.275,-12.025 -21.275,-30.155 z" />
+ <path
+ id="path4174"
+ style="fill:#ff5000;fill-opacity:1"
+ d="m 433.32833,89.498459 c 9.25,6.845 18.315,10.915001 26.64,10.915001 20.905,0 39.22,-17.760001 39.22,-47.915001 0,-27.565 -11.84,-45.3250004 -35.52,-45.3250004 -10.915,0 -21.46,6.4750004 -30.155,13.3200004 l -0.555,0 -0.74,-11.1000004 -7.03,0 0,128.9450014 8.14,0 0,-32.375 0,-16.465001 z m 0,-7.77 0,-52.725 c 10.73,-9.435 20.165,-14.615 29.045,-14.615 20.35,0 28.12,16.28 28.12,38.11 0,24.05 -12.95,40.7 -30.895,40.7 -6.845,0 -16.095,-3.145 -26.27,-11.47 z" />
+ <path
+ id="path4176"
+ style="fill:#ff5000;fill-opacity:1"
+ d="m 515.26888,129.64346 -1.85,7.215 c 2.405,1.11 5.735,1.85 9.065,1.85 15.91,0 24.79,-14.615 29.785,-29.415 l 35.15,-99.9000014 -8.325,0 -19.055,55.8700004 c -2.405,7.4 -5.365,17.02 -8.14,24.79 l -0.74,0 c -3.33,-7.77 -6.845,-17.39 -9.62,-24.79 l -21.46,-55.8700004 -8.88,0 36.445,90.4650004 -2.405,7.770001 c -4.44,13.505 -11.84,23.495 -22.755,23.495 -2.59,0 -5.365,-0.74 -7.215,-1.48 z" />
+ </g>
+ <path
+ id="path3796"
+ d="m 699.71522,8.7813958 c -71.7644,0 -58.16693,0 -58.16693,0 l 0.18886,86.0270502 0,0 89.32779,0 0.37769,-47.195398"
+ style="fill:none;stroke:#ffffff;stroke-width:7.42324734;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ id="path3798"
+ d="M 690.16624,51.085891 C 750.77539,-8.008031 750.77539,-8.008031 750.77539,-8.008031"
+ style="fill:none;stroke:#ffffff;stroke-width:10.39999962;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ id="path3800"
+ d="M 760.01669,-15.838298 717.15954,-2.7141323 744.40571,24.161702 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:none" />
+ </g>
+</svg>
diff --git a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico
index c2bca3a..16d5af7 100644
Binary files a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico and b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.ico differ
diff --git a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg
new file mode 100644
index 0000000..1d7e101
--- /dev/null
+++ b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/astropy_logo.svg
@@ -0,0 +1,87 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ version="1.1"
+ id="svg2"
+ height="220"
+ width="220">
+ <defs
+ id="defs4">
+ <linearGradient
+ id="linearGradient3767">
+ <stop
+ style="stop-color:#ffa900;stop-opacity:1;"
+ offset="0"
+ id="stop3800" />
+ <stop
+ style="stop-color:#fd0000;stop-opacity:1;"
+ offset="1"
+ id="stop3775" />
+ </linearGradient>
+ <linearGradient
+ spreadMethod="repeat"
+ gradientUnits="userSpaceOnUse"
+ y2="298.258"
+ x2="83.651718"
+ y1="163.12114"
+ x1="195.5511"
+ id="linearGradient3798"
+ xlink:href="#linearGradient3767" />
+ </defs>
+ <g
+ transform="translate(2.5547457,54.766869)"
+ id="layer1">
+ <path
+ clip-path="none"
+ mask="none"
+ transform="matrix(1.0651076,-0.90408718,0.89013474,1.0731629,-250.16148,-67.548145)"
+ d="m 221.23922,233.12846 a 80.322945,68.372307 0 0 1 -80.32294,68.37231 80.322945,68.372307 0 0 1 -80.32295,-68.37231 80.322945,68.372307 0 0 1 80.32295,-68.3723 80.322945,68.372307 0 0 1 80.32294,68.3723 z"
+ id="path3837"
+ style="fill:url(#linearGradient3798);fill-opacity:1;fill-rule:nonzero;stroke:none" />
+ <path
+ transform="matrix(-0.71212781,-0.99456325,-0.9777811,0.72852659,474.31246,145.67479)"
+ d="m 238.396,198.78328 c 32.61949,7.06756 15.40308,37.20961 -8.24653,38.06088 -26.00022,0.93589 -44.08994,-24.61831 -41.97511,-48.94224 2.67817,-30.80329 32.53302,-51.80129 62.38812,-48.26147"
+ id="path5413"
+ style="fill:none;stroke:#ffffff;stroke-width:14.33045673;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ transform="matrix(1.0512298,0.82294962,-0.97239782,0.94168108,304.90482,-361.44933)"
+ d="m 144.4518,337.67926 a 22.223356,12.626906 0 0 1 -22.22335,12.62691 22.223356,12.626906 0 0 1 -22.22336,-12.62691 22.223356,12.626906 0 0 1 22.22336,-12.62691 22.223356,12.626906 0 0 1 22.22335,12.62691 z"
+ id="path3819"
+ style="fill:#ffffff;fill-opacity:1;stroke:none" />
+ <path
+ id="path3853"
+ d="m 136.88413,-13.10567 13.09858,22.1959915 20.1541,-20.0243955 -25.57713,-10.690029 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:2.36031604;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <g
+ transform="matrix(0.95039189,0,0,0.93299787,-2.487134,-0.56516029)"
+ id="g3804">
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:3.72635746;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 142.07904,-24.83606 -2.29208,-13.70422 0.18559,0.452199 0,0"
+ id="path3855-7" />
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:4.02542162;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 127.81295,-26.11257 15.12343,0.663566 -0.4755,0.06597 0,0"
+ id="path3855-7-0" />
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:4.06302309;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 150.77669,-18.240009 -9.57586,-7.57806 0.46343,0.302121 0,0"
+ id="path3855-7-05" />
+ </g>
+ <path
+ id="path3942"
+ d="m 67.476992,131.73602 c 3.410653,2.96251 14.804165,7.71706 18.766169,12.74152 4.041356,5.12504 3.407665,10.98393 3.371624,14.32634"
+ style="fill:none;stroke:#ffffff;stroke-width:17.45736313;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ transform="matrix(0.95949998,0.82835015,0.79893463,-0.95307413,-285.3304,56.888018)"
+ d="m 238.396,198.78328 c 32.61946,7.06755 15.40307,37.20957 -8.24652,38.06085 -26.0002,0.93588 -44.0899,-24.61829 -41.97507,-48.9422 2.70006,-31.05511 32.97695,-52.07076 63.01891,-48.18321 1.11735,0.14459 2.2306,0.32088 3.33799,0.52843"
+ id="path2987-5"
+ style="fill:none;stroke:#ffffff;stroke-width:13.93870544;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ </g>
+</svg>
diff --git a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css
index ba7fded..360b3da 100644
--- a/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css
+++ b/astropy_helpers/astropy_helpers/sphinx/themes/bootstrap-astropy/static/bootstrap-astropy.css
@@ -241,6 +241,8 @@ div.topbar a.brand {
padding: 8px 12px 0px 45px;
margin-left: -10px;
background: transparent url("astropy_logo_32.png") no-repeat 10px 4px;
+ background-image: url("astropy_logo.svg"), none;
+ background-size: 32px 32px;
}
#logotext1 {
@@ -300,6 +302,8 @@ div.topbar ul li a.homelink {
height: 20px;
padding: 5px 0px;
background: transparent url("astropy_linkout_20.png") no-repeat 10px 5px;
+ background-image: url("astropy_linkout.svg"), none;
+ background-size: 91px 20px;
}
div.topbar form {
diff --git a/astropy_helpers/astropy_helpers/utils.py b/astropy_helpers/astropy_helpers/utils.py
index bf5bc2b..9c05341 100644
--- a/astropy_helpers/astropy_helpers/utils.py
+++ b/astropy_helpers/astropy_helpers/utils.py
@@ -592,3 +592,70 @@ def deprecated_attribute(name, since, message=None, alternative=None,
delattr(self, private_name)
return property(get, set, delete)
+
+
+def minversion(module, version, inclusive=True, version_path='__version__'):
+ """
+ Returns `True` if the specified Python module satisfies a minimum version
+ requirement, and `False` if not.
+
+ By default this uses `pkg_resources.parse_version` to do the version
+ comparison if available. Otherwise it falls back on
+ `distutils.version.LooseVersion`.
+
+ Parameters
+ ----------
+
+ module : module or `str`
+ An imported module of which to check the version, or the name of
+ that module (in which case an import of that module is attempted--
+ if this fails `False` is returned).
+
+ version : `str`
+ The version as a string that this module must have at a minimum (e.g.
+ ``'0.12'``).
+
+ inclusive : `bool`
+ The specified version meets the requirement inclusively (i.e. ``>=``)
+ as opposed to strictly greater than (default: `True`).
+
+ version_path : `str`
+ A dotted attribute path to follow in the module for the version.
+ Defaults to just ``'__version__'``, which should work for most Python
+ modules.
+
+ Examples
+ --------
+
+ >>> import astropy
+ >>> minversion(astropy, '0.4.4')
+ True
+ """
+
+ if isinstance(module, types.ModuleType):
+ module_name = module.__name__
+ elif isinstance(module, six.string_types):
+ module_name = module
+ try:
+ module = resolve_name(module_name)
+ except ImportError:
+ return False
+ else:
+ raise ValueError('module argument must be an actual imported '
+ 'module, or the import name of the module; '
+ 'got {0!r}'.format(module))
+
+ if '.' not in version_path:
+ have_version = getattr(module, version_path)
+ else:
+ have_version = resolve_name('.'.join([module.__name__, version_path]))
+
+ try:
+ from pkg_resources import parse_version
+ except ImportError:
+ from distutils.version import LooseVersion as parse_version
+
+ if inclusive:
+ return parse_version(have_version) >= parse_version(version)
+ else:
+ return parse_version(have_version) > parse_version(version)
diff --git a/astropy_helpers/astropy_helpers/version.py b/astropy_helpers/astropy_helpers/version.py
index 1d37518..07a6a46 100644
--- a/astropy_helpers/astropy_helpers/version.py
+++ b/astropy_helpers/astropy_helpers/version.py
@@ -1,17 +1,17 @@
-# Autogenerated by Astropy-affiliated package astropy_helpers's setup.py on 2015-03-06 18:40:03.550234
+# Autogenerated by Astropy-affiliated package astropy_helpers's setup.py on 2015-04-16 11:25:55.599810
from __future__ import unicode_literals
import datetime
-version = "1.0.1"
-githash = "483a89fe8d8bbacd52e5831e3897680ff9e23966"
+version = "1.0.2"
+githash = "161773fa72d916c498e0a2a513ecc24460244ac8"
major = 1
minor = 0
-bugfix = 1
+bugfix = 2
release = True
-timestamp = datetime.datetime(2015, 3, 6, 18, 40, 3, 550234)
+timestamp = datetime.datetime(2015, 4, 16, 11, 25, 55, 599810)
debug = False
try:
diff --git a/astropy_helpers/continuous-integration/travis/install_graphviz_osx.sh b/astropy_helpers/continuous-integration/travis/install_graphviz_osx.sh
index 6144d14..e52a8a9 100755
--- a/astropy_helpers/continuous-integration/travis/install_graphviz_osx.sh
+++ b/astropy_helpers/continuous-integration/travis/install_graphviz_osx.sh
@@ -1,4 +1,4 @@
#!/bin/bash
-sudo brew update
-sudo brew install graphviz
\ No newline at end of file
+brew update
+brew install graphviz
diff --git a/astropy_helpers/setup.py b/astropy_helpers/setup.py
index 11e5649..ae82286 100755
--- a/astropy_helpers/setup.py
+++ b/astropy_helpers/setup.py
@@ -8,7 +8,7 @@ from astropy_helpers.setup_helpers import register_commands, get_package_info
from astropy_helpers.version_helpers import generate_version_py
NAME = 'astropy_helpers'
-VERSION = '1.0.1'
+VERSION = '1.0.2'
RELEASE = 'dev' not in VERSION
DOWNLOAD_BASE_URL = 'http://pypi.python.org/packages/source/a/astropy-helpers'
diff --git a/docs/_static/astropy_banner.svg b/docs/_static/astropy_banner.svg
new file mode 100644
index 0000000..a52c65e
--- /dev/null
+++ b/docs/_static/astropy_banner.svg
@@ -0,0 +1,263 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ version="1.1"
+ id="svg2"
+ height="220"
+ width="1200">
+ <defs
+ id="defs4">
+ <linearGradient
+ id="linearGradient3767">
+ <stop
+ style="stop-color:#ffa900;stop-opacity:1;"
+ offset="0"
+ id="stop3800" />
+ <stop
+ style="stop-color:#fd0000;stop-opacity:1;"
+ offset="1"
+ id="stop3775" />
+ </linearGradient>
+ <linearGradient
+ spreadMethod="repeat"
+ gradientUnits="userSpaceOnUse"
+ y2="298.258"
+ x2="83.651718"
+ y1="163.12114"
+ x1="195.5511"
+ id="linearGradient3798"
+ xlink:href="#linearGradient3767" />
+ </defs>
+ <g
+ transform="translate(2.5547457,54.766869)"
+ id="layer1">
+ <g
+ id="text3084"
+ style="line-height:125%">
+ <path
+ id="path4169"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 228.57097,43.364565 c 0,14.62 10.2,24.48 24.82,24.48 9.86,0 18.36,-4.93 25.84,-11.22 l 0.51,0 1.53,9.18 15.98,0 0,-49.47 c 0,-23.2899996 -10.2,-36.04 -31.11,-36.04 -13.26,0 -24.99,5.1 -34.17,10.8800004 l 7.14,13.09 c 7.31,-4.41999997 15.13,-8.16 23.46,-8.16 11.22,0 14.96,7.48 15.13,16.3199996 -34.17,3.74 -49.13,13.09 -49.13,30.94 z m 19.04,-1.53 c 0,-8.33 7.48,-14.28 30.09,-17.17 l 0,18.7 c -6.12,5.78 -11.39,9.18 -18.19,9.18 -6.97,0 -11.9,-3.23 -11.9,-10.71 z" />
+ <path
+ id="path4171"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 312.79535,56.284565 c 8.16,6.63 20.23,11.56 31.62,11.56 21.08,0 32.47,-11.56 32.47,-25.84 0,-15.64 -12.58,-21.08 -23.97,-25.33 -9.01,-3.23 -17.34,-5.78 -17.34,-12.4099996 0,-5.27 3.91,-9.18 12.24,-9.18 6.63,0 12.58,2.89 18.36,7.14 l 9.01,-11.9 c -6.8,-5.2700004 -16.15,-10.0300004 -27.71,-10.0300004 -18.7,0 -30.43,10.3700004 -30.43,24.9900004 0,13.9399996 12.41,20.2299996 23.46,24.3099996 8.84,3.4 17.85,6.46 17.85,13.43 0,5.78 -4.25,10.03 -13.26,10.03 -8.5,0 -15.64,-3.57 -23 [...]
+ <path
+ id="path4173"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 397.39425,37.584565 c 0,18.02 7.14,30.26 26.52,30.26 6.63,0 12.41,-1.53 17,-3.06 l -3.4,-14.45 c -2.38,1.02 -5.78,1.87 -8.67,1.87 -7.99,0 -11.73,-4.76 -11.73,-14.62 l 0,-39.7799996 21.25,0 0,-15.4700004 -21.25,0 0,-22.78 -16.32,0 -2.38,22.78 -12.92,0.85 0,14.6200004 11.9,0 0,39.7799996 z" />
+ <path
+ id="path4175"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 455.93003,65.804565 19.55,0 0,-51 c 5.1,-12.5799996 13.09,-17.1699996 19.72,-17.1699996 3.57,0 5.78,0.51 8.67,1.36 l 3.4,-17.0000004 c -2.55,-1.19 -5.27,-1.7 -9.52,-1.7 -8.84,0 -17.68,5.95 -23.63,16.8300004 l -0.68,0 -1.36,-14.7900004 -16.15,0 0,83.47 z" />
+ <path
+ id="path4177"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 512.24785,24.154565 c 0,27.71 18.87,43.69 39.61,43.69 20.91,0 39.78,-15.98 39.78,-43.69 0,-27.8799996 -18.87,-43.86 -39.78,-43.86 -20.74,0 -39.61,15.9800004 -39.61,43.86 z m 20.06,0 c 0,-16.8299996 7.31,-27.8799996 19.55,-27.8799996 12.24,0 19.72,11.05 19.72,27.8799996 0,16.66 -7.48,27.71 -19.72,27.71 -12.24,0 -19.55,-11.05 -19.55,-27.71 z" />
+ <path
+ id="path4179"
+ style="fill:#ff5000;fill-opacity:1"
+ d="m 621.69863,57.814565 c 8.5,6.29 16.83,10.03 24.48,10.03 19.21,0 36.04,-16.32 36.04,-44.03 0,-25.3299996 -10.88,-41.65 -32.64,-41.65 -10.03,0 -19.72,5.95 -27.71,12.2400004 l -0.51,0 -0.68,-10.2000004 -6.46,0 0,118.490005 7.48,0 0,-29.750005 0,-15.13 z m 0,-7.14 0,-48.4499996 c 9.86,-8.67 18.53,-13.4300004 26.69,-13.4300004 18.7,0 25.84,14.9600004 25.84,35.02 0,22.1 -11.9,37.4 -28.39,37.4 -6.29,0 -14.79,-2.89 -24.14,-10.54 z" />
+ <path
+ id="path4181"
+ style="fill:#ff5000;fill-opacity:1"
+ d="m 696.99535,94.704565 -1.7,6.630005 c 2.21,1.02 5.27,1.7 8.33,1.7 14.62,0 22.78,-13.430005 27.37,-27.030005 l 32.3,-91.8 -7.65,0 -17.51,51.34 c -2.21,6.8 -4.93,15.64 -7.48,22.78 l -0.68,0 c -3.06,-7.14 -6.29,-15.98 -8.84,-22.78 l -19.72,-51.34 -8.16,0 33.49,83.13 -2.21,7.14 c -4.08,12.41 -10.88,21.59 -20.91,21.59 -2.38,0 -4.93,-0.68 -6.63,-1.36 z" />
+ </g>
+ <g
+ id="text3858"
+ style="line-height:125%">
+ <path
+ id="path4184"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 238.5584,128.98857 1.488,-4.8 c 1.152,-3.504 2.16,-6.96 3.12,-10.608 l 0.192,0 c 1.008,3.648 2.016,7.104 3.168,10.608 l 1.488,4.8 -9.456,0 z m 13.44,12.816 4.272,0 -10.656,-31.488 -4.512,0 -10.656,31.488 4.08,0 3.024,-9.6 11.472,0 2.976,9.6 z" />
+ <path
+ id="path4186"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 268.6109,126.06057 c 0,10.272 5.856,16.32 13.776,16.32 4.032,0 7.104,-1.68 9.6,-4.56 l -2.256,-2.448 c -2.016,2.256 -4.224,3.504 -7.248,3.504 -5.952,0 -9.744,-4.992 -9.744,-12.912 0,-7.824 3.936,-12.72 9.936,-12.72 2.64,0 4.656,1.152 6.288,2.928 l 2.256,-2.544 c -1.824,-2.064 -4.8,-3.888 -8.592,-3.888 -8.064,0 -14.016,6.192 -14.016,16.32 z" />
+ <path
+ id="path4188"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 295.6979,130.18857 c 0,7.728 5.088,12.192 10.8,12.192 5.712,0 10.8,-4.464 10.8,-12.192 0,-7.824 -5.088,-12.288 -10.8,-12.288 -5.712,0 -10.8,4.464 -10.8,12.288 z m 4.08,0 c 0,-5.376 2.736,-9.024 6.72,-9.024 4.032,0 6.72,3.648 6.72,9.024 0,5.328 -2.688,8.928 -6.72,8.928 -3.984,0 -6.72,-3.6 -6.72,-8.928 z" />
+ <path
+ id="path4190"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 323.4884,141.80457 3.936,0 0,-16.896 c 2.16,-2.4 4.128,-3.6 5.856,-3.6 2.976,0 4.368,1.872 4.368,6.24 l 0,14.256 3.936,0 0,-16.896 c 2.16,-2.4 4.032,-3.6 5.856,-3.6 2.928,0 4.32,1.872 4.32,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.304,-9.12 -7.104,-9.12 -2.832,0 -5.232,1.824 -7.68,4.464 -0.96,-2.784 -2.88,-4.464 -6.432,-4.464 -2.784,0 -5.232,1.728 -7.248,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4192"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 363.2384,141.80457 3.936,0 0,-16.896 c 2.16,-2.4 4.128,-3.6 5.856,-3.6 2.976,0 4.368,1.872 4.368,6.24 l 0,14.256 3.936,0 0,-16.896 c 2.16,-2.4 4.032,-3.6 5.856,-3.6 2.928,0 4.32,1.872 4.32,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.304,-9.12 -7.104,-9.12 -2.832,0 -5.232,1.824 -7.68,4.464 -0.96,-2.784 -2.88,-4.464 -6.432,-4.464 -2.784,0 -5.232,1.728 -7.248,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4194"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 402.6524,133.26057 c 0,5.952 2.208,9.12 7.152,9.12 3.216,0 5.52,-1.68 7.68,-4.224 l 0.144,0 0.336,3.648 3.264,0 0,-23.328 -3.936,0 0,16.56 c -2.208,2.736 -3.888,3.936 -6.288,3.936 -3.072,0 -4.368,-1.872 -4.368,-6.24 l 0,-14.256 -3.984,0 0,14.784 z" />
+ <path
+ id="path4196"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 429.14465,141.80457 3.936,0 0,-16.896 c 2.352,-2.352 3.984,-3.6 6.384,-3.6 3.072,0 4.416,1.872 4.416,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.208,-9.12 -7.104,-9.12 -3.216,0 -5.616,1.728 -7.824,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4198"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 457.41065,113.67657 c 1.536,0 2.736,-1.056 2.736,-2.544 0,-1.536 -1.2,-2.544 -2.736,-2.544 -1.536,0 -2.736,1.008 -2.736,2.544 0,1.488 1.2,2.544 2.736,2.544 z m -2.016,28.128 3.936,0 0,-23.328 -3.936,0 0,23.328 z" />
+ <path
+ id="path4200"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 467.87915,134.60457 c 0,4.608 1.68,7.776 6.672,7.776 1.44,0 3.024,-0.432 4.32,-0.864 l -0.768,-2.976 c -0.768,0.288 -1.824,0.624 -2.64,0.624 -2.688,0 -3.6,-1.632 -3.6,-4.512 l 0,-12.96 6.288,0 0,-3.216 -6.288,0 0,-6.528 -3.312,0 -0.48,6.528 -3.648,0.24 0,2.976 3.456,0 0,12.912 z" />
+ <path
+ id="path4202"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 481.8419,148.23657 -0.768,3.12 c 0.768,0.288 1.68,0.48 2.736,0.48 4.704,0 7.104,-3.504 8.736,-8.112 l 8.784,-25.248 -3.84,0 -4.176,12.912 c -0.624,2.064 -1.296,4.464 -1.968,6.576 l -0.192,0 c -0.72,-2.16 -1.536,-4.56 -2.256,-6.576 l -4.752,-12.912 -4.08,0 9.36,23.376 -0.528,1.728 c -0.96,2.88 -2.64,4.992 -5.28,4.992 -0.576,0 -1.296,-0.192 -1.776,-0.336 z" />
+ <path
+ id="path4204"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 515.8724,141.80457 3.984,0 0,-12.48 5.664,0 c 6.912,0 11.616,-3.12 11.616,-9.744 0,-6.912 -4.656,-9.264 -11.616,-9.264 l -9.648,0 0,31.488 z m 3.984,-15.744 0,-12.528 5.184,0 c 5.424,0 8.112,1.488 8.112,6.048 0,4.464 -2.64,6.48 -8.112,6.48 l -5.184,0 z" />
+ <path
+ id="path4206"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 541.56065,148.23657 -0.768,3.12 c 0.768,0.288 1.68,0.48 2.736,0.48 4.704,0 7.104,-3.504 8.736,-8.112 l 8.784,-25.248 -3.84,0 -4.176,12.912 c -0.624,2.064 -1.296,4.464 -1.968,6.576 l -0.192,0 c -0.72,-2.16 -1.536,-4.56 -2.256,-6.576 l -4.752,-12.912 -4.08,0 9.36,23.376 -0.528,1.728 c -0.96,2.88 -2.64,4.992 -5.28,4.992 -0.576,0 -1.296,-0.192 -1.776,-0.336 z" />
+ <path
+ id="path4208"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 566.2229,134.60457 c 0,4.608 1.68,7.776 6.672,7.776 1.44,0 3.024,-0.432 4.32,-0.864 l -0.768,-2.976 c -0.768,0.288 -1.824,0.624 -2.64,0.624 -2.688,0 -3.6,-1.632 -3.6,-4.512 l 0,-12.96 6.288,0 0,-3.216 -6.288,0 0,-6.528 -3.312,0 -0.48,6.528 -3.648,0.24 0,2.976 3.456,0 0,12.912 z" />
+ <path
+ id="path4210"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 581.76965,141.80457 3.936,0 0,-16.896 c 2.352,-2.352 3.984,-3.6 6.384,-3.6 3.072,0 4.416,1.872 4.416,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.208,-9.12 -7.104,-9.12 -3.216,0 -5.568,1.728 -7.776,3.84 l 0.144,-4.8 0,-9.312 -3.936,0 0,34.176 z" />
+ <path
+ id="path4212"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 606.1979,130.18857 c 0,7.728 5.088,12.192 10.8,12.192 5.712,0 10.8,-4.464 10.8,-12.192 0,-7.824 -5.088,-12.288 -10.8,-12.288 -5.712,0 -10.8,4.464 -10.8,12.288 z m 4.08,0 c 0,-5.376 2.736,-9.024 6.72,-9.024 4.032,0 6.72,3.648 6.72,9.024 0,5.328 -2.688,8.928 -6.72,8.928 -3.984,0 -6.72,-3.6 -6.72,-8.928 z" />
+ <path
+ id="path4214"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 633.9884,141.80457 3.936,0 0,-16.896 c 2.352,-2.352 3.984,-3.6 6.384,-3.6 3.072,0 4.416,1.872 4.416,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.208,-9.12 -7.104,-9.12 -3.216,0 -5.616,1.728 -7.824,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4216"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 670.27865,141.80457 17.76,0 0,-3.408 -13.776,0 0,-28.08 -3.984,0 0,31.488 z" />
+ <path
+ id="path4218"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 695.2544,113.67657 c 1.536,0 2.736,-1.056 2.736,-2.544 0,-1.536 -1.2,-2.544 -2.736,-2.544 -1.536,0 -2.736,1.008 -2.736,2.544 0,1.488 1.2,2.544 2.736,2.544 z m -2.016,28.128 3.936,0 0,-23.328 -3.936,0 0,23.328 z" />
+ <path
+ id="path4220"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 705.0509,141.80457 3.168,0 0.336,-2.304 0.144,0 c 2.064,1.824 4.512,2.88 6.672,2.88 5.328,0 10.08,-4.656 10.08,-12.624 0,-7.2 -3.216,-11.856 -9.264,-11.856 -2.592,0 -5.184,1.392 -7.296,3.264 l 0.096,-4.224 0,-9.312 -3.936,0 0,34.176 z m 3.936,-5.184 0,-12.24 c 2.208,-2.064 4.272,-3.168 6.24,-3.168 4.416,0 6.144,3.456 6.144,8.592 0,5.76 -2.832,9.264 -6.672,9.264 -1.536,0 -3.648,-0.672 -5.712,-2.448 z" />
+ <path
+ id="path4222"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 731.6759,141.80457 3.936,0 0,-14.976 c 1.584,-3.936 3.936,-5.376 5.904,-5.376 0.96,0 1.488,0.144 2.256,0.384 l 0.768,-3.456 c -0.768,-0.336 -1.488,-0.48 -2.496,-0.48 -2.64,0 -5.04,1.872 -6.624,4.8 l -0.144,0 -0.336,-4.224 -3.264,0 0,23.328 z" />
+ <path
+ id="path4224"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 745.99265,135.75657 c 0,4.176 2.928,6.624 6.864,6.624 2.88,0 5.472,-1.536 7.728,-3.36 l 0.096,0 0.336,2.784 3.264,0 0,-14.304 c 0,-5.808 -2.4,-9.6 -8.112,-9.6 -3.744,0 -7.008,1.632 -9.168,3.024 l 1.584,2.736 c 1.824,-1.248 4.224,-2.496 6.96,-2.496 3.792,0 4.8,2.88 4.8,5.856 -9.936,1.104 -14.352,3.648 -14.352,8.736 z m 3.888,-0.288 c 0,-2.976 2.64,-4.896 10.464,-5.856 l 0,6.48 c -2.256,2.016 -4.128,3.12 -6.384,3.12 -2.304,0 -4.08,-1.104 -4.08,-3.744 z" />
+ <path
+ id="path4226"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 771.70715,141.80457 3.936,0 0,-14.976 c 1.584,-3.936 3.936,-5.376 5.904,-5.376 0.96,0 1.488,0.144 2.256,0.384 l 0.768,-3.456 c -0.768,-0.336 -1.488,-0.48 -2.496,-0.48 -2.64,0 -5.04,1.872 -6.624,4.8 l -0.144,0 -0.336,-4.224 -3.264,0 0,23.328 z" />
+ <path
+ id="path4228"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 787.74815,148.23657 -0.768,3.12 c 0.768,0.288 1.68,0.48 2.736,0.48 4.704,0 7.104,-3.504 8.736,-8.112 l 8.784,-25.248 -3.84,0 -4.176,12.912 c -0.624,2.064 -1.296,4.464 -1.968,6.576 l -0.192,0 c -0.72,-2.16 -1.536,-4.56 -2.256,-6.576 l -4.752,-12.912 -4.08,0 9.36,23.376 -0.528,1.728 c -0.96,2.88 -2.64,4.992 -5.28,4.992 -0.576,0 -1.296,-0.192 -1.776,-0.336 z" />
+ <path
+ id="path4230"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 832.77065,107.82057 c -1.056,-0.432 -2.448,-0.768 -3.888,-0.768 -4.656,0 -6.816,2.976 -6.816,7.728 l 0,3.696 -3.168,0.24 0,2.976 3.168,0 0,20.112 3.936,0 0,-20.112 4.944,0 0,-3.216 -4.944,0 0,-3.696 c 0,-2.976 1.056,-4.512 3.264,-4.512 0.864,0 1.776,0.192 2.64,0.576 l 0.864,-3.024 z" />
+ <path
+ id="path4232"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 833.2604,130.18857 c 0,7.728 5.088,12.192 10.8,12.192 5.712,0 10.8,-4.464 10.8,-12.192 0,-7.824 -5.088,-12.288 -10.8,-12.288 -5.712,0 -10.8,4.464 -10.8,12.288 z m 4.08,0 c 0,-5.376 2.736,-9.024 6.72,-9.024 4.032,0 6.72,3.648 6.72,9.024 0,5.328 -2.688,8.928 -6.72,8.928 -3.984,0 -6.72,-3.6 -6.72,-8.928 z" />
+ <path
+ id="path4234"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 861.0509,141.80457 3.936,0 0,-14.976 c 1.584,-3.936 3.936,-5.376 5.904,-5.376 0.96,0 1.488,0.144 2.256,0.384 l 0.768,-3.456 c -0.768,-0.336 -1.488,-0.48 -2.496,-0.48 -2.64,0 -5.04,1.872 -6.624,4.8 l -0.144,0 -0.336,-4.224 -3.264,0 0,23.328 z" />
+ <path
+ id="path4236"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 891.71465,128.98857 1.488,-4.8 c 1.152,-3.504 2.16,-6.96 3.12,-10.608 l 0.192,0 c 1.008,3.648 2.016,7.104 3.168,10.608 l 1.488,4.8 -9.456,0 z m 13.44,12.816 4.272,0 -10.656,-31.488 -4.512,0 -10.656,31.488 4.08,0 3.024,-9.6 11.472,0 2.976,9.6 z" />
+ <path
+ id="path4238"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 910.9589,139.16457 c 2.208,1.824 5.376,3.216 8.688,3.216 5.52,0 8.544,-3.168 8.544,-6.96 0,-4.416 -3.744,-5.808 -7.104,-7.056 -2.64,-1.008 -5.184,-1.824 -5.184,-4.032 0,-1.776 1.344,-3.36 4.272,-3.36 2.064,0 3.744,0.864 5.328,2.064 l 1.872,-2.496 c -1.776,-1.44 -4.32,-2.64 -7.248,-2.64 -5.04,0 -8.016,2.88 -8.016,6.624 0,3.936 3.648,5.52 6.96,6.72 2.544,0.96 5.328,2.016 5.328,4.416 0,2.016 -1.536,3.648 -4.608,3.648 -2.784,0 -4.848,-1.152 -6.864,-2.784 l -1.968,2.64 z" />
+ <path
+ id="path4240"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 934.37915,134.60457 c 0,4.608 1.68,7.776 6.672,7.776 1.44,0 3.024,-0.432 4.32,-0.864 l -0.768,-2.976 c -0.768,0.288 -1.824,0.624 -2.64,0.624 -2.688,0 -3.6,-1.632 -3.6,-4.512 l 0,-12.96 6.288,0 0,-3.216 -6.288,0 0,-6.528 -3.312,0 -0.48,6.528 -3.648,0.24 0,2.976 3.456,0 0,12.912 z" />
+ <path
+ id="path4242"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 949.9259,141.80457 3.936,0 0,-14.976 c 1.584,-3.936 3.936,-5.376 5.904,-5.376 0.96,0 1.488,0.144 2.256,0.384 l 0.768,-3.456 c -0.768,-0.336 -1.488,-0.48 -2.496,-0.48 -2.64,0 -5.04,1.872 -6.624,4.8 l -0.144,0 -0.336,-4.224 -3.264,0 0,23.328 z" />
+ <path
+ id="path4244"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 964.41665,130.18857 c 0,7.728 5.088,12.192 10.8,12.192 5.712,0 10.8,-4.464 10.8,-12.192 0,-7.824 -5.088,-12.288 -10.8,-12.288 -5.712,0 -10.8,4.464 -10.8,12.288 z m 4.08,0 c 0,-5.376 2.736,-9.024 6.72,-9.024 4.032,0 6.72,3.648 6.72,9.024 0,5.328 -2.688,8.928 -6.72,8.928 -3.984,0 -6.72,-3.6 -6.72,-8.928 z" />
+ <path
+ id="path4246"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 992.20715,141.80457 3.936,0 0,-16.896 c 2.352,-2.352 3.98395,-3.6 6.38395,-3.6 3.072,0 4.416,1.872 4.416,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.208,-9.12 -7.104,-9.12 -3.216,0 -5.61595,1.728 -7.82395,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4248"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 1016.7291,130.18857 c 0,7.728 5.088,12.192 10.8,12.192 5.712,0 10.8,-4.464 10.8,-12.192 0,-7.824 -5.088,-12.288 -10.8,-12.288 -5.712,0 -10.8,4.464 -10.8,12.288 z m 4.08,0 c 0,-5.376 2.736,-9.024 6.72,-9.024 4.032,0 6.72,3.648 6.72,9.024 0,5.328 -2.688,8.928 -6.72,8.928 -3.984,0 -6.72,-3.6 -6.72,-8.928 z" />
+ <path
+ id="path4250"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 1044.5196,141.80457 3.936,0 0,-16.896 c 2.16,-2.4 4.128,-3.6 5.856,-3.6 2.976,0 4.368,1.872 4.368,6.24 l 0,14.256 3.936,0 0,-16.896 c 2.16,-2.4 4.032,-3.6 5.856,-3.6 2.928,0 4.32,1.872 4.32,6.24 l 0,14.256 3.936,0 0,-14.784 c 0,-5.952 -2.304,-9.12 -7.104,-9.12 -2.832,0 -5.232,1.824 -7.68,4.464 -0.96,-2.784 -2.88,-4.464 -6.432,-4.464 -2.784,0 -5.232,1.728 -7.248,3.936 l -0.144,0 -0.336,-3.36 -3.264,0 0,23.328 z" />
+ <path
+ id="path4252"
+ style="fill:#4d4d4d;fill-opacity:1"
+ d="m 1082.6856,148.23657 -0.768,3.12 c 0.768,0.288 1.68,0.48 2.736,0.48 4.704,0 7.104,-3.504 8.736,-8.112 l 8.784,-25.248 -3.84,0 -4.176,12.912 c -0.624,2.064 -1.296,4.464 -1.968,6.576 l -0.192,0 c -0.72,-2.16 -1.536,-4.56 -2.256,-6.576 l -4.752,-12.912 -4.08,0 9.36,23.376 -0.528,1.728 c -0.96,2.88 -2.64,4.992 -5.28,4.992 -0.576,0 -1.296,-0.192 -1.776,-0.336 z" />
+ </g>
+ <path
+ clip-path="none"
+ mask="none"
+ transform="matrix(1.0651076,-0.90408718,0.89013474,1.0731629,-250.16148,-67.548145)"
+ d="m 221.23922,233.12846 a 80.322945,68.372307 0 0 1 -80.32294,68.37231 80.322945,68.372307 0 0 1 -80.32295,-68.37231 80.322945,68.372307 0 0 1 80.32295,-68.3723 80.322945,68.372307 0 0 1 80.32294,68.3723 z"
+ id="path3837"
+ style="fill:url(#linearGradient3798);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-opacity:1" />
+ <path
+ transform="matrix(-0.71212781,-0.99456325,-0.9777811,0.72852659,474.31246,145.67479)"
+ d="m 238.396,198.78328 c 32.61949,7.06756 15.40308,37.20961 -8.24653,38.06088 -26.00022,0.93589 -44.08994,-24.61831 -41.97511,-48.94224 2.67817,-30.80329 32.53302,-51.80129 62.38812,-48.26147"
+ id="path5413"
+ style="fill:none;stroke:#ffffff;stroke-width:14.33045673;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ transform="matrix(1.0512298,0.82294962,-0.97239782,0.94168108,304.90482,-361.44933)"
+ d="m 144.4518,337.67926 a 22.223356,12.626906 0 0 1 -22.22335,12.62691 22.223356,12.626906 0 0 1 -22.22336,-12.62691 22.223356,12.626906 0 0 1 22.22336,-12.62691 22.223356,12.626906 0 0 1 22.22335,12.62691 z"
+ id="path3819"
+ style="fill:#ffffff;fill-opacity:1;stroke:none" />
+ <path
+ id="path3853"
+ d="m 136.88413,-13.10567 13.09858,22.1959915 20.1541,-20.0243955 -25.57713,-10.690029 z"
+ style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:2.36031604;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <g
+ transform="matrix(0.95039189,0,0,0.93299787,-2.487134,-0.56516029)"
+ id="g3804">
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:3.72635746;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 142.07904,-24.83606 -2.29208,-13.70422 0.18559,0.452199 0,0"
+ id="path3855-7" />
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:4.02542162;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 127.81295,-26.11257 15.12343,0.663566 -0.4755,0.06597 0,0"
+ id="path3855-7-0" />
+ <path
+ style="fill:none;stroke:#ffffff;stroke-width:4.06302309;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ d="m 150.77669,-18.240009 -9.57586,-7.57806 0.46343,0.302121 0,0"
+ id="path3855-7-05" />
+ </g>
+ <path
+ id="path3942"
+ d="m 67.476992,131.73602 c 3.410653,2.96251 14.804165,7.71706 18.766169,12.74152 4.041356,5.12504 3.407665,10.98393 3.371624,14.32634"
+ style="fill:none;stroke:#ffffff;stroke-width:17.45736313;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ <path
+ transform="matrix(0.95949998,0.82835015,0.79893463,-0.95307413,-285.3304,56.888018)"
+ d="m 238.396,198.78328 c 32.61946,7.06755 15.40307,37.20957 -8.24652,38.06085 -26.0002,0.93588 -44.0899,-24.61829 -41.97507,-48.9422 2.70006,-31.05511 32.97695,-52.07076 63.01891,-48.18321 1.11735,0.14459 2.2306,0.32088 3.33799,0.52843"
+ id="path2987-5"
+ style="fill:none;stroke:#ffffff;stroke-width:13.93870544;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none" />
+ </g>
+</svg>
diff --git a/docs/astropy_banner_96.png b/docs/_static/astropy_banner_96.png
similarity index 100%
rename from docs/astropy_banner_96.png
rename to docs/_static/astropy_banner_96.png
diff --git a/docs/_static/dev.png b/docs/_static/dev.png
deleted file mode 100644
index 6433fcf..0000000
Binary files a/docs/_static/dev.png and /dev/null differ
diff --git a/docs/_static/mature.png b/docs/_static/mature.png
deleted file mode 100644
index 9219cc9..0000000
Binary files a/docs/_static/mature.png and /dev/null differ
diff --git a/docs/_static/planned.png b/docs/_static/planned.png
deleted file mode 100644
index fd23a59..0000000
Binary files a/docs/_static/planned.png and /dev/null differ
diff --git a/docs/_static/stable.png b/docs/_static/stable.png
deleted file mode 100644
index 5c08992..0000000
Binary files a/docs/_static/stable.png and /dev/null differ
diff --git a/docs/coordinates/representations.rst b/docs/coordinates/representations.rst
index 41634d8..d8e9e3c 100644
--- a/docs/coordinates/representations.rst
+++ b/docs/coordinates/representations.rst
@@ -82,6 +82,7 @@ removed, and the points are still located on a unit sphere:
<CartesianRepresentation (x, y, z) [dimensionless]
(0.424264068712, 0.707106781187, 0.565685424949)>
+
Array values
^^^^^^^^^^^^
diff --git a/docs/coordinates/sgr-example.rst b/docs/coordinates/sgr-example.rst
index f51773b..8a093f6 100644
--- a/docs/coordinates/sgr-example.rst
+++ b/docs/coordinates/sgr-example.rst
@@ -166,7 +166,7 @@ transform from ICRS coordinates to ``Sagittarius``, we simply::
>>> icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
>>> icrs.transform_to(Sagittarius) # doctest: +SKIP
<Sagittarius Coordinate: (Lambda, Beta, distance) in (deg, deg, )
- (346.818273..., -39.283667..., 1.0)>
+ (346.8182733552503, -39.28366798041541, 1.0)>
The complete code for the above example is included below for reference.
diff --git a/docs/development/affiliated-packages.rst b/docs/development/affiliated-packages.rst
index 9c9477d..6d91c6d 100644
--- a/docs/development/affiliated-packages.rst
+++ b/docs/development/affiliated-packages.rst
@@ -56,7 +56,7 @@ Starting a new package
files, but do not go inside it - instead, create another empty repository
into which we will copy the required files::
- git clone http://github.com/astropy/package-template.git template
+ git clone https://github.com/astropy/package-template.git template
mkdir <packagename>
cd <packagename>
git init
@@ -65,7 +65,7 @@ Starting a new package
package, and we recommend adding this as a sub-module so as to easily be
able to bundle it in releases of affiliated packages::
- git submodule add http://github.com/astropy/astropy-helpers.git astropy_helpers
+ git submodule add https://github.com/astropy/astropy-helpers.git astropy_helpers
#. Copy over the following files from the package template (these define the
bare minimum of what is needed) and add them to the repository::
diff --git a/docs/index.rst b/docs/index.rst
index 13e63c5..79629c5 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -16,10 +16,17 @@
Astropy Core Package Documentation
##################################
-.. image:: astropy_banner_96.png
- :width: 485px
- :height: 96px
- :target: http://www.astropy.org/
+.. |logo_svg| image:: _static/astropy_banner.svg
+
+.. |logo_png| image:: _static/astropy_banner_96.png
+
+.. raw:: html
+
+ <img src="_images/astropy_banner.svg" onerror="this.src='_images/astropy_banner_96.png'; this.onerror=null;" width="485"/>
+
+.. only:: latex
+
+ .. image:: _static/astropy_logo.pdf
Welcome to the Astropy documentation! Astropy is a community-driven
package intended to contain much of the core functionality and some common
@@ -192,6 +199,10 @@ Astropy or affiliated packages, as well as coding, documentation, and
testing guidelines. For the guiding vision of this process and the project
as a whole, see :doc:`development/vision`.
+There are additional tools of use for developers in the
+`astropy/astropy-tools repository
+<http://github.com/astropy/astropy-tools>`__.
+
.. toctree::
:maxdepth: 1
diff --git a/docs/install.rst b/docs/install.rst
index 2ee8ad6..b94e59a 100644
--- a/docs/install.rst
+++ b/docs/install.rst
@@ -29,6 +29,10 @@ Astropy also depends on other packages for optional features:
- `xmllint <http://www.xmlsoft.org/>`_: To validate VOTABLE XML files.
+- `matplotlib <http://matplotlib.org/>`_: To provide plotting functionality that `astropy.visualization` enhances.
+
+- `WCSAxes <http://wcsaxes.readthedocs.org/en/latest/>`_: To use `astropy.wcs` to define projections in Matplotlib.
+
However, note that these only need to be installed if those particular features
are needed. Astropy will import even if these dependencies are not installed.
diff --git a/docs/io/votable/index.rst b/docs/io/votable/index.rst
index 4fc800c..18c567c 100644
--- a/docs/io/votable/index.rst
+++ b/docs/io/votable/index.rst
@@ -103,6 +103,15 @@ example, suppose we had a ``FIELD`` specified as follows:
that is both unique and required, which would be the most
convenient mechanism to uniquely identify a column.
+ When converting from an `astropy.io.votable.tree.Table` object to
+ an `astropy.table.Table` object, one can specify whether to give
+ preference to ``name`` or ``ID`` attributes when naming the
+ columns. By default, ``ID`` is given preference. To give
+ ``name`` preference, pass the keyword argument
+ ``use_names_over_ids=True``::
+
+ >>> votable.get_first_table().to_table(use_names_over_ids=True)
+
This column of data can be extracted from the record array using::
>>> table.array['dec_targ']
diff --git a/docs/known_issues.rst b/docs/known_issues.rst
index fd80255..57cf5f5 100644
--- a/docs/known_issues.rst
+++ b/docs/known_issues.rst
@@ -6,7 +6,7 @@ Known Issues
.. contents::
:local:
- :depth: 1
+ :depth: 2
While most bugs and issues are managed using the `astropy issue
tracker <https://github.com/astropy/astropy/issues>`_, this document
@@ -14,67 +14,22 @@ lists issues that are too difficult to fix, may require some
intervention from the user to workaround, or are due to bugs in other
projects or packages.
-Anaconda users should upgrade with ``conda``, not ``pip``
----------------------------------------------------------
-
-Upgrading Astropy in the anaconda python distribution using ``pip`` can result
-in a corrupted install with a mix of files from the old version and the new
-version. Anaconda users should update with ``conda update astropy``. There
-may be a brief delay between the release of Astropy on PyPI and its release
-via the ``conda`` package manager; users can check the availability of new
-versions with ``conda search astropy``.
-
-Locale errors in MacOS X and Linux
-----------------------------------
-
-On MacOS X, you may see the following error when running ``setup.py``::
-
- ...
- ValueError: unknown locale: UTF-8
+Issues listed on this page are grouped into two categories: The first is known
+issues and shortcomings in actual algorithms and interfaces that currently do
+not have fixes or workarounds, and that users should be aware of when writing
+code that uses Astropy. Some of those issues are still platform-specific,
+while others are very general. The second category is common issues that come
+up when configuring, building, or installing Astropy. This also includes
+cases where the test suite can report false negatives depending on the context/
+platform on which it was run.
-This is due to the ``LC_CTYPE`` environment variable being incorrectly set to
-``UTF-8`` by default, which is not a valid locale setting.
-
-On MacOS X or Linux (or other platforms) you may also encounter the following
-error::
-
- ...
- stderr = stderr.decode(stdio_encoding)
- TypeError: decode() argument 1 must be str, not None
-
-This also indicates that your locale is not set correctly.
-
-To fix either of these issues, set this environment variable, as well as the
-``LANG`` and ``LC_ALL`` environment variables to e.g. ``en_US.UTF-8`` using, in
-the case of ``bash``::
-
- export LANG="en_US.UTF-8"
- export LC_ALL="en_US.UTF-8"
- export LC_CTYPE="en_US.UTF-8"
-
-To avoid any issues in future, you should add this line to your e.g.
-``~/.bash_profile`` or ``.bashrc`` file.
-
-To test these changes, open a new terminal and type ``locale``, and you should
-see something like::
-
- $ locale
- LANG="en_US.UTF-8"
- LC_COLLATE="en_US.UTF-8"
- LC_CTYPE="en_US.UTF-8"
- LC_MESSAGES="en_US.UTF-8"
- LC_MONETARY="en_US.UTF-8"
- LC_NUMERIC="en_US.UTF-8"
- LC_TIME="en_US.UTF-8"
- LC_ALL="en_US.UTF-8"
-
-If so, you can go ahead and try running ``setup.py`` again (in the new
-terminal).
+Known deficiencies
+------------------
.. _quantity_issues:
Quantities lose their units with some operations
-------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Quantities are subclassed from numpy's `~numpy.ndarray` and in some numpy operations
(and in scipy operations using numpy internally) the subclass is ignored, which
@@ -119,8 +74,9 @@ An incomplete list of specific functions which are known to exhibit this behavio
See: https://github.com/astropy/astropy/issues/1274
+
Quantities float comparison with np.isclose fails
--------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Comparing Quantities floats using the numpy function `~numpy.isclose` fails on
numpy 1.9 as the comparison between ``a`` and ``b`` is made using the formula
@@ -142,63 +98,9 @@ An easy solution is::
>>> np.isclose(500* u.km/u.s, 300 * u.km / u.s, atol=1e-8 * u.mm / u.s)
array([False], dtype=bool)
-Some docstrings can not be displayed in IPython < 0.13.2
---------------------------------------------------------
-
-Displaying long docstrings that contain Unicode characters may fail on
-some platforms in the IPython console (prior to IPython version
-0.13.2)::
-
- >>> import astropy.units as u
-
- >>> u.Angstrom?
- ERROR: UnicodeEncodeError: 'ascii' codec can't encode character u'\xe5' in
- position 184: ordinal not in range(128) [IPython.core.page]
-
-This can be worked around by changing the default encoding to ``utf-8``
-by adding the following to your ``sitecustomize.py`` file::
-
- import sys
- sys.setdefaultencoding('utf-8')
-
-Note that in general, `this is not recommended
-<http://ziade.org/2008/01/08/syssetdefaultencoding-is-evil/>`_,
-because it can hide other Unicode encoding bugs in your application.
-However, in general if your application does not deal with text
-processing and you just want docstrings to work, this may be
-acceptable.
-
-The IPython issue: https://github.com/ipython/ipython/pull/2738
-
-Failing logging tests when running the tests in IPython
--------------------------------------------------------
-
-When running the Astropy tests using ``astropy.test()`` in an IPython
-interpreter some of the tests in the ``astropy/tests/test_logger.py`` fail.
-This is due to mutually incompatible behaviors in IPython and py.test, and is
-not due to a problem with the test itself or the feature being tested.
-
-See: https://github.com/astropy/astropy/issues/717
-
-mmap support for ``astropy.io.fits`` on GNU Hurd
-------------------------------------------------
-
-On Hurd and possibly other platforms ``flush()`` on memory-mapped files is not
-implemented, so writing changes to a mmap'd FITS file may not be reliable and is
-thus disabled. Attempting to open a FITS file in writeable mode with mmap will
-result in a warning (and mmap will be disabled on the file automatically).
-
-See: https://github.com/astropy/astropy/issues/968
-
-Color printing on Windows
--------------------------
-
-Colored printing of log messages and other colored text does work in Windows
-but only when running in the IPython console. Colors are not currently
-supported in the basic Python command-line interpreter on Windows.
Table sorting can silently fail on MacOS X or Windows with Python 3 and Numpy < 1.6.2
--------------------------------------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In Python 3, prior to Numpy 1.6.2, there was a bug (in Numpy) that caused
sorting of structured arrays to silently fail under certain circumstances (for
@@ -208,8 +110,9 @@ internally sort the data, it is also affected by this bug. If you are using
Python 3, and need the sorting functionality for tables, we recommend updating
to a more recent version of Numpy.
+
Remote data utilities in `astropy.utils.data` fail on some Python distributions
--------------------------------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The remote data utilities in `astropy.utils.data` depend on the Python
standard library `shelve` module, which in some cases depends on the
@@ -227,8 +130,29 @@ such as::
One workaround is to install the ``bsddb3`` module.
+
+mmap support for ``astropy.io.fits`` on GNU Hurd
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On Hurd and possibly other platforms ``flush()`` on memory-mapped files is not
+implemented, so writing changes to a mmap'd FITS file may not be reliable and is
+thus disabled. Attempting to open a FITS file in writeable mode with mmap will
+result in a warning (and mmap will be disabled on the file automatically).
+
+See: https://github.com/astropy/astropy/issues/968
+
+
+Bug with unicode endianness in ``io.fits`` for big-endian processors
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On big-endian processors (e.g. SPARC, PowerPC, MIPS), string columns in FITS
+files may not be correctly read when using the ``Table.read`` interface. This
+will be fixed in a subsequent bug fix release of Astropy (see `bug report here
+<https://github.com/astropy/astropy/issues/3415>`_)
+
+
Error *'buffer' does not have the buffer interface* in ``io.fits``
---------------------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For Python 2.7.x versions prior to 2.7.4, the `astropy.io.fits` may under
certain circumstances output the following error::
@@ -238,8 +162,9 @@ certain circumstances output the following error::
This can be resolved by upgrading to Python 2.7.4 or later (at the time of
writing, the latest Python 2.7.x version is 2.7.9).
+
Floating point precision issues on Python 2.6 on Microsoft Windows
-------------------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When converting floating point numbers to strings on Python 2.6 on a
Microsoft Windows platform, some of the requested precision may be
@@ -249,16 +174,160 @@ The easiest workaround is to install Python 2.7.
The Python issue: http://bugs.python.org/issue7117
-Bug with unicode endianness in ``io.fits`` for big-endian processors
---------------------------------------------------------------------
-On big-endian processors (e.g. SPARC, PowerPC, MIPS), string columnn in FITS
-files may not be correctly read when using the ``Table.read`` interface. This
-will be fixed in a subsequent bug fix release of Astropy (see `bug report here
-<https://github.com/astropy/astropy/issues/3415>`_)
+Color printing on Windows
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Colored printing of log messages and other colored text does work in Windows
+but only when running in the IPython console. Colors are not currently
+supported in the basic Python command-line interpreter on Windows.
+
+
+
+Build/installation/test issues
+------------------------------
+
+Anaconda users should upgrade with ``conda``, not ``pip``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Upgrading Astropy in the anaconda python distribution using ``pip`` can result
+in a corrupted install with a mix of files from the old version and the new
+version. Anaconda users should update with ``conda update astropy``. There
+may be a brief delay between the release of Astropy on PyPI and its release
+via the ``conda`` package manager; users can check the availability of new
+versions with ``conda search astropy``.
+
+
+Locale errors in MacOS X and Linux
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+On MacOS X, you may see the following error when running ``setup.py``::
+
+ ...
+ ValueError: unknown locale: UTF-8
+
+This is due to the ``LC_CTYPE`` environment variable being incorrectly set to
+``UTF-8`` by default, which is not a valid locale setting.
+
+On MacOS X or Linux (or other platforms) you may also encounter the following
+error::
+
+ ...
+ stderr = stderr.decode(stdio_encoding)
+ TypeError: decode() argument 1 must be str, not None
+
+This also indicates that your locale is not set correctly.
+
+To fix either of these issues, set this environment variable, as well as the
+``LANG`` and ``LC_ALL`` environment variables to e.g. ``en_US.UTF-8`` using, in
+the case of ``bash``::
+
+ export LANG="en_US.UTF-8"
+ export LC_ALL="en_US.UTF-8"
+ export LC_CTYPE="en_US.UTF-8"
+
+To avoid any issues in future, you should add this line to your e.g.
+``~/.bash_profile`` or ``.bashrc`` file.
+
+To test these changes, open a new terminal and type ``locale``, and you should
+see something like::
+
+ $ locale
+ LANG="en_US.UTF-8"
+ LC_COLLATE="en_US.UTF-8"
+ LC_CTYPE="en_US.UTF-8"
+ LC_MESSAGES="en_US.UTF-8"
+ LC_MONETARY="en_US.UTF-8"
+ LC_NUMERIC="en_US.UTF-8"
+ LC_TIME="en_US.UTF-8"
+ LC_ALL="en_US.UTF-8"
+
+If so, you can go ahead and try running ``setup.py`` again (in the new
+terminal).
+
+
+Creating a Time object fails with ValueError after upgrading Astropy
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In some cases, when users have upgraded Astropy from an older version to v1.0
+or greater they have run into the following crash when trying to create a
+`~astropy.time.Time` object::
+
+ >>> datetime = Time('2012-03-01T13:08:00', scale='utc')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "/usr/lib/python2.7/site-packages/astropy/time/core.py", line 198, in __init__
+ self._init_from_vals(val, val2, format, scale, copy)
+ File "/usr/lib/python2.7/site-packages/astropy/time/core.py", line 240, in _init_from_vals
+ self._time = self._get_time_fmt(val, val2, format, scale)
+ File "/usr/lib/python2.7/site-packages/astropy/time/core.py", line 278, in _get_time_fmt
+ raise ValueError('Input values did not match {0}'.format(err_msg))
+ ValueError: Input values did not match any of the formats where the format keyword is optional [u'astropy_time', u'datetime', u'jyear_str', u'iso', u'isot', u'yday', u'byear_str']
+
+This problem can occur when there is a version mismatch between the compiled
+ERFA library (this is included as part of Astropy in most distributions), and
+the version of the Astropy Python source.
+
+This can have a number of causes. The most likely is that when installing the
+new Astropy version, your previous Astropy version was not fully uninstalled
+first, resulting in a mishmash of versions. Your best bet is to fully remove
+Astropy from its installation path, and reinstall from scratch using your
+preferred installation method. How to remove the old version may be a simple
+matter of removing the entire ``astropy/`` directory from within the
+``site-packages`` directory it is installed in. However, if in doubt, ask
+how best to uninstall packages from your preferred Python distribution.
+
+Another possible cause of this, in particular for people developing on Astropy
+and installing from a source checkout, is simply that your Astropy build
+directory is unclean. To fix this, run ``git clean -dfx``. This removes
+*all* build artifacts from the repository that aren't normally tracked by git.
+Make sure before running this that there are no untracked files in the
+repository you intend to save. Then rebuild/reinstall from the clean repo.
+
+
+Failing logging tests when running the tests in IPython
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running the Astropy tests using ``astropy.test()`` in an IPython
+interpreter some of the tests in the ``astropy/tests/test_logger.py`` *might*
+fail, depending on the version of IPython or other factors.
+This is due to mutually incompatible behaviors in IPython and py.test, and is
+not due to a problem with the test itself or the feature being tested.
+
+See: https://github.com/astropy/astropy/issues/717
+
+
+Some docstrings can not be displayed in IPython < 0.13.2
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Displaying long docstrings that contain Unicode characters may fail on
+some platforms in the IPython console (prior to IPython version
+0.13.2)::
+
+ >>> import astropy.units as u
+
+ >>> u.Angstrom?
+ ERROR: UnicodeEncodeError: 'ascii' codec can't encode character u'\xe5' in
+ position 184: ordinal not in range(128) [IPython.core.page]
+
+This can be worked around by changing the default encoding to ``utf-8``
+by adding the following to your ``sitecustomize.py`` file::
+
+ import sys
+ sys.setdefaultencoding('utf-8')
+
+Note that in general, `this is not recommended
+<http://ziade.org/2008/01/08/syssetdefaultencoding-is-evil/>`_,
+because it can hide other Unicode encoding bugs in your application.
+However, in general if your application does not deal with text
+processing and you just want docstrings to work, this may be
+acceptable.
+
+The IPython issue: https://github.com/ipython/ipython/pull/2738
+
Installation fails on Mageia-2 or Mageia-3 distributions
---------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building may fail with warning messages such as::
@@ -272,8 +341,9 @@ fix the issue, though an immediate workaround is to edit the file::
and search for the line that adds the option ``-Wl,--no-undefined`` to the
``LDFLAGS`` variable and remove that option.
+
Crash on upgrading from Astropy 0.2 to a newer version
-------------------------------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible for installation of a new version of Astropy, or upgrading of an
existing installation to crash due to not having permissions on the
diff --git a/docs/modeling/parameters.rst b/docs/modeling/parameters.rst
index 129af6b..684d3a4 100644
--- a/docs/modeling/parameters.rst
+++ b/docs/modeling/parameters.rst
@@ -52,14 +52,14 @@ Parameter examples
when constructing an instance of that model::
>>> g = models.Gaussian1D(1.0, 0.0, 0.1)
- >>> g
- <Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.1...)>
+ >>> g # doctest: +FLOAT_CMP
+ <Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.1)>
However, parameters may also be given as keyword arguments (in any order)::
>>> g = models.Gaussian1D(mean=0.0, amplitude=2.0, stddev=0.2)
- >>> g
- <Gaussian1D(amplitude=2.0, mean=0.0, stddev=0.2...)>
+ >>> g # doctest: +FLOAT_CMP
+ <Gaussian1D(amplitude=2.0, mean=0.0, stddev=0.2)>
So all that really matters is knowing the names (and meanings) of the
parameters that each model accepts. More information about an individual
diff --git a/docs/nitpick-exceptions b/docs/nitpick-exceptions
index ade11a0..2226bbc 100644
--- a/docs/nitpick-exceptions
+++ b/docs/nitpick-exceptions
@@ -16,6 +16,7 @@ py:class astropy.modeling.projections.Projection
# astropy.io.fits
py:class astropy.io.fits.hdu.base.ExtensionHDU
+py:class astropy.io.fits.util.NotifierMixin
# astropy.utils
py:class astropy.extern.six.Iterator
diff --git a/docs/stability.rst b/docs/stability.rst
index 93db2ea..d639670 100644
--- a/docs/stability.rst
+++ b/docs/stability.rst
@@ -8,33 +8,44 @@ sub-packages. This document summarizes the current status of the Astropy
sub-packages, so that users understand where they might expect changes in
future, and which sub-packages they can safely use for production code.
-.. |planned| image:: _static/planned.png
-
-.. |dev| image:: _static/dev.png
-
-.. |stable| image:: _static/stable.png
-
-.. |mature| image:: _static/mature.png
-
The classification is as follows:
.. raw:: html
+ <style>
+ .planned:before {
+ color: #cbcbcb;
+ content: "⬤";
+ }
+ .dev:before {
+ color: #ffad00;
+ content: "⬤";
+ }
+ .stable:before {
+ color: #4e72c3;
+ content: "⬤";
+ }
+ .mature:before {
+ color: #03a913;
+ content: "⬤";
+ }
+ </style>
+
<table align='center'>
<tr>
- <td align='center'><img src='_images/planned.png'></td>
+ <td align='center'><span class="planned"></span></td>
<td>Planned</td>
</tr>
<tr>
- <td align='center'><img src='_images/dev.png'></td>
+ <td align='center'><span class="dev"></span></td>
<td>Actively developed, be prepared for possible significant changes.</td>
</tr>
<tr>
- <td align='center'><img src='_images/stable.png'></td>
+ <td align='center'><span class="stable"></span></td>
          <td>Reasonably stable, any significant changes/additions will generally include backwards-compatibility.</td>
</tr>
<tr>
- <td align='center'><img src='_images/mature.png'></td>
+ <td align='center'><span class="mature"></span></td>
<td>Mature. Additions/improvements possible, but no major changes planned. </td>
</tr>
</table>
@@ -43,13 +54,6 @@ The current planned and existing sub-packages are:
.. raw:: html
- <style>
- .stability img {
- max-width: none;
- margin: 0px;
- }
- </style>
-
<table border="1" class="docutils stability" align='center'>
<tr>
<th class="head">
@@ -67,7 +71,7 @@ The current planned and existing sub-packages are:
astropy.analytic_functions
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
New in v1.0.
@@ -78,7 +82,7 @@ The current planned and existing sub-packages are:
astropy.constants
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
      Constants were changed to <tt class="docutils literal"><span class="pre">Quantity</span></tt> objects in v0.2. Since then, the package has been stable, with occasional additions of new constants.
@@ -89,7 +93,7 @@ The current planned and existing sub-packages are:
astropy.convolution
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
New top-level package in v0.3 (was previously part of
@@ -102,7 +106,7 @@ The current planned and existing sub-packages are:
astropy.coordinates
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
New in v0.2, major changes in v0.4. Subsequent versions should
@@ -114,7 +118,7 @@ The current planned and existing sub-packages are:
astropy.cosmology
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
Incremental improvements since v0.1, but mostly stable API.
@@ -126,7 +130,7 @@ The current planned and existing sub-packages are:
astropy.io.ascii
</td>
<td align='center'>
- <img alt="mature" src="_images/mature.png">
+ <span class="mature"></span>
</td>
<td>
Originally developed as <tt class="docutils literal"><span class="pre">asciitable</span></tt>, and has maintained a stable API.
@@ -137,7 +141,7 @@ The current planned and existing sub-packages are:
astropy.io.fits
</td>
<td align='center'>
- <img alt="mature" src="_images/mature.png">
+ <span class="mature"></span>
</td>
<td>
Originally developed as <tt class="docutils literal"><span class="pre">pyfits</span></tt>, and retains an API consistent with the standalone version.
@@ -159,7 +163,7 @@ The current planned and existing sub-packages are:
astropy.io.votable
</td>
<td align='center'>
- <img alt="mature" src="_images/mature.png">
+ <span class="mature"></span>
</td>
<td>
Originally developed as <tt class="docutils literal"><span class="pre">vo.table</span></tt>, and has a stable API.
@@ -170,7 +174,7 @@ The current planned and existing sub-packages are:
astropy.modeling
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
      New in v0.3. Major changes in v1.0, significant additions planned. Backwards-compatibility likely to be maintained, but not guaranteed.
@@ -181,7 +185,7 @@ The current planned and existing sub-packages are:
astropy.nddata
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
Significantly revised in v1.0 to implement <a href="https://github.com/astropy/astropy-APEs/blob/master/APE7.rst">APE 7</a>. Major changes in the API are not anticipated, broader use may reveal flaws that require API changes.
@@ -192,7 +196,7 @@ The current planned and existing sub-packages are:
astropy.photometry
</td>
<td align='center'>
- <img alt="planned" src="_images/planned.png">
+ <span class="planned"></span>
</td>
<td>
@@ -203,7 +207,7 @@ The current planned and existing sub-packages are:
astropy.stats
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
Likely to maintain backwards-compatibility, but functionality continually being expanded, so significant additions likely in the future.
@@ -214,7 +218,7 @@ The current planned and existing sub-packages are:
astropy.table
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
Incremental improvements since v0.1, but mostly stable API.
@@ -225,7 +229,7 @@ The current planned and existing sub-packages are:
astropy.time
</td>
<td align='center'>
- <img alt="mature" src="_images/mature.png">
+ <span class="mature"></span>
</td>
<td>
Incremental improvements since v0.1, API likely to remain stable
@@ -237,7 +241,7 @@ The current planned and existing sub-packages are:
astropy.units
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
New in v0.2. Adapted from <tt class="docutils literal"><span class="pre">pnbody</span></tt> and integrated into Astropy. Current functionality stable with intent to maintain backwards compatibility. Significant new functionality is likely to be added in future versions.
@@ -248,7 +252,7 @@ The current planned and existing sub-packages are:
astropy.utils
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
      Contains mostly utilities destined for internal use with other parts of Astropy. Existing functionality generally stable, but regular additions and occasional changes.
@@ -259,7 +263,7 @@ The current planned and existing sub-packages are:
astropy.visualization
</td>
<td align='center'>
- <img alt="dev" src="_images/dev.png">
+ <span class="dev"></span>
</td>
<td>
New in v1.0, and in development.
@@ -270,7 +274,7 @@ The current planned and existing sub-packages are:
astropy.vo
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
Virtual Observatory service access and validation. Currently, only Simple Cone Search and SAMP are supported.
@@ -281,11 +285,10 @@ The current planned and existing sub-packages are:
astropy.wcs
</td>
<td align='center'>
- <img alt="stable" src="_images/stable.png">
+ <span class="stable"></span>
</td>
<td>
Originally developed as <tt class="docutils literal"><span class="pre">pywcs</span></tt>, and has a stable API for now. However, there are plans to generalize the WCS interface to accommodate non-FITS WCS transformations, and this may lead to small changes in the user interface.
</td>
</tr>
</table>
-
diff --git a/docs/table/construct_table.rst b/docs/table/construct_table.rst
index 0cfa14d..f6a4e69 100644
--- a/docs/table/construct_table.rst
+++ b/docs/table/construct_table.rst
@@ -316,7 +316,17 @@ Likewise the data type for each column can by changed with ``dtype``::
NumPy homogeneous array
"""""""""""""""""""""""
-A normal `numpy` 2-d array (where all elements have the same type) can be
+A `numpy` 1-d array is treated as a single row table where each element of the
+array corresponds to a column::
+
+ >>> Table(np.array([1, 2, 3]), names=['a', 'b', 'c'], dtype=('i8', 'i8', 'i8'))
+ <Table masked=False length=1>
+ a b c
+ int64 int64 int64
+ ----- ----- -----
+ 1 2 3
+
+A `numpy` 2-d array (where all elements have the same type) can also be
converted into a |Table|. In this case the column names are not specified by
the data and must either be provided by the user or will be automatically
generated as ``col<N>`` where ``<N>`` is the column number.
@@ -442,9 +452,12 @@ for the ``data`` argument.
override the existing ``data`` types.
**numpy ndarray (homogeneous)**
- The ``data`` ndarray must be at least 2-dimensional, with the first
- (left-most) index corresponding to row number (table length) and the
- second index corresponding to column number (table width). Higher
+ If the ``data`` ndarray is 1-dimensional then it is treated as a single row
+ table where each element of the array corresponds to a column.
+
+ If the ``data`` ndarray is at least 2-dimensional then the first
+ (left-most) index corresponds to row number (table length) and the
+ second index corresponds to column number (table width). Higher
dimensions get absorbed in the shape of each table cell.
If provided the ``names`` list must match the "width" of the ``data``
diff --git a/docs/time/index.rst b/docs/time/index.rst
index b862365..276e39f 100644
--- a/docs/time/index.rst
+++ b/docs/time/index.rst
@@ -84,8 +84,8 @@ Finally, some further examples of what is possible. For details, see
the API documentation below.
>>> dt = t[1] - t[0]
- >>> dt
- <TimeDelta object: scale='tai' format='jd' value=4018.0000217...>
+ >>> dt # doctest: +FLOAT_CMP
+ <TimeDelta object: scale='tai' format='jd' value=4018.00002172>
Here, note the conversion of the timescale to TAI. Time differences
can only have scales in which one day is always equal to 86400 seconds.
@@ -306,8 +306,8 @@ and ``jd2`` attributes::
>>> t.jd1, t.jd2
(2455197.5, 0.0)
>>> t2 = t.tai
- >>> t2.jd1, t2.jd2
- (2455197.5, 0.00039351851851851...)
+ >>> t2.jd1, t2.jd2 # doctest: +FLOAT_CMP
+ (2455197.5, 0.0003935185185185185)
Creating a Time object
----------------------
@@ -370,8 +370,8 @@ string-valued formats ignore ``val2`` and all numeric inputs effectively add
the two values in a way that maintains the highest precision. Example::
>>> t = Time(100.0, 0.000001, format='mjd', scale='tt')
- >>> t.jd, t.jd1, t.jd2 # doctest: +SKIP
- (2400100.50000..., 2400100.5, 1e-06)
+ >>> t.jd, t.jd1, t.jd2 # doctest: +FLOAT_CMP
+ (2400100.500001, 2400100.5, 1e-06)
format
^^^^^^
@@ -460,10 +460,10 @@ no explicit longitude is given.
>>> t = Time('2001-03-22 00:01:44.732327132980', scale='utc',
... location=('120d', '40d'))
- >>> t.sidereal_time('apparent', 'greenwich')
- <Longitude 12.00000000000... hourangle>
- >>> t.sidereal_time('apparent')
- <Longitude 20.00000000000... hourangle>
+ >>> t.sidereal_time('apparent', 'greenwich') # doctest: +FLOAT_CMP
+ <Longitude 12.00000000000001 hourangle>
+ >>> t.sidereal_time('apparent') # doctest: +FLOAT_CMP
+ <Longitude 20.00000000000001 hourangle>
.. note:: In future versions, we hope to add the possibility to add observatory
objects and/or names.
@@ -586,7 +586,7 @@ predictions), and set :attr:`~astropy.time.Time.delta_ut1_utc` as described in
>>> from astropy.utils.iers import IERS_A, IERS_A_URL
>>> from astropy.utils.data import download_file
- >>> iers_a_file = download_file(IERS_A_URL, cache=True)) # doctest: +SKIP
+ >>> iers_a_file = download_file(IERS_A_URL, cache=True) # doctest: +SKIP
>>> iers_a = IERS_A.open(iers_a_file) # doctest: +SKIP
>>> t.delta_ut1_utc = t.get_delta_ut1_utc(iers_a) # doctest: +SKIP
@@ -632,16 +632,16 @@ transformations, ERFA C-library routines are used under the hood, which support
calculations following different IAU resolutions. Sample usage::
>>> t = Time('2006-01-15 21:24:37.5', scale='utc', location=('120d', '45d'))
- >>> t.sidereal_time('mean')
- <Longitude 13.089521870640... hourangle>
- >>> t.sidereal_time('apparent')
- <Longitude 13.08950367508... hourangle>
- >>> t.sidereal_time('apparent', 'greenwich')
- <Longitude 5.08950367508... hourangle>
- >>> t.sidereal_time('apparent', '-90d')
- <Longitude 23.08950367508... hourangle>
- >>> t.sidereal_time('apparent', '-90d', 'IAU1994')
- <Longitude 23.08950365423... hourangle>
+ >>> t.sidereal_time('mean') # doctest: +FLOAT_CMP
+ <Longitude 13.089521870640212 hourangle>
+ >>> t.sidereal_time('apparent') # doctest: +FLOAT_CMP
+ <Longitude 13.089503675087027 hourangle>
+ >>> t.sidereal_time('apparent', 'greenwich') # doctest: +FLOAT_CMP
+ <Longitude 5.089503675087027 hourangle>
+ >>> t.sidereal_time('apparent', '-90d') # doctest: +FLOAT_CMP
+ <Longitude 23.08950367508703 hourangle>
+ >>> t.sidereal_time('apparent', '-90d', 'IAU1994') # doctest: +FLOAT_CMP
+ <Longitude 23.08950365423405 hourangle>
Time Deltas
-----------
@@ -732,8 +732,8 @@ object::
i.e., scales for which it is not necessary to know the times that were
differenced::
- >>> dt.tt
- <TimeDelta object: scale='tt' format='jd' value=364.99999974...>
+ >>> dt.tt # doctest: +FLOAT_CMP
+ <TimeDelta object: scale='tt' format='jd' value=364.999999746>
>>> dt.tdb
Traceback (most recent call last):
...
@@ -781,8 +781,8 @@ of time. Usage is most easily illustrated by examples::
array([False, True, True], dtype=bool)
>>> dt + 1.*u.hr # can also add/subtract such quantities
<TimeDelta object: scale='None' format='jd' value=[ 10.04166667 20.04166667 30.04166667]>
- >>> Time(50000., format='mjd', scale='utc') + 1.*u.hr
- <Time object: scale='utc' format='mjd' value=50000.041666...>
+ >>> Time(50000., format='mjd', scale='utc') + 1.*u.hr # doctest: +FLOAT_CMP
+ <Time object: scale='utc' format='mjd' value=50000.0416667>
>>> dt * 10.*u.km/u.s # for multiplication and division with a
... # Quantity, TimeDelta is converted
<Quantity [ 100., 200., 300.] d km / s>
diff --git a/docs/units/equivalencies.rst b/docs/units/equivalencies.rst
index 783b1bf..590ad41 100644
--- a/docs/units/equivalencies.rst
+++ b/docs/units/equivalencies.rst
@@ -77,14 +77,14 @@ dimensionless). For instance, normally the following raise exceptions::
 But when we pass the proper conversion function,
:func:`~astropy.units.equivalencies.dimensionless_angles`, it works.
- >>> u.deg.to('', equivalencies=u.dimensionless_angles())
- 0.01745329...
+ >>> u.deg.to('', equivalencies=u.dimensionless_angles()) # doctest: +FLOAT_CMP
+ 0.017453292519943295
>>> (0.5e38 * u.kg * u.m**2 * (u.cycle / u.s)**2).to(u.J,
- ... equivalencies=u.dimensionless_angles())
- <Quantity 1.97392...e+39 J>
+ ... equivalencies=u.dimensionless_angles()) # doctest: +FLOAT_CMP
+ <Quantity 1.9739208802178715e+39 J>
>>> import numpy as np
- >>> np.exp((1j*0.125*u.cycle).to('', equivalencies=u.dimensionless_angles()))
- <Quantity (0.707106781186...+0.707106781186...j)>
+ >>> np.exp((1j*0.125*u.cycle).to('', equivalencies=u.dimensionless_angles())) # doctest: +FLOAT_CMP
+ <Quantity (0.7071067811865476+0.7071067811865475j)>
 The example with complex numbers is also one where one may well be doing a fair
number of similar calculations. For such situations, there is the
@@ -105,15 +105,15 @@ energy can be converted.
>>> ([1000, 2000] * u.nm).to(u.Hz, equivalencies=u.spectral())
<Quantity [ 2.99792458e+14, 1.49896229e+14] Hz>
- >>> ([1000, 2000] * u.nm).to(u.eV, equivalencies=u.spectral())
- <Quantity [ 1.239..., 0.619...] eV>
+ >>> ([1000, 2000] * u.nm).to(u.eV, equivalencies=u.spectral()) # doctest: +FLOAT_CMP
+ <Quantity [ 1.23984193, 0.61992096] eV>
These equivalencies even work with non-base units::
>>> # Inches to calories
>>> from astropy.units import imperial
- >>> imperial.inch.to(imperial.Cal, equivalencies=u.spectral())
- 1.8691807591...e-27
+ >>> imperial.inch.to(imperial.Cal, equivalencies=u.spectral()) # doctest: +FLOAT_CMP
+ 1.869180759162485e-27
Spectral (Doppler) equivalencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -157,8 +157,8 @@ location. For example::
... equivalencies=u.spectral_density(3500 * u.AA))
<Quantity 1.5e-23 erg / (cm2 Hz s)>
>>> (1.5 * u.Jy).to(u.erg / u.cm**2 / u.s / u.micron,
- ... equivalencies=u.spectral_density(3500 * u.AA))
- <Quantity 3.670928057142...e-08 erg / (cm2 micron s)>
+ ... equivalencies=u.spectral_density(3500 * u.AA)) # doctest: +FLOAT_CMP
+ <Quantity 3.670928057142856e-08 erg / (cm2 micron s)>
Brightness Temperature / Flux Density Equivalency
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -202,12 +202,12 @@ example converts the FWHM to sigma::
>>> import numpy as np
>>> beam_fwhm = 50*u.arcsec
- >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5
- >>> beam_sigma = beam_fwhm*fwhm_to_sigma
+ >>> fwhm_to_sigma = 1. / (8 * np.log(2))**0.5
+ >>> beam_sigma = beam_fwhm * fwhm_to_sigma
>>> omega_B = 2 * np.pi * beam_sigma**2
>>> freq = 5 * u.GHz
- >>> u.Jy.to(u.K, equivalencies=u.brightness_temperature(omega_B, freq))
- 19.55392833...
+ >>> u.Jy.to(u.K, equivalencies=u.brightness_temperature(omega_B, freq)) # doctest: +FLOAT_CMP
+ 19.553928332631582
Temperature Energy Equivalency
@@ -220,8 +220,8 @@ observations at high-energy, be it for solar or X-ray astronomy. Example::
>>> import astropy.units as u
>>> t_k = 1e6 * u.K
- >>> t_k.to(u.eV, equivalencies=u.temperature_energy())
- <Quantity 86.17332384... eV>
+ >>> t_k.to(u.eV, equivalencies=u.temperature_energy()) # doctest: +FLOAT_CMP
+ <Quantity 86.17332384960955 eV>
Writing new equivalencies
-------------------------
@@ -249,13 +249,13 @@ for them::
Note that the equivalency can be used with any other compatible units::
>>> from astropy.units import imperial
- >>> imperial.gallon.to(imperial.pound, 1, equivalencies=liters_water)
- 8.3454044633335...
+ >>> imperial.gallon.to(imperial.pound, 1, equivalencies=liters_water) # doctest: +FLOAT_CMP
+ 8.345404463333525
And it also works in the other direction::
- >>> imperial.lb.to(imperial.pint, 1, equivalencies=liters_water)
- 0.9586114172355...
+ >>> imperial.lb.to(imperial.pint, 1, equivalencies=liters_water) # doctest: +FLOAT_CMP
+ 0.9586114172355459
A slightly more complicated example: Spectral Doppler Equivalencies
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -270,10 +270,10 @@ but this example is illustrative::
>>> freq_to_vel = [(u.GHz, u.km/u.s,
... lambda x: (restfreq-x) / restfreq * si.c.to('km/s').value,
... lambda x: (1-x/si.c.to('km/s').value) * restfreq )]
- >>> u.Hz.to(u.km / u.s, 116e9, equivalencies=freq_to_vel)
- -1895.432192...
+ >>> u.Hz.to(u.km / u.s, 116e9, equivalencies=freq_to_vel) # doctest: +FLOAT_CMP
+ -1895.4321928669262
>>> (116e9 * u.Hz).to(u.km / u.s, equivalencies=freq_to_vel)
- <Quantity -1895.432192... km / s>
+ <Quantity -1895.4321928669262 km / s>
Note that once this is defined for GHz and km/s, it will work for all other
units of frequency and velocity. ``x`` is converted from the input frequency
@@ -337,8 +337,8 @@ simply do:
>>> import astropy.units as u
>>> u.set_enabled_equivalencies(u.dimensionless_angles())
<astropy.units.core._UnitContext object at ...>
- >>> u.deg.to('')
- 0.01745329...
+ >>> u.deg.to('') # doctest: +FLOAT_CMP
+ 0.017453292519943295
Here, any list of equivalencies could be used, or one could add, e.g.,
:func:`~astropy.units.equivalencies.spectral` and
@@ -355,5 +355,5 @@ a context manager is provided:
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... c = np.exp(1j*phase)
- >>> c
- <Quantity (-1+...j) >
+ >>> c # doctest: +FLOAT_CMP
+ <Quantity (-1+1.2246063538223773e-16j) >
diff --git a/docs/units/index.rst b/docs/units/index.rst
index 2960bdb..0798ee5 100644
--- a/docs/units/index.rst
+++ b/docs/units/index.rst
@@ -48,20 +48,20 @@ value members::
From this simple building block, it's easy to start combining
quantities with different units::
- >>> 15.1 * u.meter / (32.0 * u.second)
- <Quantity 0.47187... m / s>
- >>> 3.0 * u.kilometer / (130.51 * u.meter / u.second)
- <Quantity 0.0229867443... km s / m>
- >>> (3.0 * u.kilometer / (130.51 * u.meter / u.second)).decompose()
- <Quantity 22.9867443... s>
+ >>> 15.1 * u.meter / (32.0 * u.second) # doctest: +FLOAT_CMP
+ <Quantity 0.471875 m / s>
+ >>> 3.0 * u.kilometer / (130.51 * u.meter / u.second) # doctest: +FLOAT_CMP
+ <Quantity 0.022986744310780783 km s / m>
+ >>> (3.0 * u.kilometer / (130.51 * u.meter / u.second)).decompose() # doctest: +FLOAT_CMP
+ <Quantity 22.986744310780782 s>
Unit conversion is done using the
:meth:`~astropy.units.quantity.Quantity.to` method, which returns a new
|quantity| in the given unit::
>>> x = 1.0 * u.parsec
- >>> x.to(u.km)
- <Quantity 30856775814671.9... km>
+ >>> x.to(u.km) # doctest: +FLOAT_CMP
+ <Quantity 30856775814671.914 km>
It is also possible to work directly with units at a lower level, for
example, to create custom units::
@@ -74,8 +74,8 @@ example, to create custom units::
>>> # And do some conversions
>>> q = 42.0 * cms
- >>> q.to(mph)
- <Quantity 0.93951324266284... mi / h>
+ >>> q.to(mph) # doctest: +FLOAT_CMP
+ <Quantity 0.939513242662849 mi / h>
Units that "cancel out" become a special unit called the
"dimensionless unit":
@@ -118,8 +118,8 @@ Format specifiers (like ``0.03f``) in new-style format
strings will be used to format the quantity value::
>>> q = 15.1 * u.meter / (32.0 * u.second)
- >>> q
- <Quantity 0.47187... m / s>
+ >>> q # doctest: +FLOAT_CMP
+ <Quantity 0.471875 m / s>
>>> "{0:0.03f}".format(q)
'0.472 m / s'
@@ -127,8 +127,8 @@ The value and unit can also be formatted separately. Format specifiers
used on units can be used to choose the unit formatter::
>>> q = 15.1 * u.meter / (32.0 * u.second)
- >>> q
- <Quantity 0.47187... m / s>
+ >>> q # doctest: +FLOAT_CMP
+ <Quantity 0.471875 m / s>
>>> "{0.value:0.03f} {0.unit:FITS}".format(q)
'0.472 m s-1'
diff --git a/docs/units/quantity.rst b/docs/units/quantity.rst
index 75443b5..b35e75f 100644
--- a/docs/units/quantity.rst
+++ b/docs/units/quantity.rst
@@ -64,16 +64,16 @@ Converting to different units
:meth:`~astropy.units.quantity.Quantity.to` method:
>>> q = 2.3 * u.m / u.s
- >>> q.to(u.km / u.h)
- <Quantity 8.2... km / h>
+ >>> q.to(u.km / u.h) # doctest: +FLOAT_CMP
+ <Quantity 8.28 km / h>
For convenience, the `~astropy.units.quantity.Quantity.si` and
`~astropy.units.quantity.Quantity.cgs` attributes can be used to
convert the |quantity| to base S.I. or c.g.s units:
>>> q = 2.4 * u.m / u.s
- >>> q.si
- <Quantity 2... m / s>
+ >>> q.si # doctest: +FLOAT_CMP
+ <Quantity 2.4 m / s>
>>> q.cgs
<Quantity 240.0 cm / s>
@@ -97,12 +97,12 @@ resulting object **has units of the object on the left**:
>>> 1100.1 * u.m + 13.5 * u.km
<Quantity 14600.1 m>
- >>> 13.5 * u.km + 1100.1 * u.m
- <Quantity 14.600... km>
+ >>> 13.5 * u.km + 1100.1 * u.m # doctest: +FLOAT_CMP
+ <Quantity 14.6001 km>
>>> 1100.1 * u.m - 13.5 * u.km
<Quantity -12399.9 m>
- >>> 13.5 * u.km - 1100.1 * u.m
- <Quantity 12.399... km>
+ >>> 13.5 * u.km - 1100.1 * u.m # doctest: +FLOAT_CMP
+ <Quantity 12.3999 km>
Addition and subtraction are not supported between |quantity| objects and basic
numeric types:
@@ -123,36 +123,36 @@ Multiplication and division is supported between |quantity| objects with any
units, and with numeric types. For these operations between objects with
equivalent units, the **resulting object has composite units**:
- >>> 1.1 * u.m * 140.3 * u.cm
- <Quantity 154.33... cm m>
- >>> 140.3 * u.cm * 1.1 * u.m
- <Quantity 154.33... cm m>
- >>> 1. * u.m / (20. * u.cm)
- <Quantity 0.05... m / cm>
+ >>> 1.1 * u.m * 140.3 * u.cm # doctest: +FLOAT_CMP
+ <Quantity 154.33 cm m>
+ >>> 140.3 * u.cm * 1.1 * u.m # doctest: +FLOAT_CMP
+ <Quantity 154.33 cm m>
+ >>> 1. * u.m / (20. * u.cm) # doctest: +FLOAT_CMP
+ <Quantity 0.05 m / cm>
>>> 20. * u.cm / (1. * u.m)
<Quantity 20.0 cm / m>
For multiplication, you can change how to represent the resulting object by
using the :meth:`~astropy.units.quantity.Quantity.to` method:
- >>> (1.1 * u.m * 140.3 * u.cm).to(u.m**2)
- <Quantity 1.5433... m2>
- >>> (1.1 * u.m * 140.3 * u.cm).to(u.cm**2)
- <Quantity 15433.0... cm2>
+ >>> (1.1 * u.m * 140.3 * u.cm).to(u.m**2) # doctest: +FLOAT_CMP
+ <Quantity 1.5433000000000001 m2>
+ >>> (1.1 * u.m * 140.3 * u.cm).to(u.cm**2) # doctest: +FLOAT_CMP
+ <Quantity 15433.000000000002 cm2>
For division, if the units are equivalent, you may want to make the resulting
object dimensionless by reducing the units. To do this, use the
:meth:`~astropy.units.quantity.Quantity.decompose()` method:
- >>> (20. * u.cm / (1. * u.m)).decompose()
- <Quantity 0.2...>
+ >>> (20. * u.cm / (1. * u.m)).decompose() # doctest: +FLOAT_CMP
+ <Quantity 0.2>
This method is also useful for more complicated arithmetic:
- >>> 15. * u.kg * 32. * u.cm * 15 * u.m / (11. * u.s * 1914.15 * u.ms)
- <Quantity 0.341950972... cm kg m / (ms s)>
- >>> (15. * u.kg * 32. * u.cm * 15 * u.m / (11. * u.s * 1914.15 * u.ms)).decompose()
- <Quantity 3.41950972... kg m2 / s2>
+ >>> 15. * u.kg * 32. * u.cm * 15 * u.m / (11. * u.s * 1914.15 * u.ms) # doctest: +FLOAT_CMP
+ <Quantity 0.3419509727792778 cm kg m / (ms s)>
+ >>> (15. * u.kg * 32. * u.cm * 15 * u.m / (11. * u.s * 1914.15 * u.ms)).decompose() # doctest: +FLOAT_CMP
+ <Quantity 3.4195097277927777 kg m2 / s2>
Numpy functions
@@ -166,22 +166,22 @@ quantities:
>>> q = np.array([1., 2., 3., 4.]) * u.m / u.s
>>> np.mean(q)
<Quantity 2.5 m / s>
- >>> np.std(q)
- <Quantity 1.118033... m / s>
+ >>> np.std(q) # doctest: +FLOAT_CMP
+ <Quantity 1.118033988749895 m / s>
including functions that only accept specific units such as angles:
>>> q = 30. * u.deg
- >>> np.sin(q)
- <Quantity 0.4999999...>
+ >>> np.sin(q) # doctest: +FLOAT_CMP
+ <Quantity 0.49999999999999994>
or dimensionless quantities:
>>> from astropy.constants import h, k_B
>>> nu = 3 * u.GHz
>>> T = 30 * u.K
- >>> np.exp(-h * nu / (k_B * T))
- <Quantity 0.99521225...>
+ >>> np.exp(-h * nu / (k_B * T)) # doctest: +FLOAT_CMP
+ <Quantity 0.995212254618668>
(see `Dimensionless quantities`_ for more details).
@@ -194,8 +194,8 @@ or if they are passed to a Numpy function that takes dimensionless
quantities, the units are simplified so that the quantity is
dimensionless and scale-free. For example:
- >>> 1. + 1. * u.m / u.km
- <Quantity 1.00...>
+ >>> 1. + 1. * u.m / u.km # doctest: +FLOAT_CMP
+ <Quantity 1.001>
which is different from:
@@ -219,15 +219,15 @@ dimensionless quantities:
>>> nu = 3 * u.GHz
>>> T = 30 * u.K
- >>> np.exp(- h * nu / (k_B * T))
- <Quantity 0.99521225...>
+ >>> np.exp(- h * nu / (k_B * T)) # doctest: +FLOAT_CMP
+ <Quantity 0.995212254618668>
The result is independent of the units the different quantities were specified in:
>>> nu = 3.e9 * u.Hz
>>> T = 30 * u.K
- >>> np.exp(- h * nu / (k_B * T))
- <Quantity 0.99521225...>
+ >>> np.exp(- h * nu / (k_B * T)) # doctest: +FLOAT_CMP
+ <Quantity 0.995212254618668>
Converting to plain Python scalars
----------------------------------
diff --git a/docs/warnings.rst b/docs/warnings.rst
index f8dcccf..272a3d0 100644
--- a/docs/warnings.rst
+++ b/docs/warnings.rst
@@ -51,5 +51,6 @@ wish to *squelch* deprecation warnings, you can start Python with
also an Astropy-specific `~astropy.utils.exceptions.AstropyDeprecationWarning`
which can be used to disable deprecation warnings from Astropy only.
-See http://docs.python.org/using/cmdline.html#cmdoption-unittest-discover-W for
-more information on the -W argument.
+See `the CPython documentation
+<http://docs.python.org/2/using/cmdline.html#cmdoption-W>`__ for more
+information on the -W argument.
diff --git a/setup.py b/setup.py
index b168331..0b37934 100755
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@ from astropy_helpers.version_helpers import generate_version_py
NAME = 'astropy'
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
-VERSION = '1.0.1'
+VERSION = '1.0.2'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-astro/packages/python-astropy.git
More information about the debian-science-commits
mailing list