[h5py] 78/455: Release 0.2.1
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Thu Jul 2 18:19:19 UTC 2015
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to annotated tag 1.3.0
in repository h5py.
commit 90a3e8bd6aa11bc339c124c072a8922b6a711e85
Author: andrewcollette <andrew.collette at gmail.com>
Date: Sat Jul 19 05:29:21 2008 +0000
Release 0.2.1
---
CHANGES.txt | 36 +++++
MANIFEST.in | 1 +
README.txt | 2 +-
h5py/h5.pyx | 4 +-
h5py/highlevel.py | 49 +++---
h5py/tests/__init__.py | 7 +-
h5py/tests/common.py | 5 +-
h5py/tests/test_highlevel.py | 365 ++++++++++++++++++++++++++++++++++++++++++-
h5py/utils.pyx | 2 -
h5py/utils_hl.py | 3 +
setup.py | 4 +-
11 files changed, 445 insertions(+), 33 deletions(-)
diff --git a/CHANGES.txt b/CHANGES.txt
new file mode 100644
index 0000000..e98121d
--- /dev/null
+++ b/CHANGES.txt
@@ -0,0 +1,36 @@
+Version 0.2.1
+=============
+
+General:
+--------
+ - Moved version info into h5py.h5 module
+ - Stub added for 1.8 API conditional compilation
+ - Rewrote unit test framework to correctly use unittest methods
+ - Remove VERSION.txt, add CHANGES.txt
+
+Low-level:
+----------
+ - Normalize keyword naming for property list arguments
+ - Change h5g.GroupID Python extensions to special methods
+ - Additional property list methods
+ - New module h5fd for constants used by property lists
+
+High-level:
+----------
+ - Fix Dataset __setitem__ goof (thanks D. Brooks)
+ - Fix issues with singlet dimensions
+ - Fix broken hardlinking in Group __setitem__
+ - Fix off-by-one bug in extended slicing
+ - Add context manager methods to File object (for "with" statement)
+ - Replace File "noclobber" keyword with new mode "w-"
+ - Change default File mode to "a"
+ - Fix missing self argument in Datatype constructor
+ - Add unit tests for highlevel module
+
+Version 0.2.0
+=============
+
+- Switch to object-based identifiers
+- Officially support compilation with HDF5 1.8.X
+- Redesign high-level component
+- Add more Numpy-like slicing behavior
diff --git a/MANIFEST.in b/MANIFEST.in
index 680f960..aa71e68 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,7 @@
include MANIFEST.in
include LICENSE.txt
include README.txt
+include CHANGES.txt
include docs.cfg
recursive-include h5py *.py *.pyx *.pxd *.pxi *.h *.c *.hdf5
recursive-include licenses *
diff --git a/README.txt b/README.txt
index 1b492ae..31958df 100644
--- a/README.txt
+++ b/README.txt
@@ -5,7 +5,7 @@ Copyright (c) 2008 Andrew Collette
* http://h5py.alfven.org
* mail: "h5py" at the domain "alfven dot org"
-**Version 0.2.0**
+**Version 0.2.1**
* `Introduction`_
* `Features`_
diff --git a/h5py/h5.pyx b/h5py/h5.pyx
index 8ef4afb..9eb055a 100644
--- a/h5py/h5.pyx
+++ b/h5py/h5.pyx
@@ -110,8 +110,8 @@ cdef class ObjectID:
return H5Iget_type(self.id) != H5I_BADID
def __nonzero__(self):
- """ Truth value for object identifiers (like _valid)"""
- return H5Iget_type(self.id) != H5I_BADID
+ """ Truth value for object identifiers (like _valid) """
+ return self._valid
def __cinit__(self, hid_t id_):
""" Object init; simply records the given ID. """
diff --git a/h5py/highlevel.py b/h5py/highlevel.py
index 33c20ea..a81c362 100644
--- a/h5py/highlevel.py
+++ b/h5py/highlevel.py
@@ -80,6 +80,9 @@ class HLObject(object):
def __repr__(self):
return str(self)
+ def __nonzero__(self):
+ return self.id.__nonzero__()
+
class Group(HLObject):
""" Represents an HDF5 group.
@@ -147,7 +150,7 @@ class Group(HLObject):
This limitation is intentional, and may be lifted in the future.
"""
if isinstance(obj, Group) or isinstance(obj, Dataset) or isinstance(obj, Datatype):
- self.id.link(name, h5i.get_name(obj.id), link_type=h5g.LINK_HARD)
+ self.id.link(h5i.get_name(obj.id), name, link_type=h5g.LINK_HARD)
elif isinstance(obj, numpy.dtype):
htype = h5t.py_create(obj)
@@ -166,8 +169,7 @@ class Group(HLObject):
info = self.id.get_objinfo(name)
if info.type == h5g.DATASET:
- dset = Dataset(self, name)
- return dset
+ return Dataset(self, name)
elif info.type == h5g.GROUP:
return Group(self, name)
@@ -205,15 +207,15 @@ class Group(HLObject):
"""
return Group(self, name, create=True)
- def create_dataset(self, name, **kwds):
- """ Create and return a dataset. Keyword arguments:
+ def create_dataset(self, name, *args, **kwds):
+ """ Create and return a dataset. Arguments, in order:
You must specify either "data", or both "type" and "shape".
data: Numpy array from which the dataset is constructed
- type: Numpy dtype giving the datatype
+ dtype: Numpy dtype giving the datatype
shape: Numpy-style shape tuple giving the dataspace
- Additional options (* is default):
+ Additional keyword options (* is default):
chunks: Tuple of chunk dimensions or None*
compression: DEFLATE (gzip) compression level, int or None*
shuffle: Use the shuffle filter (needs compression) T/F*
@@ -248,10 +250,9 @@ class File(Group):
""" Represents an HDF5 file on disk.
- File(name, mode='r', noclobber=False)
+ File(name, mode='a')
- Created with standard Python syntax File(name, mode).
- Legal modes: r, r+, w, w-, a (default 'r')
+ Legal modes: r, r+, w, w-, a (default)
File objects inherit from Group objects; Group-like methods all
operate on the HDF5 root group ('/'). Like Python file objects, you
@@ -264,6 +265,7 @@ class File(Group):
This object supports the Python context manager protocol, when used
in a "with" block:
+
with File(...) as f:
... do stuff with f...
# end block
@@ -279,26 +281,31 @@ class File(Group):
# --- Public interface (File) ---------------------------------------------
- def __init__(self, name, mode='r'):
+ def __init__(self, name, mode='a'):
""" Create a new file object.
Valid modes (like Python's file() modes) are:
- - 'r' Readonly, file must exist
- - 'r+' Read/write, file must exist
- - 'w' Create file, truncate if exists
- - 'w-' Create file, fail if exists
- - 'a' Read/write, file must exist (='r+')
+ - r Readonly, file must exist
+ - r+ Read/write, file must exist
+ - w Create file, truncate if exists
+ - w- Create file, fail if exists
+ - a Read/write if exists, create otherwise (default)
"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_fclose_degree(h5f.CLOSE_STRONG)
if mode == 'r':
self.fid = h5f.open(name, h5f.ACC_RDONLY, fapl=plist)
- elif mode == 'r+' or mode == 'a':
+ elif mode == 'r+':
self.fid = h5f.open(name, h5f.ACC_RDWR, fapl=plist)
elif mode == 'w-':
self.fid = h5f.create(name, h5f.ACC_EXCL, fapl=plist)
elif mode == 'w':
self.fid = h5f.create(name, h5f.ACC_TRUNC, fapl=plist)
+ elif mode == 'a':
+ if not os.path.exists(name):
+ self.fid = h5f.create(name, h5f.ACC_EXCL, fapl=plist)
+ else:
+ self.fid = h5f.open(name, h5f.ACC_RDWR, fapl=plist)
else:
raise ValueError("Invalid mode; must be one of r, r+, w, w-, a")
@@ -435,7 +442,7 @@ class Dataset(HLObject):
raise ValueError('You cannot specify keywords when opening a dataset.')
self.id = h5d.open(group.id, name)
else:
- if ((data is None) and not (shape and dtype)) or \
+ if ((data is None) and (shape is None and dtype is None)) or \
((data is not None) and (shape or dtype)):
raise ValueError("Either data or both shape and dtype must be specified.")
@@ -522,8 +529,8 @@ class Dataset(HLObject):
if len(names) == 1:
# Match Numpy convention for recarray indexing
- return arr[names[0]]
- return arr
+ return arr[names[0]].squeeze()
+ return arr.squeeze()
def __setitem__(self, args, val):
""" Write to the HDF5 dataset from an Numpy array. The shape of the
@@ -538,7 +545,7 @@ class Dataset(HLObject):
if count != val.shape:
# Allow assignments (1,10) => (10,)
if numpy.product(count) != numpy.product(val.shape):
- raise ValueError("Selection shape (%s) must match target shape (%s)" % (str(count), str(val.shape)))
+ raise ValueError("Selection (%s) must be compatible with target (%s)" % (str(count), str(val.shape)))
else:
val = val.reshape(count)
diff --git a/h5py/tests/__init__.py b/h5py/tests/__init__.py
index dcb61b5..cdafb39 100644
--- a/h5py/tests/__init__.py
+++ b/h5py/tests/__init__.py
@@ -14,13 +14,16 @@ import unittest
import sys
import test_h5a, test_h5d, test_h5f, \
test_h5g, test_h5i, test_h5p, \
- test_h5s, test_h5t, test_h5
+ test_h5s, test_h5t, test_h5, \
+ test_highlevel
from h5py import *
TEST_CASES = (test_h5a.TestH5A, test_h5d.TestH5D, test_h5f.TestH5F,
test_h5g.TestH5G, test_h5i.TestH5I, test_h5p.TestH5P,
- test_h5s.TestH5S, test_h5t.TestH5T, test_h5.TestH5)
+ test_h5s.TestH5S, test_h5t.TestH5T, test_h5.TestH5,
+ test_highlevel.TestFile, test_highlevel.TestDataset,
+ test_highlevel.TestGroup)
def buildsuite(cases):
diff --git a/h5py/tests/common.py b/h5py/tests/common.py
index ee34398..020ce9f 100644
--- a/h5py/tests/common.py
+++ b/h5py/tests/common.py
@@ -20,6 +20,9 @@ import h5py
DATADIR = join(dirname(h5py.__file__), 'tests/data')
+def getfullpath(name):
+ return join(DATADIR, name)
+
class TestBase(unittest.TestCase):
"""
@@ -31,7 +34,7 @@ class TestBase(unittest.TestCase):
def __init__(self, *args, **kwds):
unittest.TestCase.__init__(self, *args, **kwds)
- self.HDFNAME = join(DATADIR, self.HDFNAME) # resolve absolute location
+ self.HDFNAME = getfullpath(self.HDFNAME) # resolve absolute location
def setUp(self):
newname = tempfile.mktemp('.hdf5')
diff --git a/h5py/tests/test_highlevel.py b/h5py/tests/test_highlevel.py
index 3f672ee..6a0b774 100644
--- a/h5py/tests/test_highlevel.py
+++ b/h5py/tests/test_highlevel.py
@@ -9,5 +9,368 @@
# $Date$
#
#-
+from __future__ import with_statement
+
+import unittest
+import tempfile
+import shutil
+import os
+import numpy
+
+from h5py.highlevel import *
+from h5py import *
+from h5py.h5 import H5Error
+from common import getfullpath
+
+class SliceFreezer(object):
+ def __getitem__(self, args):
+ return args
+
+
+HDFNAME = getfullpath("smpl_compound_chunked.hdf5")
+
+TYPES1 = \
+ [ "<i1", "<i2", "<i4", "<i8", ">i1", ">i2", ">i4", ">i8", "|i1", "|u1",
+ "<u1", "<u2", "<u4", "<u8", ">u1", ">u2", ">u4", ">u8",
+ "<f4", "<f8", ">f4", ">f8", "<c8", "<c16", ">c8", ">c16"]
+
+TYPES2 = ["|S1", "|S2", "|S33", "|V1", "|V2", "|V33"]
+
+TYPES3 = [[(x, numpy.dtype(x)) for x in TYPES1]]
+
+TYPES = TYPES1 + TYPES2 + TYPES3
+
+SHAPES = [(), (1,), (10,5), (1,10), (10,1), (100,1,100), (51,2,1025)]
+
+
+
+class TestFile(unittest.TestCase):
+
+ def setUp(self):
+ newname = tempfile.mktemp('.hdf5')
+ shutil.copy(HDFNAME, newname)
+ self.fname = newname
+
+ def tearDown(self):
+ os.unlink(self.fname)
+
+ def test_File_init_r(self):
+ with File(self.fname, 'r') as f:
+ self.assert_(isinstance(f["CompoundChunked"], Dataset))
+ self.assertRaises(H5Error, f.create_group, "FooBar")
+ self.assertEqual(f.mode, 'r')
+ self.assertEqual(f.name, self.fname)
+
+ def test_File_init_rp(self):
+ with File(self.fname, 'r+') as f:
+ self.assert_(isinstance(f["CompoundChunked"], Dataset))
+ f.create_group("FooBar")
+ self.assert_(isinstance(f["FooBar"], Group))
+ self.assertEqual(f.mode, 'r+')
+ self.assertEqual(f.name, self.fname)
+
+ def test_File_init_a(self):
+ with File(self.fname, 'a') as f:
+ self.assert_(isinstance(f["CompoundChunked"], Dataset))
+ f.create_group("FooBar")
+ self.assert_(isinstance(f["FooBar"], Group))
+ self.assertEqual(f.mode, 'a')
+ self.assertEqual(f.name, self.fname)
+
+ def test_File_init_w(self):
+ with File(self.fname, 'w') as f:
+ self.assert_("CompoundChunked" not in f)
+ f.create_group("FooBar")
+ self.assert_(isinstance(f["FooBar"], Group))
+ self.assertEqual(f.mode, 'w')
+ self.assertEqual(f.name, self.fname)
+
+ def test_File_init_wm(self):
+ self.assertRaises(H5Error, File, self.fname, 'w-')
+ tmpname = tempfile.mktemp('.hdf5')
+ f = File(tmpname,'w-')
+ f.close()
+ os.unlink(tmpname)
+
+ def test_File_close(self):
+ f = File(self.fname, 'r')
+ self.assert_(f.id)
+ f.close()
+ self.assert_(not f.id)
+
+ def test_File_flush(self):
+ with File(self.fname) as f:
+ f.flush()
+
+ def test_File_str(self):
+ f = File(self.fname, 'r')
+ str(f)
+ f.close()
+ str(f)
+
+ def test_AttributeManager(self):
+
+ shapes = ((), (1,), (1,10), (10,1), (10,1,10), (9,9))
+ attrs = [1, 2.0, 0, "Hello", " 129887A!!\t\t9(){; \t "]
+ for shape in shapes:
+ for dt in TYPES1:
+ attrs += [numpy.arange(numpy.product(shape), dtype=dt).reshape(shape)]
+
+ # Turn the list into a dictionary. Names are str(indices).
+ attrs = dict( [(str(idx), attr) for idx, attr in enumerate(attrs)] )
+
+ with File(self.fname, 'w') as f:
+
+ # Tests __setitem__ and __getitem__
+ grp = f.create_group("Grp")
+ for name, attr in attrs.iteritems():
+ grp.attrs[name] = attr
+ self.assert_(numpy.all(grp.attrs[name] == attr))
+
+ # Test __str__ with attrs present
+ str(grp.attrs)
+
+ # Tests __iter__ for name comparison
+ self.assertEqual(set(grp.attrs), set(attrs))
+
+ # Tests iteritems()
+ hattrs = dict(grp.attrs.iteritems())
+ self.assertEqual(set(hattrs), set(attrs)) # check names
+ for name in attrs:
+ self.assert_(numpy.all(attrs[name] == hattrs[name])) # check vals
+
+ # Test __len__
+ self.assertEqual(len(grp.attrs), len(attrs))
+
+ # Tests __contains__ and __delitem__
+ for name in list(grp.attrs):
+ self.assert_(name in grp.attrs)
+ del grp.attrs[name]
+ self.assert_(not name in grp.attrs)
+
+ self.assertEqual(len(grp.attrs), 0)
+
+ # Test on closed object
+ grp.id._close()
+ str(grp.attrs)
+
+class TestDataset(unittest.TestCase):
+
+ def setUp(self):
+
+ self.fname = tempfile.mktemp('.hdf5')
+ self.f = File(self.fname, 'w')
+
+ def tearDown(self):
+ self.f.close()
+ os.unlink(self.fname)
+
+ def test_Dataset_create_simple(self):
+
+ print ''
+
+ for shape in SHAPES:
+ for dt in TYPES:
+ print " Creating %.20s %.40s" % (shape, dt)
+ dt = numpy.dtype(dt)
+ d = Dataset(self.f, "NewDataset", dtype=dt, shape=shape)
+ self.assertEqual(d.shape, shape)
+ self.assertEqual(d.dtype, dt)
+ del self.f["NewDataset"]
+ if 'V' not in dt.kind:
+ srcarr = numpy.ones(shape, dtype=dt)
+ d = Dataset(self.f, "NewDataset", data=srcarr)
+ self.assertEqual(d.shape, shape)
+ self.assertEqual(d.dtype, dt)
+ self.assert_(numpy.all(d.value == srcarr))
+ del self.f["NewDataset"]
+
+ def test_Dataset_slicing(self):
+
+ print ''
+
+ s = SliceFreezer()
+ slices = [s[0,0,0], s[0,0,:], s[0,:,0], s[0,:,:]]
+ slices += [ s[9,9,49], s[9,:,49], s[9,:,:] ]
+ slices += [ s[0, ..., 49], s[...], s[..., 49], s[9,...] ]
+ slices += [ s[0:7:2,0:9:3,15:43:5], s[2:8:2,...] ]
+
+
+ for dt in TYPES1:
+
+ srcarr = numpy.arange(10*10*50, dtype=dt).reshape(10,10,50)
+ srcarr = srcarr + numpy.sin(srcarr)
+
+
+ fname = tempfile.mktemp('.hdf5')
+ f = File(fname, 'w')
+ try:
+ d = Dataset(f, "NewDataset", data=srcarr)
+ self.assertEqual(d.shape, srcarr.shape)
+ self.assertEqual(d.dtype, srcarr.dtype)
+ for argtpl in slices:
+ # Test read
+ print " Checking read %.20s %s" % (dt, argtpl,)
+ hresult = d[argtpl]
+ nresult = srcarr[argtpl]
+ self.assertEqual(hresult.shape, nresult.shape)
+ self.assertEqual(hresult.dtype, nresult.dtype)
+ self.assert_(numpy.all(hresult == nresult))
+
+ del f["NewDataset"]
+ d = Dataset(f, "NewDataset", data=srcarr)
+ for argtpl in slices:
+ # Test assignment
+ print " Checking write %.20s %s" % (dt, argtpl,)
+ srcarr[argtpl] = numpy.cos(srcarr[argtpl])
+ d[argtpl] = srcarr[argtpl]
+ self.assert_(numpy.all(d.value == srcarr))
+
+ finally:
+ f.close()
+ os.unlink(fname)
+
+class TestGroup(unittest.TestCase):
+
+ def setUp(self):
+
+ self.fname = tempfile.mktemp('.hdf5')
+ self.f = File(self.fname, 'w')
+
+ def tearDown(self):
+ self.f.close()
+ os.unlink(self.fname)
+
+ def test_Group_init(self):
+
+ grp = Group(self.f, "NewGroup", create=True)
+ self.assert_("NewGroup" in self.f)
+ grp2 = Group(self.f, "NewGroup")
+
+ self.assertEqual(grp.name, "/NewGroup")
+
+ def test_Group_create_group(self):
+
+ grp = self.f.create_group("NewGroup")
+ self.assert_("NewGroup" in self.f)
+ self.assertRaises(H5Error, self.f.create_group, "NewGroup")
+
+ def test_Group_create_dataset(self):
+
+ ds = self.f.create_dataset("Dataset", shape=(10,10), dtype='<i4')
+ self.assert_(isinstance(ds, Dataset))
+ self.assert_("Dataset" in self.f)
+
+ def test_Group_special(self):
+
+ subgroups = ["Name1", " Name 1231987&*@&^*&#W 2 \t\t ", "name3",
+ "14", "!"]
+
+ for name in subgroups:
+ self.f.create_group(name)
+
+ # __len__
+ self.assertEqual(len(self.f), len(subgroups))
+
+ # __contains__
+ for name in subgroups:
+ self.assert_(name in self.f)
+
+ # __iter__
+ self.assertEqual(set(self.f), set(subgroups))
+
+ # iteritems()
+ for name, obj in self.f.iteritems():
+ self.assert_(name in subgroups)
+ self.assert_(isinstance(obj, Group))
+
+ # __delitem__
+ for name in subgroups:
+ self.assert_(name in self.f)
+ del self.f[name]
+ self.assert_(not name in self.f)
+
+ self.assertEqual(len(self.f), 0)
+
+ # __str__
+ grp = self.f.create_group("Foobar")
+ str(grp)
+ grp.id._close()
+ str(grp)
+
+ def test_Group_setgetitem(self):
+ # Also tests named types
+
+ print ''
+ for shape in SHAPES:
+ for dt in TYPES1:
+
+ print " Assigning %s %s" % (dt, shape)
+
+ # test arbitrary datasets
+ dt_obj = numpy.dtype(dt)
+ arr = numpy.ones(shape, dtype=dt_obj)
+ self.f["DS"] = arr
+ harr = self.f["DS"]
+ self.assert_(isinstance(harr, Dataset))
+ self.assertEqual(harr.shape, shape)
+ self.assertEqual(harr.dtype, dt_obj)
+ self.assert_(numpy.all(harr.value == arr))
+
+ # test named types
+ self.f["TYPE"] = dt_obj
+ htype = self.f["TYPE"]
+ self.assert_(isinstance(htype, Datatype))
+ self.assertEqual(htype.dtype, dt_obj)
+
+ del self.f["DS"]
+ del self.f["TYPE"]
+
+ # Test creation of array from sequence
+ seq = [1,-42,2,3,4,5,10]
+ self.f["DS"] = seq
+ harr = self.f["DS"]
+ self.assert_(numpy.all(harr.value == numpy.array(seq)))
+ del self.f["DS"]
+
+ # test scalar -> 0-d dataset
+ self.f["DS"] = 42
+ harr = self.f["DS"]
+ self.assert_(isinstance(harr, Dataset))
+ self.assertEqual(harr.shape, ())
+ self.assertEqual(harr.value, 42)
+
+ # test hard linking
+ self.f["DS1"] = self.f["DS"]
+ info1 = self.f.id.get_objinfo("DS")
+ info2 = self.f.id.get_objinfo("DS1")
+ self.assertEqual(info1.fileno, info2.fileno)
+ self.assertEqual(info1.objno, info2.objno)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-# empty for now
diff --git a/h5py/utils.pyx b/h5py/utils.pyx
index 9b98380..9c964ac 100644
--- a/h5py/utils.pyx
+++ b/h5py/utils.pyx
@@ -11,8 +11,6 @@
#-
from python cimport PyTuple_Check, PyList_Check, PyErr_SetString, Py_INCREF
-from numpy cimport ndarray, NPY_WRITEABLE, NPY_ALIGNED, \
- NPY_C_CONTIGUOUS, PyArray_FROM_OF
cdef int require_tuple(object tpl, int none_allowed, int size, char* name) except -1:
# Ensure that tpl is in fact a tuple, or None if none_allowed is nonzero.
diff --git a/h5py/utils_hl.py b/h5py/utils_hl.py
index 412c61b..acdfa01 100644
--- a/h5py/utils_hl.py
+++ b/h5py/utils_hl.py
@@ -3,6 +3,7 @@
Utility functions for high-level modules.
"""
from posixpath import basename, normpath
+import numpy
def hbasename(name):
""" Basename function with more readable handling of trailing slashes"""
@@ -97,6 +98,8 @@ def slicer(shape, args):
if arg.stop < 0:
raise ValueError("Negative dimensions are not allowed")
cc = (arg.stop-ss)/st
+ if ((arg.stop-ss) % st) != 0:
+ cc += 1 # Be careful with integer division!
if cc == 0:
raise ValueError("Zero-length selections are not allowed")
diff --git a/setup.py b/setup.py
index fb512f8..4f90a5f 100644
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@
# === Global constants ========================================================
NAME = 'h5py'
-VERSION = '0.2.0'
+VERSION = '0.2.1'
MIN_PYREX = '0.9.8.4' # for compile_multiple
MIN_NUMPY = '1.0.3'
@@ -71,8 +71,6 @@ from distutils.extension import Extension
import os
import sys
import shutil
-import subprocess
-import re
# Distutils tries to use hard links when building source distributions, which
# fails under a wide variety of network filesystems under Linux.
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/h5py.git
More information about the debian-science-commits
mailing list