[h5py] 151/455: More test suite additions

Ghislain Vaillant ghisvail-guest at moszumanska.debian.org
Thu Jul 2 18:19:27 UTC 2015


This is an automated email from the git hooks/post-receive script.

ghisvail-guest pushed a commit to annotated tag 1.3.0
in repository h5py.

commit 15871c79a757f8e3f6cb3cb8d2a8e34e88f2aead
Author: andrewcollette <andrew.collette at gmail.com>
Date:   Sat Nov 1 20:13:30 2008 +0000

    More test suite additions
---
 docs/source/index.rst        |   1 +
 h5py/h5g.pyx                 |   2 +-
 h5py/h5o.pyx                 |  31 ++--
 h5py/h5p_fcid.pxi            |  38 ----
 h5py/highlevel.py            |  20 +-
 h5py/tests/__init__.py       |   2 +
 h5py/tests/common.py         |  16 ++
 h5py/tests/test_h5p.py       |  10 -
 h5py/tests/test_highlevel.py | 424 +++++++++++++++++++++++++++++++++----------
 h5py/tests/testfiles.py      |  53 +++---
 10 files changed, 410 insertions(+), 187 deletions(-)

diff --git a/docs/source/index.rst b/docs/source/index.rst
index ba26fd8..e4c5324 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -39,6 +39,7 @@ Contents:
     build
     quick
     datasets
+    hl
     threads
     licenses
 
diff --git a/h5py/h5g.pyx b/h5py/h5g.pyx
index c110c7b..239c2bf 100644
--- a/h5py/h5g.pyx
+++ b/h5py/h5g.pyx
@@ -128,7 +128,7 @@ def open(ObjectID loc not None, char* name):
 IF H5PY_18API:
     @sync
     def create(ObjectID loc not None, char* name, PropID lcpl=None,
-               PropID gcpl=None, PropID gapl=None):
+               PropID gcpl=None):
         """(ObjectID loc, STRING name, PropLCID lcpl=None, PropGCID gcpl=None)
         => GroupID
 
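A side note on the signature change above: under the 1.8 API, h5g.create now accepts only link-creation (lcpl) and group-creation (gcpl) property lists; the group-access plist (gapl) argument is gone. A minimal low-level sketch of the surviving call, assuming the modern spelling in which object names are byte strings (the file name is a placeholder):

    from h5py import h5f, h5g

    fid = h5f.create(b'demo.hdf5')   # default fcpl/fapl
    # Only lcpl and gcpl remain as keyword arguments; gapl is gone.
    gid = h5g.create(fid, b'grp')
    fid.close()
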
diff --git a/h5py/h5o.pyx b/h5py/h5o.pyx
index 73420f1..6dbd188 100644
--- a/h5py/h5o.pyx
+++ b/h5py/h5o.pyx
@@ -187,7 +187,7 @@ def link(ObjectID obj not None, GroupID loc not None, char* name,
     PropID lapl=None)
 
     Create a new hard link to an object.  Useful for objects created with
-    h5g.create_anon or h5d.create_anon.
+    h5g.create_anon() or h5d.create_anon().
     """
     H5Olink(obj.id, loc.id, name, pdefault(lcpl), pdefault(lapl))
 
@@ -264,26 +264,33 @@ cdef class _ObjectVisitor:
 cdef herr_t cb_obj_iterate(hid_t obj, char* name, H5O_info_t *info, void* data) except 2:
 
     cdef _ObjectVisitor visit
-    visit = <_ObjectVisitor>data
 
-    visit.objinfo.infostruct = info[0]
+    # HDF5 doesn't respect callback return for ".", so skip it
+    if strcmp(name, ".") == 0:
+        return 0
 
+    visit = <_ObjectVisitor>data
+    visit.objinfo.infostruct = info[0]
     visit.retval = visit.func(name, visit.objinfo)
 
-    if (visit.retval is None) or (not visit.retval):
-        return 0
-    return 1
+    if visit.retval is not None:
+        return 1
+    return 0
 
 cdef herr_t cb_obj_simple(hid_t obj, char* name, H5O_info_t *info, void* data) except 2:
 
     cdef _ObjectVisitor visit
-    visit = <_ObjectVisitor>data
 
+    # HDF5 doesn't respect callback return for ".", so skip it
+    if strcmp(name, ".") == 0:
+        return 0
+
+    visit = <_ObjectVisitor>data
     visit.retval = visit.func(name)
 
-    if (visit.retval is None) or (not visit.retval):
-        return 0
-    return 1
+    if visit.retval is not None:
+        return 1
+    return 0
 
 @sync
 def visit(ObjectID loc not None, object func, *,
@@ -300,8 +307,8 @@ def visit(ObjectID loc not None, object func, *,
     
         func(STRING name, ObjInfo info) => Result
 
-    Returning None or a logical False continues iteration; returning
-    anything else aborts iteration and returns that value.  Keywords:
+    Returning None continues iteration; returning anything else aborts
+    iteration and returns that value.  Keywords:
 
     BOOL info (False)
         Callback is func(STRING, ObjInfo)
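
The behavioral consequence of the callback rewrite above is easy to miss: previously any falsy return (None, 0, '') continued iteration, whereas now only a literal None does, so a falsy-but-not-None value stops the visit and becomes its return value. A short sketch of the new contract through the high-level wrapper, using a throwaway file name:

    import h5py

    with h5py.File('visit_demo.hdf5', 'w') as f:
        f.create_group('a/b')
        f.create_group('c')

        def stop_at_b(name):
            # None -> keep iterating; anything else -> stop and return it
            return name if name.endswith('b') else None

        found = f.visit(stop_at_b)   # -> 'a/b'
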
diff --git a/h5py/h5p_fcid.pxi b/h5py/h5p_fcid.pxi
index 0a7df58..87cc6bf 100644
--- a/h5py/h5p_fcid.pxi
+++ b/h5py/h5p_fcid.pxi
@@ -81,41 +81,3 @@ cdef class PropFCID(PropCreateID):
         H5Pget_sizes(self.id, &addr, &size)
         return (addr, size)
 
-    @sync
-    def set_sym_k(self, unsigned int ik, unsigned int lk):
-        """(INT ik, INT lk)
-
-        Symbol table node settings.  See the HDF5 docs for H5Pset_sym_k.
-        """
-        H5Pset_sym_k(self.id, ik, lk)
-
-    @sync
-    def get_sym_k(self):
-        """() => TUPLE settings
-
-        Determine symbol table node settings.  See the HDF5 docs for
-        H5Pget_sym_k.  Return is a 2-tuple (ik, lk).
-        """
-        cdef unsigned int ik
-        cdef unsigned int lk
-        H5Pget_sym_k(self.id, &ik, &lk)
-        return (ik, lk)
-
-    @sync
-    def set_istore_k(self, unsigned int ik):
-        """(UINT ik)
-
-        See hdf5 docs for H5Pset_istore_k.
-        """
-        H5Pset_istore_k(self.id, ik)
-    
-    @sync
-    def get_istore_k(self):
-        """() => UINT ik
-
-        See HDF5 docs for H5Pget_istore_k
-        """
-        cdef unsigned int ik
-        H5Pget_istore_k(self.id, &ik)
-        return ik
-
diff --git a/h5py/highlevel.py b/h5py/highlevel.py
index 2a0ab6a..947731c 100644
--- a/h5py/highlevel.py
+++ b/h5py/highlevel.py
@@ -111,7 +111,7 @@ class HLObject(LockableObject):
             return self.id == other.id
         return False
 
-class DictCompat(object):
+class _DictCompat(object):
 
     """
         Contains dictionary-style compatibility methods for groups and
@@ -151,7 +151,7 @@ class DictCompat(object):
                 yield (x, self[x])
 
 
-class Group(HLObject, DictCompat):
+class Group(HLObject, _DictCompat):
 
     """ Represents an HDF5 group.
 
@@ -395,7 +395,8 @@ class Group(HLObject, DictCompat):
             func(<member name>) => <None or return value>
 
         Returning None continues iteration, returning anything else stops
-        and immediately returns that value from the visit method.
+        and immediately returns that value from the visit method.  No
+        particular order of iteration within groups is guaranteed.
 
         Example:
 
@@ -422,7 +423,8 @@ class Group(HLObject, DictCompat):
             func(<member name>, <object>) => <None or return value>
 
         Returning None continues iteration, returning anything else stops
-        and immediately returns that value from the visit method.
+        and immediately returns that value from the visit method.  No
+        particular order of iteration within groups is guaranteed.
 
         Example:
 
@@ -614,7 +616,7 @@ class Dataset(HLObject):
         """Compression level (or None)"""
         filt = self._plist.get_filter_by_id(h5z.FILTER_DEFLATE)
         if filt is not None:
-            return filt[1]
+            return filt[1][0]
         return None
 
     @property
@@ -627,6 +629,12 @@ class Dataset(HLObject):
         """Fletcher32 filter is present (T/F)"""
         return self._plist.get_filter_by_id(h5z.FILTER_FLETCHER32) is not None
         
+    @property
+    def maxshape(self):
+        space = self.id.get_space()
+        dims = space.get_simple_extent_dims(True)
+        return tuple(x if x != h5s.UNLIMITED else None for x in dims)
+
     def __init__(self, group, name,
                     shape=None, dtype=None, data=None,
                     chunks=None, compression=None, shuffle=False,
@@ -861,7 +869,7 @@ class Dataset(HLObject):
             except Exception:
                 return "<Closed HDF5 dataset>"
 
-class AttributeManager(LockableObject, DictCompat):
+class AttributeManager(LockableObject, _DictCompat):
 
     """ Allows dictionary-style access to an HDF5 object's attributes.
 
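The new maxshape property above reports H5S_UNLIMITED extents as None, mirroring the maxshape keyword accepted at creation time. A brief sketch (file and dataset names are illustrative; an unlimited axis requires a chunked layout):

    import h5py

    with h5py.File('maxshape_demo.hdf5', 'w') as f:
        dset = f.create_dataset('growable', shape=(10, 5), dtype='<i4',
                                maxshape=(10, None), chunks=(10, 5))
        assert dset.maxshape == (10, None)   # unlimited axis comes back as None
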
diff --git a/h5py/tests/__init__.py b/h5py/tests/__init__.py
index 58a71d2..5d3a512 100644
--- a/h5py/tests/__init__.py
+++ b/h5py/tests/__init__.py
@@ -13,6 +13,7 @@
 import unittest
 import sys
 
+import common
 from common import HDF5TestCase
 import  test_h5a, test_h5d, test_h5f,\
         test_h5g, test_h5i, test_h5p,\
@@ -69,6 +70,7 @@ def runtests(requests=None, verbosity=1):
 
     if verbosity >= 1:
         print "=== Tested HDF5 %s (%s API) ===" % (version.hdf5_version, version.api_version)
+
     return retval.wasSuccessful()
 
 def autotest():
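
Since runtests() returns wasSuccessful(), driving the suite from a script and converting the result to an exit status is a one-liner. A sketch, assuming the module layout in this tree:

    import sys
    from h5py import tests

    # Nonzero exit status when any test fails
    sys.exit(0 if tests.runtests(verbosity=1) else 1)
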
diff --git a/h5py/tests/common.py b/h5py/tests/common.py
index 4f7dbc4..0e0b704 100644
--- a/h5py/tests/common.py
+++ b/h5py/tests/common.py
@@ -29,6 +29,22 @@ def api_18(func):
         return func
     return None
 
+def api_16(func):
+    """Decorator to run test under HDF5 1.6 only"""
+    if not h5.get_config().API_18:
+        return func
+    return None
+
+test_coverage = set()
+
+def covers(*args):
+    global test_coverage
+    
+    def wrap(meth):
+        test_coverage.update(args)
+        return meth
+
+    return wrap
 
 class HDF5TestCase(unittest.TestCase):
 
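Both helpers added above lean on plain attribute mechanics: api_16/api_18 return None when the API doesn't match, leaving a non-callable class attribute that unittest's loader never collects, while covers() merely records names in the module-level test_coverage set. A self-contained sketch of the gating pattern, with a stand-in flag instead of h5.get_config():

    import unittest

    HAVE_API_18 = False   # stand-in for h5.get_config().API_18

    def api_18(func):
        """Keep the decorated test only when the 1.8 API is available."""
        if HAVE_API_18:
            return func
        return None   # non-callable attribute: never collected as a test

    class TestDemo(unittest.TestCase):

        @api_18
        def test_new_api(self):
            self.assertTrue(True)

    # unittest.main() reports zero tests here while HAVE_API_18 is False.
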
diff --git a/h5py/tests/test_h5p.py b/h5py/tests/test_h5p.py
index 4387e70..a06f5df 100644
--- a/h5py/tests/test_h5p.py
+++ b/h5py/tests/test_h5p.py
@@ -53,16 +53,6 @@ class TestFCID(unittest.TestCase):
             self.p.set_sizes(a,s)
             self.assertEqual(self.p.get_sizes(), (a,s))
 
-    def test_sym(self):
-        self.p.set_sym_k(2,3)
-        self.assertEqual(self.p.get_sym_k(), (2,3))
-
-    def test_istore(self):
-        for size in (2,4,8,16):
-            self.p.set_istore_k(size)
-            self.assertEqual(self.p.get_istore_k(), size)
-
-
 class TestFAID(unittest.TestCase):
 
     CLOSE_DEGREES = (h5f.CLOSE_WEAK,
diff --git a/h5py/tests/test_highlevel.py b/h5py/tests/test_highlevel.py
index 41dd640..ea569d7 100644
--- a/h5py/tests/test_highlevel.py
+++ b/h5py/tests/test_highlevel.py
@@ -20,7 +20,8 @@ import numpy
 from h5py.highlevel import *
 from h5py import *
 from h5py.h5 import H5Error
-from common import getfullpath, HDF5TestCase
+from common import getfullpath, HDF5TestCase, api_18, api_16
+import testfiles
 
 class SliceFreezer(object):
     def __getitem__(self, args):
@@ -100,11 +101,13 @@ class TestFile(HDF5TestCase):
         with File(self.fname) as f:
             f.flush()
 
-    def test_File_str(self):
+    def test_File_special(self):
         f = File(self.fname, 'r')
         str(f)
+        repr(f)
         f.close()
         str(f)
+        repr(f)
 
     def test_AttributeManager(self):
 
@@ -162,45 +165,160 @@ class TestDataset(HDF5TestCase):
     def tearDown(self):
         self.f.close()
         os.unlink(self.fname)
+  
+    def test_special(self):
+        """ Check object identity, hashing and string representation """
+        dset1 = self.f.create_dataset('TEST', (10,10), '<i4')
+        dset2 = self.f.create_dataset('TEST2', (10,10), '<i4')
+
+        dset1_p = self.f['TEST']
+        dset2_p = self.f['TEST2']
+
+        self.assert_(dset1 != dset2)
+        self.assert_(dset1 == dset1_p)
+
+        self.assert_(hash(dset1) == hash(dset1_p))
+        self.assert_(hash(dset1) != hash(dset2))
+
+        repr(dset1)
+        str(dset1)
+        dset1.id._close()
+        repr(dset1)
+        str(dset1)
+
+    def test_create(self):
+        """ Test the constructor and public properties """
+
+        def new_dset(*args, **kwds):
+            """ Create a dataset from constructor arguments.
+
+                Return is a 2-tuple (template, dataset).
+            """
+            # "None" means the keyword is "not provided"
+            kwds = dict((x,y) for x,y in kwds.iteritems() if y is not None)
+
+            template = testfiles.Dataset(*args, **kwds)
+
+            self.output(str(template.kwds))
+
+            if 'TEST_DSET' in self.f:
+                del self.f['TEST_DSET']
+            dset = Dataset(self.f, 'TEST_DSET', *args, **kwds)
+
+            return (template, dset)
+
+        def verify_ds(hdf, template):
+            """ Compare a real dataset to a template """
+
+            # Make sure the shape and dtype of the real dataset match the
+            # template's description.
+            if 'shape' in template.kwds:
+                shape = template.kwds['shape']
+            else:
+                shape = template.kwds['data'].shape
+            if 'dtype' in template.kwds:
+                dtype = template.kwds['dtype']
+            else:
+                dtype = template.kwds['data'].dtype
+
+            self.assertEqual(hdf.dtype, dtype, "dtype mismatch %s %s" % (hdf.dtype, dtype))
+            self.assertEqual(hdf.shape, shape, "shape mismatch %s %s" % (hdf.shape, shape))
+
+            # If data was given, make sure it's identical
+            if 'data' in template.kwds:
+                self.assert_(numpy.all(hdf.value == template.kwds['data']))
+
+            # If other keywords were given (chunks, etc), make sure they are
+            # correctly recorded.
+            for name, value in template.kwds.iteritems():
+                if name == 'data':
+                    continue
+                elif value is True:
+                    self.assert_(getattr(hdf, name) is not None,
+                      "True kwd ignored: %s" % name)
+                else:
+                    self.assertEqual(getattr(hdf, name), value,
+                      "kwd mismatch: %s: %s %s" % (name, getattr(hdf, name), value))
+
+            # Make sure all the public properties work
+            for name in ('shape', 'dtype', 'chunks', 'compression', 'shuffle',
+              'fletcher32', 'maxshape'):
+                getattr(hdf, name)
+
+            # If a chunks-requiring keyword is used, make sure it's honored
+            for name in ('chunks', 'compression', 'shuffle', 'fletcher32'):
+                if template.kwds.get(name, False):
+                    self.assert_(hdf.chunks is not None, "chunks missing for arg %s" % name)
+
+        # === Begin constructor test ===
+
+        # Method 1: specify shape and dtype
+        shapes = [(), (1,), (10,5), (1,10), (100,1,100), (51,2,1025),
+                  (2**60, 2**60, 2**34)]
 
-    def test_Dataset_create(self):
-        
-        self.output('')
+        for shape in shapes:
+            for dtype in TYPES1+TYPES1:
+                template, dset = new_dset(shape, dtype)
+                verify_ds(dset, template)
+
+        # Method 2: specify actual data
+        for shape in shapes[0:6]:
+            for dtype in TYPES1:
+                arr = numpy.arange(numpy.product(shape), dtype=dtype).reshape(shape)
+                template, dset = new_dset(data=arr)
+                verify_ds(dset, template)
+
+        # Test shape-related keywords
+        maxshapes = { (): [None, ()],
+                      (1,): [None, (1,)],
+                      (10,5): [None, (10,5), (20,20)],
+                      (1,10): [None, (2,10), (None,20)],
+                      (100,1,100): [None, (100,2,100), (None, None, None)],
+                      (51, 2, 1025): [None, (2**60, 2**40, None)],
+                      (2**60, 2**60, 2**34): [(2**62, 2**62, 2**35)] }
+
+        chunks = { (): [None],
+                  (1,): [None, (1,)],
+                  (10,5): [None, (5,5), (10,1)],
+                  (1,10): [None, True, (1,10), (1,3)],
+                  (100,1,100): [None, (50,1,10)],
+                  (51, 2, 1025): [None],
+                  (2**60, 2**60, 2**34): [(128,64, 256)]}
+
+        for shape in shapes:
+            for ms in maxshapes[shape]:
+                for chunk in chunks[shape]:
+                    template, dset = new_dset(shape, '<i4', chunks=chunk, maxshape=ms)
+                    verify_ds(dset, template)
+
+        # Other keywords
+        compression = [None, True, 5, 9]
+        fletcher32 = [True, False]
+        shuffle = [True, False]
+
+        for comp in compression:
+            for f in fletcher32:
+                for sh in [x if comp else None for x in shuffle]:
+                    template, dset = new_dset((100,100), '<i4', compression=comp, fletcher32=f, shuffle=sh)
+                    verify_ds(dset, template)
+    
+    def test_Dataset_order(self):
+        """ Test order coercion """
 
-        shapes = [(), (1,), (10,5), (1,10), (10,1), (100,1,100), (51,2,1025)]
-        chunks = [None, (1,), (10,1), (1,1),  (1,1),  (50,1,100), (51,2,25)]
-
-        # Test auto-chunk creation for each
-        shapes += shapes
-        chunks += [None]*len(chunks)
-
-        for shape, chunk in zip(shapes, chunks):
-            for dt in TYPES:
-                self.output("    Creating %.20s %.40s" % (shape, dt))
-                dt = numpy.dtype(dt)
-                d = Dataset(self.f, "NewDataset", dtype=dt, shape=shape)
-                self.assertEqual(d.shape, shape)
-                self.assertEqual(d.dtype, dt)
-                del self.f["NewDataset"]
-
-                if shape != ():
-                    self.output("        With chunk %s" % (chunk,))
-                    d = Dataset(self.f, "NewDataset", dtype=dt, shape=shape,
-                                chunks=chunk, shuffle=True, compression=6,
-                                fletcher32=True)
-                    self.assertEqual(d.shape, shape)
-                    self.assertEqual(d.dtype, dt)
-                    del self.f["NewDataset"]
-             
-                if 'V' not in dt.kind:
-                    srcarr = numpy.ones(shape, dtype=dt)
-                    d = Dataset(self.f, "NewDataset", data=srcarr)
-                    self.assertEqual(d.shape, shape)
-                    self.assertEqual(d.dtype, dt)
-                    self.assert_(numpy.all(d.value == srcarr))
-                    del self.f["NewDataset"]               
+        fortran = numpy.array([[1,2,3],[4,5,6]], order='F')
+        strided = numpy.arange(2*3*4, dtype=numpy.uint8)
+        strided.shape=(2,3,4)
+        strided.strides=(0,1,1)
+        b = numpy.arange(2*3*4, dtype=numpy.uint8)
+        view = numpy.ndarray(buffer=b, offset=2, shape=(2,4), dtype=numpy.uint8)
+
+        for x in (fortran, strided, view):
+            dset = self.f.create_dataset('TEST_DATA', data=x)
+            self.assert_(numpy.all(dset[:] == x))
+            del self.f['TEST_DATA']
 
     def test_Dataset_extend(self):
+        """ Test extending datasets """
 
         self.output("")
 
@@ -233,17 +351,19 @@ class TestDataset(HDF5TestCase):
 
             for illegal_shape in illegal_shapes[shape]:
                 self.assertRaises(H5Error, ds.extend, illegal_shape)
-        
-    def test_Dataset_len_iter(self):
 
+    def test_Dataset_len_iter(self):
+        """ Test new and old len(), iteration over rows """
         arr1 = numpy.arange(100).reshape((10,10))
         arr2 = numpy.ones(())
 
         d1 = self.f.create_dataset("D1", data=arr1)
         d2 = self.f.create_dataset("D2", data=arr2)
+        d3 = self.f.create_dataset("D3", shape=(2**60, 2**50))
 
         self.assertEqual(len(arr1), len(d1))
         self.assertRaises(TypeError, len, d2)
+        self.assertEqual(d3.len(), 2**60)
 
         for idx, (hval, nval) in enumerate(zip(d1, arr1)):
             self.assert_(numpy.all(hval == nval))
@@ -251,7 +371,8 @@ class TestDataset(HDF5TestCase):
         self.assertEqual(idx+1, len(arr1))
         self.assertRaises(TypeError, list, d2)
 
-    def test_Dataset_bigslice(self):
+    def test_slice_big(self):
+        """ Test slices > 2**32 """
         self.output("")
 
         s = SliceFreezer()
@@ -281,10 +402,42 @@ class TestDataset(HDF5TestCase):
                 dset[slc] = data
                 arr = dset[slc]
                 self.assert_(numpy.all(arr == data), "%r \n\n %r" % (arr, data))
-        
-    def test_Dataset_slicing(self):
 
-        self.output('')
+    def test_slice_simple(self):
+        """ Test Numpy-style slices """
+
+        srcarr = numpy.arange(10*10*50, dtype='<f4').reshape(10,10,50)
+        srcarr = srcarr + numpy.sin(srcarr)
+
+        def verify_read(dset, data, argtpl):
+            """ Make sure dset and data have identical contents under selection
+            """
+            hresult = dset[argtpl]
+            nresult = data[argtpl]
+
+            if isinstance(nresult, numpy.ndarray):
+                # If the canonical result is an array, compare shapes, dtypes
+                self.assertEqual(hresult.shape, nresult.shape)
+                self.assertEqual(hresult.dtype, nresult.dtype)
+            else:
+                # If it's a scalar, make sure the HDF5 result is also
+                self.assert_(not isinstance(hresult, numpy.ndarray))
+
+            # Must be an exact match
+            self.assert_(numpy.all(hresult == nresult))
+
+        def verify(argtpl):
+            """ Test read/write for the given selection """
+
+            dset = self.f.create_dataset('TEST', data=srcarr)
+            verify_read(dset, srcarr, argtpl)
+
+            srcarr[argtpl] = numpy.cos(srcarr[argtpl])
+            dset[argtpl] = srcarr[argtpl]
+            
+            verify_read(dset, srcarr, argtpl)
+        
+            del self.f['TEST']
 
         s = SliceFreezer()
         slices = [s[0,0,0], s[0,0,:], s[0,:,0], s[0,:,:]]
@@ -295,45 +448,32 @@ class TestDataset(HDF5TestCase):
         slices += [ s[0], s[1], s[9], s[0,0], s[4,5], s[:] ]
         slices += [ s[3,...], s[3,2,...] ]
         slices += [ numpy.random.random((10,10,50)) > 0.5 ]  # Truth array
-        slices += [ s[0,0,0:0], s[1:1,:,:], numpy.zeros((10,10,50),dtype='bool')] # Empty selections
-        for dt in TYPES1:
-
-            srcarr = numpy.arange(10*10*50, dtype=dt).reshape(10,10,50)
-            srcarr = srcarr + numpy.sin(srcarr)
-
-
-            fname = tempfile.mktemp('.hdf5')
-            f = File(fname, 'w')
-            try:
-                d = Dataset(f, "NewDataset", data=srcarr)
-                self.assertEqual(d.shape, srcarr.shape)
-                self.assertEqual(d.dtype, srcarr.dtype)
-                for argtpl in slices:
-                    # Test read
-                    self.output("    Checking read %.20s %s" % (dt, argtpl if not isinstance(argtpl, numpy.ndarray) else 'ARRAY'))
-                    hresult = d[argtpl]
-                    nresult = srcarr[argtpl]
-                    if isinstance(nresult, numpy.ndarray):
-                        self.assertEqual(hresult.shape, nresult.shape)
-                        self.assertEqual(hresult.dtype, nresult.dtype)
-                    else:
-                        self.assert_(not isinstance(hresult, numpy.ndarray))
-                    self.assert_(numpy.all(hresult == nresult))
-
-                del f["NewDataset"]
-                d = Dataset(f, "NewDataset", data=srcarr)
-                for argtpl in slices:
-                    # Test assignment
-                    self.output("    Checking write %.20s %s" % (dt, argtpl if not isinstance(argtpl, numpy.ndarray) else 'ARRAY'))
-                    srcarr[argtpl] = numpy.cos(srcarr[argtpl])
-                    d[argtpl] = srcarr[argtpl]
-                    self.assert_(numpy.all(d.value == srcarr))
-                    
-            finally:
-                f.close()
-                os.unlink(fname)   
-
-    def test_Dataset_coords(self):
+
+        for slc in slices:
+            self.output("    Checking %s" % ((slc,) if not isinstance(slc, numpy.ndarray) else 'ARRAY'))
+            verify(slc)
+
+    def test_slice_names(self):
+        """ Test slicing with named fields """
+
+        srcarr = numpy.ndarray((10,10), dtype=[('a', '<i4'), ('b', '<f8')])
+        srcarr['a'] = numpy.arange(100).reshape((10,10))
+        srcarr['b'] = 100*numpy.arange(100).reshape((10,10))
+
+        dset = self.f.create_dataset('TEST', data=srcarr)
+
+        pairs = \
+            [ (dset[:], srcarr[:]), (dset['a'], srcarr['a']),
+              (dset[5,5,'a'], srcarr['a'][5,5]),
+              (dset[2,:,'b'], srcarr['b'][2,:]),
+              (dset['b', ..., 5], srcarr[...,5]['b']) ]
+
+        for i, (d, n) in enumerate(pairs):
+            self.assert_(numpy.all(d == n), "Index %d mismatch" % i)
+
+
+    def test_slice_coords(self):
+        """ Test slicing with CoordsList instances """
 
         space = (100,100)
 
@@ -368,6 +508,7 @@ class TestDataset(HDF5TestCase):
             self.assertEqual(hresult.shape, nresult.shape)
 
     def test_Dataset_exceptions(self):
+        """ Test exceptions """
         # These trigger exceptions in H5Dread
         ref = numpy.ones((10,10), dtype='<i4')
         dsid = self.f.create_dataset('ds', data=ref)
@@ -375,6 +516,7 @@ class TestDataset(HDF5TestCase):
         self.assertRaises(H5Error, dsid.id.read, h5s.ALL, h5s.ALL, arr)
         # or it'll segfault...
 
+
 class TestGroup(HDF5TestCase):
 
     def setUp(self):
@@ -386,6 +528,9 @@ class TestGroup(HDF5TestCase):
         self.f.close()
         os.unlink(self.fname)
 
+    def assert_equal_contents(self, a, b):
+        self.assertEqual(set(a), set(b))
+
     def test_Group_init(self):
         
         grp = Group(self.f, "NewGroup", create=True)
@@ -422,12 +567,17 @@ class TestGroup(HDF5TestCase):
             self.assert_(name in self.f)
 
         # __iter__
-        self.assertEqual(set(self.f), set(subgroups))
+        self.assert_equal_contents(list(self.f), subgroups)
 
-        # iteritems()
-        for name, obj in self.f.iteritems():
-            self.assert_(name in subgroups)
-            self.assert_(isinstance(obj, Group))
+        # Dictionary compatibility methods
+        self.assert_equal_contents(self.f.listnames(), subgroups)
+        self.assert_equal_contents(list(self.f.iternames()), subgroups)
+
+        self.assert_equal_contents(self.f.listobjects(), [self.f[x] for x in subgroups])
+        self.assert_equal_contents(list(self.f.iterobjects()), [self.f[x] for x in subgroups])
+
+        self.assert_equal_contents(self.f.listitems(), [(x, self.f[x]) for x in subgroups])
+        self.assert_equal_contents(list(self.f.iteritems()), [(x, self.f[x]) for x in subgroups])
 
         # __delitem__
         for name in subgroups:
@@ -440,8 +590,10 @@ class TestGroup(HDF5TestCase):
         # __str__
         grp = self.f.create_group("Foobar")
         str(grp)
+        repr(grp)
         grp.id._close()
         str(grp)
+        repr(grp)
 
     def test_Group_setgetitem(self):
         # Also tests named types
@@ -492,17 +644,105 @@ class TestGroup(HDF5TestCase):
         self.assertEqual(info1.fileno, info2.fileno)
         self.assertEqual(info1.objno, info2.objno)
 
-        
+        # test assignment of out-of-order arrays
+        arr = numpy.array(numpy.arange(100).reshape((10,10)), order='F')
+        self.f['FORTRAN'] = arr
+        dset = self.f['FORTRAN']
+        self.assert_(numpy.all(dset[:] == arr))
+        self.assert_(dset[:].flags['C_CONTIGUOUS'])
 
-        
-        
-        
+    def test_require(self):
 
-        
+        grp = self.f.require_group('foo')
+        self.assert_(isinstance(grp, Group))
+        self.assert_('foo' in self.f)
+
+        grp2 = self.f.require_group('foo')
+        self.assert_(grp == grp2)
+        self.assert_(hash(grp) == hash(grp2))
+
+        dset = self.f.require_dataset('bar', (10,10), '<i4')
+        self.assert_(isinstance(dset, Dataset))
+        self.assert_('bar' in self.f)
+
+        dset2 = self.f.require_dataset('bar', (10,10), '<i4')
+        self.assert_(dset == dset2)
+        self.assert_(hash(dset) == hash(dset2))
+
+        self.assertRaises(H5Error, self.f.require_group, 'bar')
+        self.assertRaises(H5Error, self.f.require_dataset, 'foo', (10,10), '<i4')
+
+        self.assertRaises(H5Error, self.f.require_dataset, 'bar', (10,11), '<i4')
+        self.assertRaises(H5Error, self.f.require_dataset, 'bar', (10,10), '<c8')
+        self.assertRaises(H5Error, self.f.require_dataset, 'bar', (10,10), '<i1', exact=True)
+
+        self.f.require_dataset('bar', (10,10), '<i1')
+
+    @api_16
+    def test_copy_16(self):
+
+        self.f.create_group('foo')
+        self.assertRaises(NotImplementedError, self.f.copy, 'foo', 'bar')
+
+    @api_18
+    def test_copy_18(self):
 
+        self.f.create_group('foo')
+        self.f.create_group('foo/bar')
+
+        self.f.copy('foo', 'new')
+        self.assert_('new' in self.f)
+        self.assert_('new/bar' in self.f)
+
+    @api_16
+    def test_visit_16(self):
+
+        for x in ['grp1','grp2']:
+            self.f.create_group(x)
+
+        grplist = []
+        self.assertRaises(NotImplementedError, self.f.visit, grplist.append)
+
+        self.assertRaises(NotImplementedError, self.f.visititems, lambda x,y: grplist.append((x,y)))
+
+    @api_18
+    def test_visit_18(self):
+
+        groups = ['grp1', 'grp1/sg1', 'grp1/sg2', 'grp2', 'grp2/sg1', 'grp2/sg1/ssg1']
+
+        for x in groups:
+            self.f.create_group(x)
+
+        group_visit = []
+        self.f.visit(group_visit.append)
+
+        self.assert_equal_contents(groups, group_visit)
+
+        grp_items = [(x, self.f[x]) for x in groups]
+
+        group_visit = []
+        self.f.visititems(lambda x, y: group_visit.append((x,y)))
         
+        self.assert_equal_contents(grp_items, group_visit)
+
+        # Test short-circuit return
+
+        group_visit = []
+        def visitor(name, obj=None):
+            group_visit.append(name)
+            if name.find('grp2/sg1') >= 0:
+                return name
+            return None
+
+        result = self.f.visit(visitor)
+        self.assert_(result.find('grp2/sg1') >= 0)
+        self.assert_(not any(x.find('grp2/sg1/ssg1') >= 0 for x in group_visit))
 
+        del group_visit[:]
 
+        result = self.f.visititems(visitor)
+        self.assert_(result.find('grp2/sg1') >= 0)
+        self.assert_(not any(x.find('grp2/sg1/ssg1') >= 0 for x in group_visit))
 
 
 
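One contract pinned down by test_require above deserves restating: require_group and require_dataset create the object when absent, return the existing, compatible one otherwise, and raise on a shape or dtype conflict instead of silently replacing data. A sketch in the same spirit (this era raises H5Error; current h5py raises TypeError instead):

    import h5py

    with h5py.File('require_demo.hdf5', 'w') as f:   # placeholder name
        d1 = f.require_dataset('bar', shape=(10, 10), dtype='<i4')  # created
        d2 = f.require_dataset('bar', shape=(10, 10), dtype='<i4')  # reused
        assert d1 == d2

        try:
            f.require_dataset('bar', shape=(10, 11), dtype='<i4')   # conflict
        except Exception:
            pass   # H5Error here; TypeError in modern releases
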
diff --git a/h5py/tests/testfiles.py b/h5py/tests/testfiles.py
index 91f4dd6..1127ed7 100644
--- a/h5py/tests/testfiles.py
+++ b/h5py/tests/testfiles.py
@@ -19,34 +19,34 @@
 import numpy as np
 import h5py
 
-class Group(object):
+class Group(dict):
 
-    def __init__(self, members=None, attrs=None):
-        self.attrs = {} if attrs is None else attrs
-        self.members = {} if members is None else members
+    def __init__(self, *args, **kwds):
+        dict.__init__(self, *args, **kwds)
+        self.attrs = {}
 
 class File(Group):
 
     def __init__(self, name, *args, **kwds):
-        self.name = name
         Group.__init__(self, *args, **kwds)
+        self.name = name
 
 class Dataset(object):
 
-    def __init__(self, shape=None, dtype=None, data=None, attrs=None, dset_kwds=None):
-        self.data = data
-        self.shape = shape
-        self.dtype = dtype
+    argnames = ('shape', 'dtype', 'data', 'chunks', 'compression', 'shuffle',
+                'fletcher32', 'maxshape')
+
+    def __init__(self, *args, **kwds):
 
-        self.attrs = {} if attrs is None else attrs
-        self.dset_kwds = {} if dset_kwds is None else dset_kwds
+        kwds.update(zip(self.argnames, args))
+        self.kwds = kwds
+        self.attrs = {}
 
 class Datatype(object):
      
-    def __init__(self, dtype, attrs=None):
-        self.attrs = {} if attrs is None else attrs
+    def __init__(self, dtype):
         self.dtype = dtype
-
+        self.attrs = {}
 
 def compile_hdf5(fileobj):
     """ Take a "model" HDF5 tree and write it to an actual file. """
@@ -58,9 +58,7 @@ def compile_hdf5(fileobj):
 
     def store_dataset(group, name, obj):
         """ Create and store a dataset in the given group """
-        kwds = obj.dset_kwds.copy()
-        kwds.update({'shape': obj.shape, 'dtype': obj.dtype, 'data': obj.data})
-        dset = group.create_dataset(name, **kwds)
+        dset = group.create_dataset(name, **obj.kwds)
         update_attrs(dset, obj.attrs)
 
     def store_type(group, name, obj):
@@ -79,8 +77,8 @@ def compile_hdf5(fileobj):
             hgroup = group
 
         # Now populate it
-        for new_name in sorted(obj.members):
-            new_obj = obj.members[new_name]
+        for new_name in sorted(obj):
+            new_obj = obj[new_name]
 
             if isinstance(new_obj, Dataset):
                 store_dataset(hgroup, new_name, new_obj)
@@ -92,7 +90,7 @@ def compile_hdf5(fileobj):
         update_attrs(hgroup, obj.attrs)
 
     f = h5py.File(fileobj.name, 'w')
-    store_group(f, None, fileobj)
+    store_group(f['/'], None, fileobj)
     f.close()
 
 
@@ -101,11 +99,12 @@ def file_attrs():
     sg1 = Group()
     sg2 = Group()
     sg3 = Group()
-    gattrs = {'String Attribute': np.asarray("This is a string.", '|S18'),
-              'Integer': np.asarray(42, '<i4'),
-              'Integer Array': np.asarray([0,1,2,3], '<i4'),
-              'Byte': np.asarray(-34, '|i1')}
-    grp = Group( {'Subgroup1': sg1, 'Subgroup2': sg2, 'Subgroup3': sg3}, gattrs)
+    grp = Group({'Subgroup1': sg1, 'Subgroup2': sg2, 'Subgroup3': sg3})
+    grp.attrs =  {'String Attribute': np.asarray("This is a string.", '|S18'),
+                  'Integer': np.asarray(42, '<i4'),
+                  'Integer Array': np.asarray([0,1,2,3], '<i4'),
+                  'Byte': np.asarray(-34, '|i1') }
+
     return File('attributes.hdf5', {'Group': grp})
 
 def file_dset():
@@ -129,9 +128,7 @@ def file_dset():
         arr[i]["f_name"][:] = np.array((1024.9637*i,)*10)
         arr[i]["g_name"] = 109
 
-    options = {'chunks': (3,)}
-
-    dset = Dataset(data=arr, attrs={}, dset_kwds=options)
+    dset = Dataset(data=arr, chunks=(3,))
 
     return File('smpl_compound_chunked.hdf5', {'CompoundChunked': dset})
 
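With the testfiles refactor, a fixture is declared as a plain template tree (Group is now a dict subclass; Dataset just captures its constructor keywords) and materialized in one call. A hedged sketch mirroring file_dset() above; these helpers are internal to the test suite, not public API:

    import numpy as np
    from h5py.tests import testfiles

    # Describe the file as a tree of templates...
    dset = testfiles.Dataset(data=np.arange(10, dtype='<i4'), chunks=(5,))
    grp = testfiles.Group({'Data': dset})
    grp.attrs['Answer'] = np.asarray(42, '<i4')

    # ...then write it out as a real HDF5 file.
    testfiles.compile_hdf5(testfiles.File('demo.hdf5', {'Group': grp}))
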

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/h5py.git


