[h5py] 428/455: Dump examples dir, update INSTALL
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Thu Jul 2 18:19:58 UTC 2015
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to annotated tag 1.3.0
in repository h5py.
commit 8a292615082ba230b9b002cce8f2174403c3ac1e
Author: andrewcollette <andrew.collette at gmail.com>
Date: Mon Feb 22 21:37:35 2010 +0000
Dump examples dir, update INSTALL
---
INSTALL.txt | 9 ++---
examples/compression.py | 88 -------------------------------------------------
examples/groups.py | 68 --------------------------------------
examples/simple.py | 65 ------------------------------------
4 files changed, 5 insertions(+), 225 deletions(-)
diff --git a/INSTALL.txt b/INSTALL.txt
index e8002b5..94a9a91 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -1,12 +1,13 @@
-========================
-Installation of h5py 1.1
-========================
+====================
+Installation of h5py
+====================
This document is a very quick overview of the installation procedure for UNIX.
Full documentation is on the web at h5py.alfven.org. Windows users should
download a binary installer; installation from source is only supported on
UNIX-like platforms (Linux and Mac OS-X).
+On Mac OS-X, you can also install via MacPorts.
Before you start
----------------
@@ -68,7 +69,7 @@ Running tests
H5py has a battery of built-in tests. To run them, simply do:
- python setup.py nosetests
+ python setup.py test
Please report test failures to the author, either to [h5py at alfven dot org]
or the bug tracker at http://h5py.googlecode.com.
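(On current h5py releases the test suite is typically driven by pytest rather than setup.py; a minimal sketch, assuming a recent h5py with pytest installed alongside it:)

    # Not from this commit: one way to run the bundled tests on recent h5py.
    # Assumes pytest is installed; h5py.run_tests() wraps it.
    import h5py
    h5py.run_tests()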
diff --git a/examples/compression.py b/examples/compression.py
deleted file mode 100644
index f9b8075..0000000
--- a/examples/compression.py
+++ /dev/null
@@ -1,88 +0,0 @@
-
-"""
- Example demonstrating how to use compression and other special options
- for storing datasets in HDF5.
-
- Compression is supported in HDF5 via a "filter pipeline" which is applied
- to data as it is written to and read from disk. Each dataset in the
- file has its own pipeline, which allows the compression strategy to be
- specified on a per-dataset basis.
-
- Compression is only available for the actual data, and not for attributes
- or metadata.
-
- As of h5py 1.1, three compression techniques are available, "gzip", "lzf",
- and "szip". The non-compression filters "shuffle" and "fletcher32" are
- also available. See the docstring for the module h5py.filters for more
- information.
-
- Please note LZF is an h5py-only filter. While reference C source is
- available, other HDF5-aware applications may be unable to read data in
- this format.
-"""
-
-import os
-
-import numpy as np
-import h5py
-import h5py.filters
-
-SHAPE = (100,100,100,20)
-DTYPE = np.dtype('i')
-SIZE = np.product(SHAPE)
-
-f = h5py.File('compress_test.hdf5','w')
-
-mydata = np.arange(SIZE,dtype=DTYPE).reshape(SHAPE)
-
-datasets = []
-
-print "Creating dataset with gzip"
-dset = f.create_dataset("gzipped", data=mydata, compression="gzip",
- compression_opts=4) # compression_opts is optional
-datasets.append(dset)
-
-print "Creating dataset with LZF"
-dset = f.create_dataset("lzfcompressed", data=mydata, compression="lzf")
-datasets.append(dset)
-
-if 'szip' in h5py.filters.encode: # Not distributed with all versions of HDF5
- print "Creating dataset with SZIP"
- dset = f.create_dataset("szipped", data=mydata, compression="szip",
- compression_opts=('nn',8))
- datasets.append(dset)
-
-print "Creating dataset with LZF and error detection"
-dset = f.create_dataset("gzip_error_detection", data=mydata,
- compression="gzip", fletcher32=True)
-datasets.append(dset)
-
-print "Creating uncompressed dataset"
-dset = f.create_dataset("uncompressed", data=mydata)
-datasets.append(dset)
-
-f.flush()
-
-def showsettings(dataset):
- """ Demonstrate the public attributes of datasets """
-
- print "="*60
- print "Dataset ", dataset.name
- print '-'*30
- print "Shape ", dataset.shape
- print "Chunk size ", dataset.chunks
- print "Datatype ", dataset.dtype
- print '-'*30
- print "Compression ", dataset.compression
- print "Settings ", dataset.compression_opts
- print '-'*30
- print "Shuffle ", dataset.shuffle
- print "Fletcher32 ", dataset.fletcher32
-
-for x in datasets:
- showsettings(x)
-
-f.close()
-
-
-
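(For reference, a minimal Python 3 rendering of the compression example removed above. This is a sketch, not part of the commit: it assumes a current NumPy and h5py, so np.prod replaces np.product and the print statements use function syntax:)

    # A sketch only: Python 3 port of the compression example removed above.
    # Assumes a current NumPy/h5py; np.prod replaces the old np.product.
    import numpy as np
    import h5py

    SHAPE = (100, 100, 100, 20)
    data = np.arange(np.prod(SHAPE), dtype='i').reshape(SHAPE)

    with h5py.File('compress_test.hdf5', 'w') as f:
        # Each dataset carries its own filter pipeline.
        f.create_dataset('gzipped', data=data,
                         compression='gzip', compression_opts=4)
        f.create_dataset('lzfcompressed', data=data, compression='lzf')
        f.create_dataset('gzip_error_detection', data=data,
                         compression='gzip', fletcher32=True)
        f.create_dataset('uncompressed', data=data)

        for name, dset in f.items():
            print(name, dset.compression, dset.compression_opts,
                  dset.shuffle, dset.fletcher32)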
diff --git a/examples/groups.py b/examples/groups.py
deleted file mode 100644
index 435119d..0000000
--- a/examples/groups.py
+++ /dev/null
@@ -1,68 +0,0 @@
-
-"""
- HDF5 for Python (h5py) is a Python interface to the HDF5 library. Built
- on a near-complete Python wrapping of the HDF5 C API, it exposes a simple,
- NumPy-like interface for interacting with HDF5 files, datasets, attributes
- and groups.
-
- This module demonstrates the use of HDF5 groups from h5py. HDF5 groups
- are analogous to directories in a filesystem; they even use the UNIX-style
- /path/to/resource syntax. In h5py, groups act like dictionaries. They
- also provide the necessary methods to create subgroups, datasets, and
- attributes.
-
- HDF5 for Python is available at h5py.alfven.org.
-"""
-
-import numpy as np
-import h5py
-
-f = h5py.File('myfile.hdf5','w')
-
-# The file object is also the "root group" ("/") in HDF5. It's currently
-# empty:
-print "Number of items in the root group: %d" % len(f)
-
-# Create some groups
-g1 = f.create_group('Group1')
-g2 = f.create_group('Another Group')
-g3 = f.create_group('Yet another group')
-
-# All groups, including the root group, support a basic dictionary-style
-# interface
-print "There are now %d items in the root group" % len(f)
-print "They are: %s" % ", ".join(f) # iterating yields member names
-
-# Groups can contain subgroups themselves
-sub1 = g1.create_group("Subgroup1")
-
-# Prints "/Group1/Subgroup1"
-print "Full name of subgroup is %s" % sub1.name
-
-# You can retrieve them using __getitem__ syntax
-sub2 = g1['Subgroup1']
-
-# You can attach attributes to groups, just like datasets, containing just
-# about anything NumPy can handle.
-g1.attrs['purpose'] = "A demonstration group"
-g1.attrs['Life, universe, everything'] = 42
-g1.attrs['A numpy array'] = np.ones((3,), dtype='>i2')
-
-# Create datasets using group methods. (See other examples for a more in-
-# depth introduction to datasets).
-
-data = np.arange(100*100).reshape((100,100))
-
-dset = sub1.create_dataset("My dataset", data=data)
-
-print "The new dataset has full name %s, shape %s and type %s" % \
- (dset.name, dset.shape, dset.dtype)
-
-# Closing the file closes all open objects
-f.close()
-
-
-
-
-
-
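(Likewise, a Python 3 sketch of the removed groups example, assuming a current h5py; the dictionary-style group interface is unchanged:)

    # A sketch only: Python 3 rendering of the groups example removed above.
    import numpy as np
    import h5py

    with h5py.File('myfile.hdf5', 'w') as f:
        g1 = f.create_group('Group1')
        f.create_group('Another Group')
        print("Root group members:", ", ".join(f))  # iterating yields names

        sub1 = g1.create_group('Subgroup1')
        print("Full name of subgroup:", sub1.name)  # "/Group1/Subgroup1"

        # Attributes hold just about anything NumPy can handle.
        g1.attrs['purpose'] = "A demonstration group"
        g1.attrs['Life, universe, everything'] = 42
        g1.attrs['A numpy array'] = np.ones((3,), dtype='>i2')

        data = np.arange(100 * 100).reshape((100, 100))
        dset = sub1.create_dataset('My dataset', data=data)
        print("New dataset:", dset.name, dset.shape, dset.dtype)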
diff --git a/examples/simple.py b/examples/simple.py
deleted file mode 100644
index d43c8a5..0000000
--- a/examples/simple.py
+++ /dev/null
@@ -1,65 +0,0 @@
-
-"""
- HDF5 for Python (h5py) is a Python interface to the HDF5 library. Built
- on a near-complete Python wrapping of the HDF5 C API, it exposes a simple,
- NumPy-like interface for interacting with HDF5 files, datasets, attributes
- and groups.
-
- This is a simple module which demonstrates some of the features of HDF5,
- including the ability to interact with large on-disk datasets in a
- NumPy-like fashion.
-
- In this example, we create a file containing a 1 GB dataset, populate it
- from NumPy, and then slice into it. HDF5 attributes are also demonstrated.
-
- HDF5 for Python is available at h5py.alfven.org.
-"""
-
-import numpy as np
-import h5py
-
-f = h5py.File('myfile.hdf5','w')
-
-# Create a new, empty dataset to hold 1GB of floats
-dset = f.create_dataset('MyDataset', (256, 1024, 1024), dtype='f')
-
-# Datasets have some of the same properties as NumPy arrays
-print "The new dataset has shape %s and type %s" % (dset.shape, dset.dtype)
-
-# Attach some attributes
-dset.attrs['purpose'] = "Demonstration dataset for floats"
-dset.attrs['original size'] = (256, 1024, 1024) # This tuple is auto-
- # converted to an HDF5 array.
-dset.attrs['constant'] = 42
-
-# Populate the file in a loop. Note that you can use NumPy-style slicing
-# on datasets directly, including the row-like selection demonstrated here.
-
-base = np.arange(1024*1024, dtype='f').reshape((1024,1024))
-for idx in xrange(256):
- if(idx%16==0): print 'Populating row %d' % idx
-
- base += idx*(1024*1024)
- dset[idx] = base
-
-
-# Perform some operations requiring random access. Note these operations use
-# HDF5 "dataspaces" for efficient read/write.
-
-print "Resetting some indices to one"
-dset[15, 24, 100:200] = np.ones((100,), dtype='f')
-
-print 'Retrieving every 64th element... '
-subarray = dset[...,::64]
-print 'Retrieved array has shape %s' % (subarray.shape,)
-
-# We can also access attributes using dictionary-style syntax
-for name, value in dset.attrs.iteritems():
- print 'Attribute "%s" has value: %r' % (name, value)
-
-# When finished, close the file. The dataset (and all other open objects)
-# are closed automatically.
-f.close()
-
-
-
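(And a Python 3 sketch of the removed simple example, again assuming a current NumPy and h5py; range and attrs.items() replace xrange and iteritems:)

    # A sketch only: Python 3 rendering of the simple example removed above.
    import numpy as np
    import h5py

    with h5py.File('myfile.hdf5', 'w') as f:
        # 256 x 1024 x 1024 float32 values, about 1 GB on disk
        dset = f.create_dataset('MyDataset', (256, 1024, 1024), dtype='f')
        dset.attrs['purpose'] = "Demonstration dataset for floats"

        base = np.arange(1024 * 1024, dtype='f').reshape((1024, 1024))
        for idx in range(256):
            dset[idx] = base + idx * (1024 * 1024)

        # NumPy-style random access; HDF5 dataspaces do the work.
        dset[15, 24, 100:200] = np.ones((100,), dtype='f')
        subarray = dset[..., ::64]
        print("Strided read has shape", subarray.shape)

        for name, value in dset.attrs.items():
            print('Attribute "%s" = %r' % (name, value))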
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/h5py.git