[libtie-cache-perl] 01/08: Import last packaging into git.
Harlan Lieberman-Berg
H.LiebermanBerg at gmail.com
Mon Nov 16 02:42:12 UTC 2015
This is an automated email from the git hooks/post-receive script.
hlieberman-guest pushed a commit to branch master
in repository libtie-cache-perl.
commit 62b94745cb71b62092f971de8dd844c3c5953a78
Author: Harlan Lieberman-Berg <hlieberman at setec.io>
Date: Sun Nov 15 21:05:47 2015 -0500
Import last packaging into git.
---
CHANGES | 126 ++++++++++
Cache.pm | 697 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
MANIFEST | 9 +
MANIFEST.SKIP | 4 +
Makefile.PL | 5 +
README | 172 ++++++++++++++
bench.pl | 64 +++++
debian/changelog | 53 +++++
debian/compat | 1 +
debian/control | 22 ++
debian/copyright | 31 +++
debian/examples | 2 +
debian/rules | 84 +++++++
debian/watch | 3 +
test.pl | 199 ++++++++++++++++
15 files changed, 1472 insertions(+)
diff --git a/CHANGES b/CHANGES
new file mode 100755
index 0000000..411bdc3
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,126 @@
+$MODULE = "Tie::Cache"; $VERSION = .17; $DATE = '07/03/2002';
+
++ added tied(%cache)->flush() method to sync dirty writes
+ that have been cached with WriteSync => 0 config for a true
+ cache inherited object. Added testing to cover in test.pl
+
+$MODULE = "Tie::Cache"; $VERSION = .15; $DATE = '03/20/2001';
+
++ Better test.pl timing output, also differentiate between
+ cache with MaxBytes setting and one with just MaxCount,
+ as MaxBytes is a bit slower for data size calculations.
+
++ Better Tie::Cache options / object config error checking,
+ die() on obvious misconfigurations that will get developers
+ in trouble.
+
+- MaxSize only set if MaxSize or MaxBytes are defined,
+ was defaulting to 1 otherwise, killing basic MaxCount config.
+
++ Optimizations for common use where Tie::Cache is not
+ subclassed for write()/read() API
+
+$MODULE = "Tie::Cache"; $VERSION = .11; $DATE = '03/18/2001';
+
++ use of arrays for internal nodes structure instead of
+ hashes for savings on memory and lookup speed.
+
++ MaxBytes now more accurate with recursive length calculations
+ on reference keys & values, and added estimates of 240 bytes
+ per key/value pair, + 16 bytes per reference found, in addition
+ to the length of the data being stored.
+
+ A MaxBytes setting of 1000000 would then try to really limit
+ total memory consumption of the entire cache to 1M. This is
+ relevant when being used in high process environments like mod_perl
+ where memory savings are critical.
+
++ MaxSize has default of MaxBytes/10, so large cache entries don't
+ purge many others. If cache entry exceeds MaxSize in bytes, then
+ that value won't be cached.
+
+$MODULE = "Tie::Cache"; $VERSION = .10; $DATE = '11/22/00';
+
+- Allow for undef values to be stored like $cache{1} = undef
+ Really doesn't affect cache, but got rid of error message
+ in this case.
+
+$MODULE = "Tie::Cache"; $VERSION = .09; $DATE = 'TBA';
+
++ Added benchmark section comparing Tie::Cache & Tie::Cache::LRU
+
+$MODULE = "Tie::Cache"; $VERSION = .08; $DATE = '12/14/99';
+
+- FETCH, or %cache reads like $cache{key} would return
+ a defined value, null, when it should have been undefined.
+ This would break tests that test for defined($cache{$key}),
+ and is a big problem. I introduced this bug with my
+ tuning in .07 :(
+
+$MODULE = "Tie::Cache"; $VERSION = .07; $DATE = '12/13/99';
+
++ Improved test suite to do some benchmarking, and
+ real interpreted tests.
+
++ Performance tuning making the cache about 5-10% faster.
+
++ MaxSize setting makes it so that key/value pair lengths
+ of MaxSize or greater will not be stored in the cache.
+ This prevents odd large entries from flushing the cache
+ of many smaller entries.
+
+- setting Debug for one Tie::Cache hash is independent of
+ another's Debug setting, before one setting would affect
+ all the rest. To set Debug value for all Tie::Cache
+ hashes, set $Tie::Cache::Debug.
+
+$MODULE = "Tie::Cache"; $VERSION = .06; $DATE = '2/16/99';
+
++ WriteSynch config option for TRUE CACHE. WriteSynch => 0
+ will have the dirty data be written back as late as possible.
+ WriteSynch => 1 is immediate write-through for data dirtied.
+
+- STORE returns value stored; $cache{$key} = $value returns $value now
+
++ decomped FETCH better so it doesn't use STORE internally
+ necessary for new WriteSynch functionality
+
++ WriteSynch => 0 config set in test.pl to demonstrate use.
+
++ Optimizations, especially for refreshing entries in cache on FETCH
+
+$MODULE = "Tie::Cache"; $VERSION = .05;
+
++ Keep track of hits / misses even without debug option set.
+
+$MODULE = "Tie::Cache"; $VERSION = .04;
+
+- Get rid of -w warnings in test.pl.
+
+$MODULE = "Tie::Cache"; $VERSION = .031;
+
+- Removed test2.pl from installation as it was getting installed
+ with Tie::Cache :(
+
+$MODULE = "Tie::Cache"; $VERSION = .03;
+
+- Global destructor bug fixed. Wasn't flushing cache consistently.
+
+$MODULE = "Tie::Cache"; $VERSION = .02;
+
++ MaxBytes config option is new. It allows cache size to
+ be based on the bytes the cache holds.
+
++ test.pl output has Debug set to 2, so full debugging output is
+ displayed. This allows a new user to see how cache works.
+
+$MODULE = "Tie::Cache"; $VERSION = .01;
+
+First release of module.
+
+
+
+
+
+
+
diff --git a/Cache.pm b/Cache.pm
new file mode 100755
index 0000000..f24d0fc
--- /dev/null
+++ b/Cache.pm
@@ -0,0 +1,697 @@
+#!/usr/bin/perl -w
+
+package Tie::Cache;
+use strict;
+use vars qw(
+ $VERSION $Debug $STRUCT_SIZE $REF_SIZE
+ $BEFORE $AFTER $KEY $VALUE $BYTES $DIRTY
+);
+
+$VERSION = .17;
+$Debug = 0; # set to 1 for summary, 2 for debug output
+$STRUCT_SIZE = 240; # per cached elem bytes overhead, approximate
+$REF_SIZE = 16;
+
+# NODE ARRAY STRUCT
+$KEY = 0;
+$VALUE = 1;
+$BYTES = 2;
+$BEFORE = 3;
+$AFTER = 4;
+$DIRTY = 5;
+
+=pod
+
+=head1 NAME
+
+Tie::Cache - LRU Cache in Memory
+
+=head1 SYNOPSIS
+
+ use Tie::Cache;
+ tie %cache, 'Tie::Cache', 100, { Debug => 1 };
+ tie %cache2, 'Tie::Cache', { MaxCount => 100, MaxBytes => 50000 };
+ tie %cache3, 'Tie::Cache', 100, { Debug => 1 , WriteSync => 0};
+
+ # Options ##################################################################
+ #
+ # Debug => 0 - DEFAULT, no debugging output
+ # 1 - prints cache statistics upon destroying
+ # 2 - prints detailed debugging info
+ #
+ # MaxCount => Maximum entries in cache.
+ #
+ # MaxBytes => Maximum bytes taken in memory for cache based on approximate
+ # size of total cache structure in memory
+ #
+ # There is approximately 240 bytes used per key/value pair in the cache for
+ # the cache data structures, so a cache of 5000 entries would take
+ # approximately 1.2M plus the size of the data being cached.
+ #
+ # MaxSize => Maximum size of each cache entry. Larger entries are not cached.
+ # This helps prevent much of the cache being flushed when
+ # you set an exceptionally large entry. Defaults to MaxBytes/10
+ #
+ # WriteSync => 1 - DEFAULT, write() when data is dirtied for
+ # TRUE CACHE (see below)
+ # 0 - write() dirty data as late as possible, when leaving
+ # cache, or when cache is being DESTROY'd
+ #
+ ############################################################################
+
+ # cache supports normal tied hash functions
+ $cache{1} = 2; # STORE
+ print "$cache{1}\n"; # FETCH
+
+ # FIRSTKEY, NEXTKEY
+ while(($k, $v) = each %cache) { print "$k: $v\n"; }
+
+ delete $cache{1}; # DELETE
+ %cache = (); # CLEAR
+
+=head1 DESCRIPTION
+
+This module implements a least recently used (LRU) cache in memory
+through a tie interface. Any time data is stored in the tied hash,
+that key/value pair has an entry time associated with it, and
+as the cache fills up, those members of the cache that are
+the oldest are removed to make room for new entries.
+
+So, the cache only "remembers" the last written entries, up to the
+size of the cache. This can be especially useful if you access
+great amounts of data, but only access a minority of the data a
+majority of the time.
+
+The implementation is a hash, for quick lookups,
+overlaying a doubly linked list for quick insertion and deletion.
+On a WinNT PII 300, writes to the hash were done at a rate
+3100 per second, and reads from the hash at 6300 per second.
+Work has been done to optimize refreshing cache entries that are
+frequently read from, code like $cache{entry}, which moves the
+entry to the end of the linked list internally.
+
+=cut Documentation continues at the end of the module.
+
+sub TIEHASH {
+ my($class, $max_count, $options) = @_;
+
+ if(ref($max_count)) {
+ $options = $max_count;
+ $max_count = $options->{MaxCount};
+ }
+
+ unless($max_count || $options->{MaxBytes}) {
+ die('you must specify cache size with either MaxBytes or MaxCount');
+ }
+
+ my $sync = exists($options->{WriteSync}) ? $options->{WriteSync} : 1;
+
+ my $self = bless
+ {
+ # how many items to cache
+ max_count=> $max_count,
+
+ # max bytes to cache
+ max_bytes => $options->{MaxBytes},
+
+ # max size (in bytes) of an individual cache entry
+ max_size => $options->{MaxSize} || ($options->{MaxBytes} ? (int($options->{MaxBytes}/10) + 1) : 0),
+
+ # class track, so know if overridden subs should be used
+ 'class' => $class,
+ 'subclass' => $class ne 'Tie::Cache' ? 1 : 0,
+
+ # current sizes
+ count=>0,
+ bytes=>0,
+
+ # inner structures
+ head=>0,
+ tail=>0,
+ nodes=>{},
+ 'keys'=>[],
+
+ # statistics
+ hit => 0,
+ miss => 0,
+
+ # config
+ sync => $sync,
+ dbg => $options->{Debug} || $Debug
+
+
+ }, $class;
+
+ if (($self->{max_bytes} && ! $self->{max_size})) {
+ die("MaxSize must be defined when MaxBytes is");
+ }
+
+ if($self->{max_bytes} and $self->{max_bytes} < 1000) {
+ die("cannot set MaxBytes to under 1000, each raw entry takes $STRUCT_SIZE bytes alone");
+ }
+
+ if($self->{max_size} && $self->{max_size} < 3) {
+ die("cannot set MaxSize to under 3 bytes, assuming error in config");
+ }
+
+ $self;
+}
+
+# override to write data leaving cache
+sub write { undef; }
+# commented this section out for speed
+# my($self, $key, $value) = @_;
+# 1;
+#}
+
+# override to get data if not in cache, should return $value
+# associated with $key
+sub read { undef; }
+# commented this section out for speed
+# my($self, $key) = @_;
+# undef;
+#}
+
+sub FETCH {
+ my($self, $key) = @_;
+
+ my $node = $self->{nodes}{$key};
+ if($node) {
+ # refresh node's entry
+ $self->{hit}++; # if $self->{dbg};
+
+ # we used to call delete then insert, but we streamlined code
+ if(my $after = $node->[$AFTER]) {
+ $self->{dbg} > 1 and $self->print("update() node $node to tail of list");
+ # reconnect the nodes
+ my $before = $after->[$BEFORE] = $node->[$BEFORE];
+ if($before) {
+ $before->[$AFTER] = $after;
+ } else {
+ $self->{head} = $after;
+ }
+
+ # place at the end
+ $self->{tail}[$AFTER] = $node;
+ $node->[$BEFORE] = $self->{tail};
+ $node->[$AFTER] = undef;
+ $self->{tail} = $node; # always true after this
+ } else {
+ # if there is nothing after node, then we are at the end already
+ # so don't do anything to move the nodes around
+ die("this node is the tail, so something's wrong")
+ unless($self->{tail} eq $node);
+ }
+
+ $self->print("FETCH [$key, $node->[$VALUE]]") if ($self->{dbg} > 1);
+ $node->[$VALUE];
+ } else {
+ # we have a cache miss here
+ $self->{miss}++; # if $self->{dbg};
+
+ # its fine to always insert a node, even when we have an undef,
+ # because even if we aren't a sub-class, we should assume use
+ # that would then set the entry. This model works well with
+ # sub-classing and reads() that might want to return undef as
+ # a valid value.
+ my $value;
+ if ($self->{subclass}) {
+ $self->print("read() for key $key") if $self->{dbg} > 1;
+ $value = $self->read($key);
+ }
+
+ if(defined $value) {
+ my $length;
+ if($self->{max_size}) {
+ # check max size of entry, that it not exceed max size
+ $length = &_get_data_length(\$key, \$value);
+ if($length > $self->{max_size}) {
+ $self->print("direct read() [$key, $value]") if ($self->{dbg} > 1);
+ return $value;
+ }
+ }
+ # if we get here, we should insert the new node
+ $node = &create_node($self, \$key, \$value, $length);
+ &insert($self, $node);
+ $value;
+ } else {
+ undef;
+ }
+ }
+}
+
+sub STORE {
+ my($self, $key, $value) = @_;
+ my $node;
+
+ $self->print("STORE [$key,$value]") if ($self->{dbg} > 1);
+
+ # do not cache undefined values
+ defined($value) || return(undef);
+
+ # check max size of entry, that it not exceed max size
+ my $length;
+ if($self->{max_size}) {
+ $length = &_get_data_length(\$key, \$value);
+ if($length > $self->{max_size}) {
+ if ($self->{subclass}) {
+ $self->print("direct write() [$key, $value]") if ($self->{dbg} > 1);
+ $self->write($key, $value);
+ }
+ return $value;
+ }
+ }
+
+ # do we have node already ?
+ if($self->{nodes}{$key}) {
+ $node = &delete($self, $key);
+# $node = &delete($self, $key);
+# $node->[$VALUE] = $value;
+# $node->[$BYTES] = $length || &_get_data_length(\$key, \$value);
+ }
+
+ # insert new node
+ $node = &create_node($self, \$key, \$value, $length);
+# $node ||= &create_node($self, \$key, \$value, $length);
+ &insert($self, $node);
+
+ # if the data is sync'd call write now, otherwise defer the data
+ # writing, but mark it dirty so it can be cleanup up at the end
+ if ($self->{subclass}) {
+ if($self->{sync}) {
+ $self->print("sync write() [$key, $value]") if $self->{dbg} > 1;
+ $self->write($key, $value);
+ } else {
+ $node->[$DIRTY] = 1;
+ }
+ }
+
+ $value;
+}
+
+sub DELETE {
+ my($self, $key) = @_;
+
+ $self->print("DELETE $key") if ($self->{dbg} > 1);
+ my $node = $self->delete($key);
+ $node ? $node->[$VALUE] : undef;
+}
+
+sub CLEAR {
+ my($self) = @_;
+
+ $self->print("CLEAR CACHE") if ($self->{dbg} > 1);
+
+ if($self->{subclass}) {
+ my $flushed = $self->flush();
+ $self->print("FLUSH COUNT $flushed") if ($self->{dbg} > 1);
+ }
+
+ my $node;
+ while($node = $self->{head}) {
+ $self->delete($self->{head}[$KEY]);
+ }
+
+ 1;
+}
+
+sub EXISTS {
+ my($self, $key) = @_;
+ exists $self->{nodes}{$key};
+}
+
+# firstkey / nextkey emulate keys() and each() behavior by
+# taking a snapshot of all the nodes at firstkey, and
+# iterating through the keys with nextkey
+#
+# this method therefore will only support one each() / keys()
+# happening during any given time.
+#
+sub FIRSTKEY {
+ my($self) = @_;
+
+ $self->{'keys'} = [];
+ my $node = $self->{head};
+ while($node) {
+ push(@{$self->{'keys'}}, $node->[$KEY]);
+ $node = $node->[$AFTER];
+ }
+
+ shift @{$self->{'keys'}};
+}
+
+sub NEXTKEY {
+ my($self, $lastkey) = @_;
+ shift @{$self->{'keys'}};
+}
+
+sub DESTROY {
+ my($self) = @_;
+
+ # if debugging, snapshot cache before clearing
+ if($self->{dbg}) {
+ if($self->{hit} || $self->{miss}) {
+ $self->{hit_ratio} =
+ sprintf("%4.3f", $self->{hit} / ($self->{hit} + $self->{miss}));
+ }
+ $self->print($self->pretty_self());
+ if($self->{dbg} > 1) {
+ $self->print($self->pretty_chains());
+ }
+ }
+
+ $self->print("DESTROYING") if $self->{dbg} > 1;
+ $self->CLEAR();
+
+ 1;
+}
+
+####PERL##LRU##TIE##CACHE##PERL##LRU##TIE##CACHE##PERL##LRU##TIE##CACHE
+## Helper Routines
+####PERL##LRU##TIE##CACHE##PERL##LRU##TIE##CACHE##PERL##LRU##TIE##CACHE
+
+# we use scalar_refs for the data for speed
+sub create_node {
+ my($self, $key, $value, $length) = @_;
+ (defined($$key) && defined($$value))
+ || die("need more localized data than $$key and $$value");
+
+ # max_size always defined when max_bytes is
+ if (($self->{max_size})) {
+ $length = defined $length ? $length : &_get_data_length($key, $value)
+ } else {
+ $length = 0;
+ }
+
+ # ORDER SPECIFIC, see top for NODE ARRAY STRUCT
+ my $node = [ $$key, $$value, $length ];
+}
+
+sub _get_data_length {
+ my($key, $value) = @_;
+ my $length = 0;
+ my %refs;
+
+ my @data = ($$key, $$value);
+ while(my $elem = shift @data) {
+ next if $refs{$elem};
+ $refs{$elem} = 1;
+ if(ref $elem && $elem =~ /(SCALAR|HASH|ARRAY)/) {
+ my $type = $1;
+ $length += $REF_SIZE; # guess, 16 bytes per ref, probably more
+ if (($type eq 'SCALAR')) {
+ $length += length($$elem);
+ } elsif (($type eq 'HASH')) {
+ while (my($k,$v) = each %$elem) {
+ for my $kv($k,$v) {
+ if ((ref $kv)) {
+ push(@data, $kv);
+ } else {
+ $length += length($kv);
+ }
+ }
+ }
+ } elsif (($type eq 'ARRAY')) {
+ for my $val (@$elem){
+ if ((ref $val)) {
+ push(@data, $val);
+ } else {
+ $length += length($val);
+ }
+ }
+ }
+ } else {
+ $length += length($elem);
+ }
+ }
+
+ $length;
+}
+
+sub insert {
+ my($self, $new_node) = @_;
+
+ $new_node->[$AFTER] = 0;
+ $new_node->[$BEFORE] = $self->{tail};
+ $self->print("insert() [$new_node->[$KEY], $new_node->[$VALUE]]") if ($self->{dbg} > 1);
+
+ $self->{nodes}{$new_node->[$KEY]} = $new_node;
+
+ # current sizes
+ $self->{count}++;
+ $self->{bytes} += $new_node->[$BYTES] + $STRUCT_SIZE;
+
+ if($self->{tail}) {
+ $self->{tail}[$AFTER] = $new_node;
+ } else {
+ $self->{head} = $new_node;
+ }
+ $self->{tail} = $new_node;
+
+ ## if we are too big now, remove head
+ while(($self->{max_count} && ($self->{count} > $self->{max_count})) ||
+ ($self->{max_bytes} && ($self->{bytes} > $self->{max_bytes})))
+ {
+ if($self->{dbg} > 1) {
+ $self->print("current/max: ".
+ "bytes ($self->{bytes}/$self->{max_bytes}) ".
+ "count ($self->{count}/$self->{max_count}) "
+ );
+ }
+ my $old_node = $self->delete($self->{head}[$KEY]);
+ if ($self->{subclass}) {
+ if($old_node->[$DIRTY]) {
+ $self->print("dirty write() [$old_node->[$KEY], $old_node->[$VALUE]]")
+ if ($self->{dbg} > 1);
+ $self->write($old_node->[$KEY], $old_node->[$VALUE]);
+ }
+ }
+# if($self->{dbg} > 1) {
+# $self->print("after delete - bytes $self->{bytes}; count $self->{count}");
+# }
+ }
+
+ 1;
+}
+
+sub delete {
+ my($self, $key) = @_;
+ my $node = $self->{nodes}{$key} || return;
+# return unless $node;
+
+ $self->print("delete() [$key, $node->[$VALUE]]") if ($self->{dbg} > 1);
+
+ my $before = $node->[$BEFORE];
+ my $after = $node->[$AFTER];
+
+ # my($before, $after) = $node->{before,after};
+ if($before) {
+ ($before->[$AFTER] = $after);
+ } else {
+ $self->{head} = $after;
+ }
+
+ if($after) {
+ ($after->[$BEFORE] = $before);
+ } else {
+ $self->{tail} = $before;
+ }
+
+ delete $self->{nodes}{$key};
+ $self->{bytes} -= ($node->[$BYTES] + $STRUCT_SIZE);
+ $self->{count}--;
+
+ $node;
+}
+
+sub flush {
+ my $self = shift;
+
+ $self->print("FLUSH CACHE") if ($self->{dbg} > 1);
+
+ my $node = $self->{head};
+ my $flush_count = 0;
+ while($node) {
+ if($node->[$DIRTY]) {
+ $self->print("flush dirty write() [$node->[$KEY], $node->[$VALUE]]")
+ if ($self->{dbg} > 1);
+ $self->write($node->[$KEY], $node->[$VALUE]);
+ $node->[$DIRTY] = 0;
+ $flush_count++;
+ }
+ $node = $node->[$AFTER];
+ }
+
+ $flush_count;
+}
+
+sub print {
+ my($self, $msg) = @_;
+ print "$self: $msg\n";
+}
+
+sub pretty_self {
+ my($self) = @_;
+
+ my(@prints);
+ for(sort keys %{$self}) {
+ next unless defined $self->{$_};
+ push(@prints, "$_=>$self->{$_}");
+ }
+
+ "{ " . join(", ", @prints) . " }";
+}
+
+sub pretty_chains {
+ my($self) = @_;
+ my($str);
+ my $k = $self->FIRSTKEY();
+
+ $str .= "[head]->";
+ my($curr_node) = $self->{head};
+ while($curr_node) {
+ $str .= "[$curr_node->[$KEY],$curr_node->[$VALUE]]->";
+ $curr_node = $curr_node->[$AFTER];
+ }
+ $str .= "[tail]->";
+
+ $curr_node = $self->{tail};
+ while($curr_node) {
+ $str .= "[$curr_node->[$KEY],$curr_node->[$VALUE]]->";
+ $curr_node = $curr_node->[$BEFORE];
+ }
+ $str .= "[head]";
+
+ $str;
+}
+
+1;
+
+__END__
+
+=head1 INSTALLATION
+
+Tie::Cache installs easily using the make or nmake commands as
+shown below. Otherwise, just copy Cache.pm to $PERLLIB/site/Tie
+
+ > perl Makefile.PL
+ > make
+ > make test
+ > make install
+
+ * use nmake for win32
+ ** you can also just copy Cache.pm to $perllib/Tie
+
+=head1 BENCHMARKS
+
+There is another simpler LRU cache implementation in CPAN,
+Tie::Cache::LRU, which has the same basic size limiting
+functionality, and for this functionality, the exact same
+interface.
+
+Through healthy competition, Michael G Schwern got
+Tie::Cache::LRU mostly faster than Tie::Cache on reads & writes:
+
+ Cache Size 5000 Tie::Cache 0.17 Tie::Cache::LRU 0.21
+ 10000 Writes 1.55 CPU sec 1.10 CPU sec
+ 40000 Reads 1.82 CPU sec 1.58 CPU sec
+ 10000 Deletes 0.55 CPU sec 0.59 CPU sec
+
+Unless you are using TRUE CACHE or MaxBytes functionality,
+using Tie::Cache::LRU should be an easy replacement for Tie::Cache.
+
+=head1 TRUE CACHE
+
+To use class as a true cache, which acts as the sole interface
+for some data set, subclass the real cache off Tie::Cache,
+with @ISA = qw( 'Tie::Cache' ) notation. Then override
+the read() method for behavior when there is a cache miss,
+and the write() method for behavior when the cache's data
+changes.
+
+When WriteSync is 1 or TRUE (DEFAULT), write() is called immediately
+when data in the cache is modified. If set to 0, data that has
+been modified in the cache gets written out when the entries are deleted or
+during the DESTROY phase of the cache object, usually at the end of
+a script.
+
+To have the dirty data write() periodically while WriteSync is set to 0,
+there is a flush() cache API call that will flush the dirty writes
+in this way. Just call the flush() API like:
+
+ my $write_flush_count = tied(%cache)->flush();
+
+The flush() API was added in the .17 release thanks to Rob Bloodgood.
+
+=head1 TRUE CACHE EXAMPLE
+
+ use Tie::Cache;
+
+ # personalize the Tie::Cache object, by inheriting from it
+ package My::Cache;
+ @ISA = qw(Tie::Cache);
+
+ # override the read() and write() member functions
+ # these tell the cache what to do with a cache miss or flush
+ sub read {
+ my($self, $key) = @_;
+ print "cache miss for $key, read() data\n";
+ rand() * $key;
+ }
+ sub write {
+ my($self, $key, $value) = @_;
+ print "flushing [$key, $value] from cache, write() data\n";
+ }
+
+ my $cache_size = $ARGV[0] || 2;
+ my $num_to_cache = $ARGV[1] || 4;
+ my $Debug = $ARGV[2] || 1;
+
+ tie %cache, 'My::Cache', $cache_size, {Debug => $Debug};
+
+ # load the cache with new data, each through its contents,
+ # and then reload in reverse order.
+ for(1..$num_to_cache) { print "read data $_: $cache{$_}\n" }
+ while(my($k, $v) = each %cache) { print "each data $k: $v\n"; }
+ for(my $i=$num_to_cache; $i>0; $i--) { print "read data $i: $cache{$i}\n"; }
+
+ # flush writes now, trivial use since will happen in DESTROY() anyway
+ tied(%cache)->flush();
+
+ # clear cache in 2 ways, write will flush out to disk
+ %cache = ();
+ undef %cache;
+
+=head1 NOTES
+
+Many thanks to all those who helped me make this module a reality,
+including:
+
+ :) Tom Hukins who provided me insight and motivation for
+ finishing this module.
+ :) Jamie McCarthy, for trying to make Tie::Cache be all
+ that it can be.
+ :) Rob Fugina who knows how to "TRULY CACHE".
+ :) Rob Bloodgood, for the TRUE CACHE flush() API
+
+=head1 AUTHOR
+
+Please send any questions or comments to Joshua Chamas
+at chamas at alumni.stanford.org
+
+=head1 COPYRIGHT
+
+Copyright (c) 1999-2002 Joshua Chamas, Chamas Enterprises Inc.
+Sponsored by development on NodeWorks http://www.nodeworks.com
+
+All rights reserved. This program is free software;
+you can redistribute it and/or modify it under the same
+terms as Perl itself.
+
+=cut
+
+
+
+
+
+
diff --git a/MANIFEST b/MANIFEST
new file mode 100644
index 0000000..8bf3607
--- /dev/null
+++ b/MANIFEST
@@ -0,0 +1,9 @@
+CHANGES
+Cache.pm
+MANIFEST
+MANIFEST.SKIP
+MANIFEST.bak
+Makefile.PL
+README
+bench.pl
+test.pl
diff --git a/MANIFEST.SKIP b/MANIFEST.SKIP
new file mode 100644
index 0000000..a4371bb
--- /dev/null
+++ b/MANIFEST.SKIP
@@ -0,0 +1,4 @@
+~$
+blib
+Tie-Cache
+Makefile(\.old)?$
diff --git a/Makefile.PL b/Makefile.PL
new file mode 100755
index 0000000..8c557f7
--- /dev/null
+++ b/Makefile.PL
@@ -0,0 +1,5 @@
+#!perl
+
+use ExtUtils::MakeMaker;
+&WriteMakefile( NAME => "Tie::Cache", VERSION_FROM => 'CHANGES' );
+
diff --git a/README b/README
new file mode 100755
index 0000000..0d2e36a
--- /dev/null
+++ b/README
@@ -0,0 +1,172 @@
+NAME
+ Tie::Cache - LRU Cache in Memory
+
+SYNOPSIS
+ use Tie::Cache;
+ tie %cache, 'Tie::Cache', 100, { Debug => 1 };
+ tie %cache2, 'Tie::Cache', { MaxCount => 100, MaxBytes => 50000 };
+ tie %cache3, 'Tie::Cache', 100, { Debug => 1 , WriteSync => 0};
+
+ # Options ##################################################################
+ #
+ # Debug => 0 - DEFAULT, no debugging output
+ # 1 - prints cache statistics upon destroying
+ # 2 - prints detailed debugging info
+ #
+ # MaxCount => Maximum entries in cache.
+ #
+ # MaxBytes => Maximum bytes taken in memory for cache based on approximate
+ # size of total cache structure in memory
+ #
+ # There is approximately 240 bytes used per key/value pair in the cache for
+ # the cache data structures, so a cache of 5000 entries would take
+ approximately 1.2M plus the size of the data being cached.
+ #
+ # MaxSize => Maximum size of each cache entry. Larger entries are not cached.
+ # This helps prevent much of the cache being flushed when
+ # you set an exceptionally large entry. Defaults to MaxBytes/10
+ #
+ # WriteSync => 1 - DEFAULT, write() when data is dirtied for
+ # TRUE CACHE (see below)
+ # 0 - write() dirty data as late as possible, when leaving
+ # cache, or when cache is being DESTROY'd
+ #
+ ############################################################################
+
+ # cache supports normal tied hash functions
+ $cache{1} = 2; # STORE
+ print "$cache{1}\n"; # FETCH
+
+ # FIRSTKEY, NEXTKEY
+ while(($k, $v) = each %cache) { print "$k: $v\n"; }
+
+ delete $cache{1}; # DELETE
+ %cache = (); # CLEAR
+
+DESCRIPTION
+ This module implements a least recently used (LRU) cache in memory
+ through a tie interface. Any time data is stored in the tied hash, that
+ key/value pair has an entry time associated with it, and as the cache
+ fills up, those members of the cache that are the oldest are removed to
+ make room for new entries.
+
+ So, the cache only "remembers" the last written entries, up to the size
+ of the cache. This can be especially useful if you access great amounts
+ of data, but only access a minority of the data a majority of the time.
+
+ The implementation is a hash, for quick lookups, overlaying a doubly
+ linked list for quick insertion and deletion. On a WinNT PII 300, writes
+ to the hash were done at a rate 3100 per second, and reads from the hash
+ at 6300 per second. Work has been done to optimize refreshing cache
+ entries that are frequently read from, code like $cache{entry}, which
+ moves the entry to the end of the linked list internally.
+
+INSTALLATION
+ Tie::Cache installs easily using the make or nmake commands as shown
+ below. Otherwise, just copy Cache.pm to $PERLLIB/site/Tie
+
+ > perl Makefile.PL
+ > make
+ > make test
+ > make install
+
+ * use nmake for win32
+ ** you can also just copy Cache.pm to $perllib/Tie
+
+BENCHMARKS
+ There is another simpler LRU cache implementation in CPAN,
+ Tie::Cache::LRU, which has the same basic size limiting functionality,
+ and for this functionality, the exact same interface.
+
+ Through healthy competition, Michael G Schwern got Tie::Cache::LRU
+ mostly faster than Tie::Cache on reads & writes:
+
+ Cache Size 5000 Tie::Cache 0.17 Tie::Cache::LRU 0.21
+ 10000 Writes 1.55 CPU sec 1.10 CPU sec
+ 40000 Reads 1.82 CPU sec 1.58 CPU sec
+ 10000 Deletes 0.55 CPU sec 0.59 CPU sec
+
+ Unless you are using TRUE CACHE or MaxBytes functionality, using
+ Tie::Cache::LRU should be an easy replacement for Tie::Cache.
+
+TRUE CACHE
+ To use class as a true cache, which acts as the sole interface for some
+ data set, subclass the real cache off Tie::Cache, with @ISA = qw(
+ 'Tie::Cache' ) notation. Then override the read() method for behavior
+ when there is a cache miss, and the write() method for behavior when the
+ cache's data changes.
+
+ When WriteSync is 1 or TRUE (DEFAULT), write() is called immediately
+ when data in the cache is modified. If set to 0, data that has been
+ modified in the cache gets written out when the entries are deleted or
+ during the DESTROY phase of the cache object, usually at the end of a
+ script.
+
+ To have the dirty data write() periodically while WriteSync is set to 0,
+ there is a flush() cache API call that will flush the dirty writes in
+ this way. Just call the flush() API like:
+
+ my $write_flush_count = tied(%cache)->flush();
+
+ The flush() API was added in the .17 release thanks to Rob Bloodgood.
+
+TRUE CACHE EXAMPLE
+ use Tie::Cache;
+
+ # personalize the Tie::Cache object, by inheriting from it
+ package My::Cache;
+ @ISA = qw(Tie::Cache);
+
+ # override the read() and write() member functions
+ # these tell the cache what to do with a cache miss or flush
+ sub read {
+ my($self, $key) = @_;
+ print "cache miss for $key, read() data\n";
+ rand() * $key;
+ }
+ sub write {
+ my($self, $key, $value) = @_;
+ print "flushing [$key, $value] from cache, write() data\n";
+ }
+
+ my $cache_size = $ARGV[0] || 2;
+ my $num_to_cache = $ARGV[1] || 4;
+ my $Debug = $ARGV[2] || 1;
+
+ tie %cache, 'My::Cache', $cache_size, {Debug => $Debug};
+
+ # load the cache with new data, each through its contents,
+ # and then reload in reverse order.
+ for(1..$num_to_cache) { print "read data $_: $cache{$_}\n" }
+ while(my($k, $v) = each %cache) { print "each data $k: $v\n"; }
+ for(my $i=$num_to_cache; $i>0; $i--) { print "read data $i: $cache{$i}\n"; }
+
+ # flush writes now, trivial use since will happen in DESTROY() anyway
+ tied(%cache)->flush();
+
+ # clear cache in 2 ways, write will flush out to disk
+ %cache = ();
+ undef %cache;
+
+NOTES
+ Many thanks to all those who helped me make this module a reality,
+ including:
+
+ :) Tom Hukins who provided me insight and motivation for
+ finishing this module.
+ :) Jamie McCarthy, for trying to make Tie::Cache be all
+ that it can be.
+ :) Rob Fugina who knows how to "TRULY CACHE".
+ :) Rob Bloodgood, for the TRUE CACHE flush() API
+
+AUTHOR
+ Please send any questions or comments to Joshua Chamas at
+ chamas at alumni.stanford.org
+
+COPYRIGHT
+ Copyright (c) 1999-2002 Joshua Chamas, Chamas Enterprises Inc. Sponsored
+ by development on NodeWorks http://www.nodeworks.com
+
+ All rights reserved. This program is free software; you can redistribute
+ it and/or modify it under the same terms as Perl itself.
+
diff --git a/bench.pl b/bench.pl
new file mode 100755
index 0000000..15b4383
--- /dev/null
+++ b/bench.pl
@@ -0,0 +1,64 @@
+#!/usr/bin/perl
+
+use Tie::Cache;
+use Tie::Cache::LRU;
+use Benchmark;
+use strict;
+
+my $cache_size = 5000;
+my $write_count = $cache_size * 2;
+my $read_count = $write_count * 4;
+my $delete_count = $write_count;
+
+tie my %cache, 'Tie::Cache', $cache_size;
+tie my %cache_lru, 'Tie::Cache::LRU', $cache_size;
+
+my @cols;
+push(@cols, \%cache, \%cache_lru);
+
+printf " %15s", "Cache Size $cache_size";
+for(@cols) {
+ my $module = ref(tied(%$_));
+ printf " %16s %3.2f", $module, eval "\$$module"."::VERSION";
+}
+print "\n";
+
+&report("$write_count Writes", sub {
+ my $cache = shift;
+ for(1..$write_count) {
+ $cache->{$_} = $_;
+ }
+ },
+ @cols,
+ );
+
+&report("$read_count Reads", sub {
+ my $cache = shift;
+ for(1..$read_count) {
+ my $value = $cache->{$_};
+ }
+ },
+ @cols,
+ );
+
+&report("$delete_count Deletes", sub {
+ my $cache = shift;
+ for(1..$delete_count) {
+ my $value = $cache->{$_};
+ }
+ },
+ @cols,
+ );
+
+sub report {
+ my($desc, $sub, @caches) = @_;
+
+ printf(" %-15s", $desc);
+ for my $cache (@caches) {
+ my $timed = timestr(timeit(1, sub { &$sub($cache) }));
+ $timed =~ /([\d\.]+\s+cpu)/i;
+ printf("%18s sec", $1);
+ }
+ print "\n";
+}
+
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..44ba608
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,53 @@
+libtie-cache-perl (0.17-4) unstable; urgency=low
+
+ * FTBFS with perl5.10 (Closes: #467984)
+ - Changes done in debian/rules
+ * Bumped Standard version to 3.7.3.
+ * Changed debian/control
+ - Removed Homepage from description and used as a Standard tag.
+
+ -- Deepak Tripathi <apenguinlinux at gmail.com> Sun, 02 Mar 2008 00:34:14 +0530
+
+libtie-cache-perl (0.17-3) unstable; urgency=low
+
+ * New maintainer. Closes: #279807.
+
+ -- Deepak Tripathi <apenguinlinux at gmail.com> Mon, 09 Jul 2007 18:22:47 +0000
+
+libtie-cache-perl (0.17-2) unstable; urgency=low
+
+ * QA Upload
+ * Changed Maintainer to Debian QA Group <packages at qa.debian.org>
+ * Changed Section from 'interpreters' to 'perl' to match override.
+ * Removed /usr/share/perl5/Tie/bench.pl, this is provided as an example.
+
+ -- Stephen Quinney <stephen at jadevine.org.uk> Wed, 2 Mar 2005 21:38:54 +0000
+
+libtie-cache-perl (0.17-1) unstable; urgency=low
+
+ * New upstream release.
+ * New maintainer,
+ Closes: #151255
+
+ -- Stephen Zander <gibreel at debian.org> Sat, 6 Jul 2002 21:53:30 -0700
+
+libtie-cache-perl (0.15-2) unstable; urgency=low
+
+ * Build-Depends-Indep in debian/control
+
+ -- Piotr Roszatycki <dexter at debian.org> Wed, 17 Oct 2001 14:48:32 +0200
+
+libtie-cache-perl (0.15-1) unstable; urgency=low
+
+ * New upstream release.
+ * New Debian Policy.
+ * Removed yada.
+
+ -- Piotr Roszatycki <dexter at debian.org> Thu, 12 Jul 2001 15:12:55 +0200
+
+libtie-cache-perl (0.08-1) unstable; urgency=low
+
+ * Initial Debian version.
+
+ -- Piotr Roszatycki <dexter at debian.org> Fri, 21 Jul 2000 10:07:58 +0200
+
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..7ed6ff8
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+5
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..6c9a750
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,22 @@
+Source: libtie-cache-perl
+Section: perl
+Priority: extra
+Build-Depends: debhelper (>= 5.0.42)
+Build-Depends-Indep: perl (>= 5.8.8-7)
+Maintainer: Deepak Tripathi <apenguinlinux at gmail.com>
+Standards-Version: 3.7.3
+Homepage: http://www.cpan.org/modules/by-module/Tie/
+
+Package: libtie-cache-perl
+Architecture: all
+Depends: ${perl:Depends}
+Description: perl Tie::Cache - LRU Cache in Memory
+ This module implements a least recently used (LRU) cache in memory
+ through a tie interface. Any time data is stored in the tied hash, that
+ key/value pair has an entry time associated with it, and as the cache
+ fills up, those members of the cache that are the oldest are removed to
+ make room for new entries.
+ .
+ So, the cache only "remembers" the last written entries, up to the size
+ of the cache. This can be especially useful if you access great amounts
+ of data, but only access a minority of the data a majority of the time.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..0b14aaa
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,31 @@
+This is the Debian prepackaged version of perl Tie::Cache - LRU Cache
+in Memory. It was Debianised by Piotr Roszatycki <dexter at debian.org>,
+using files obtained from
+<URL:http://www.cpan.org/modules/by-module/Tie/> and is now maintained
+by Deepak Tripathi <apenguinlinux at gmail.com>
+
+Copyright and licence notice:
+
+ Copyright (c) 1999-2001 Joshua Chamas, Chamas Enterprises Inc.
+
+ All rights reserved. This program is free software; you can redistribute
+ it and/or modify it under the same terms as Perl itself.
+
+ Sponsored by development on NodeWorks http://www.nodeworks.com
+
+ a) the GNU General Public License as published by the Free
+ Software Foundation; either version 1, or (at your option) any
+ later version, or
+
+ b) the "Artistic License" which comes with Debian.
+
+ You should have received a copy of the Artistic License with this
+ Kit, in the file /usr/share/common-licenses/Artistic. If not, I'll be
+ glad to provide one.
+
+ You should also have received a copy of the GNU General Public
+ License along with this system, in /usr/share/common-licenses/GPL; if not,
+ write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+.
diff --git a/debian/examples b/debian/examples
new file mode 100644
index 0000000..70e89ea
--- /dev/null
+++ b/debian/examples
@@ -0,0 +1,2 @@
+test.pl
+bench.pl
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..75ad4f6
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,84 @@
+#!/usr/bin/make -f
+# This debian/rules file is provided as a template for normal perl
+# packages. It was created by Marc Brockschmidt <marc at dch-faq.de> for
+# the Debian Perl Group (http://pkg-perl.alioth.debian.org/) but may
+# be used freely wherever it is useful.
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+# If set to a true value then MakeMaker's prompt function will
+# always return the default without waiting for user input.
+export PERL_MM_USE_DEFAULT=1
+
+PACKAGE=$(shell dh_listpackages)
+
+ifndef PERL
+PERL = /usr/bin/perl
+endif
+
+TMP =$(CURDIR)/debian/$(PACKAGE)
+
+build: build-stamp
+build-stamp:
+ dh_testdir
+
+ # Add commands to compile the package here
+ $(PERL) Makefile.PL INSTALLDIRS=vendor
+ $(MAKE) OPTIMIZE="-Wall -O2 -g"
+
+ touch build-stamp
+
+clean:
+ dh_testdir
+ dh_testroot
+
+ # Add commands to clean up after the build process here
+ [ ! -f Makefile ] || $(MAKE) realclean
+
+ dh_clean build-stamp install-stamp
+
+install: build install-stamp
+install-stamp:
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+
+ # Add commands to install the package into debian/$PACKAGE_NAME here
+ $(MAKE) test
+ $(MAKE) install DESTDIR=$(TMP) PREFIX=/usr
+ rm -f $(TMP)/usr/share/perl5/Tie/bench.pl
+
+ # As this is a architecture independent package, we are not
+ # supposed to install stuff to /usr/lib. MakeMaker creates
+ # the dirs, we delete them from the deb:
+ [ ! -d $(TMP)/usr/lib/perl5 ] || rmdir --ignore-fail-on-non-empty --parents --verbose $(TMP)/usr/lib/perl5
+
+ touch install-stamp
+
+binary-arch:
+# We have nothing to do by default.
+
+binary-indep: build install
+ dh_testdir
+ dh_testroot
+# dh_installcron
+# dh_installmenu
+ dh_installexamples
+ dh_installdocs README
+ dh_installchangelogs CHANGES
+ dh_perl
+ dh_link
+ dh_strip
+ dh_compress
+ dh_fixperms
+ dh_installdeb
+ dh_gencontrol
+ dh_md5sums
+ dh_builddeb
+
+source diff:
+ @echo >&2 'source and diff are obsolete - use dpkg-source -b'; false
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..9e335e6
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,3 @@
+version=3
+http://search.cpan.org/~chamas/Tie-Cache/ \
+.*Tie-Cache-(.*)\.t.*
diff --git a/test.pl b/test.pl
new file mode 100755
index 0000000..017b104
--- /dev/null
+++ b/test.pl
@@ -0,0 +1,199 @@
+#!/usr/local/bin/perl
+
+use Cache;
+use Benchmark;
+use vars qw($Size %cache %count_cache);
+use strict;
+
+$Size = 5000;
+$| = 1;
+
+sub report {
+ my($desc, $count, $sub) = @_;
+ my $timed = timestr(timeit($count, $sub));
+ $timed =~ /([\d\.]+\s+cpu)/i;
+ printf("%-65.65s %s\n", "[ timing ] $desc", $1);
+}
+
+sub test {
+ my($desc, $eval) = @_;
+ my $result = eval { &$eval } ? "OK" : "ERROR - $@";
+ print "$result ... $desc\n";
+}
+
+tie %cache, 'Tie::Cache', {
+ Debug => 0,
+ MaxCount => $Size,
+ MaxSize => 1000,
+ MaxBytes => 5000000,
+# WriteSync => 0,
+# Debug => 2,
+ };
+
+tie %count_cache, 'Tie::Cache', $Size;
+
+
+my %normal;
+
+print "++++ Benchmarking operations on Tie::Cache of size $Size\n\n";
+my $i = 0;
+report("insert of $Size elements into normal %hash", $Size,
+ sub { $normal{++$i} = $i }
+ );
+$i = 0;
+report("insert of $Size elements into MaxCount Tie::Cache", $Size,
+ sub { $count_cache{++$i} = $i }
+ );
+
+$i = 0;
+report("insert of $Size elements into MaxBytes Tie::Cache", $Size,
+ sub { $cache{++$i} = $i }
+ );
+
+
+my $rv;
+$i = 0;
+report("reading $Size elements from normal %hash",
+ $Size, sub { $rv = $normal{++$i} } );
+$i = 0;
+report("reading $Size elements from MaxCount Tie::Cache",
+ $Size, sub { $rv = $count_cache{++$i} } );
+$i = 0;
+report("reading $Size elements from MaxBytes Tie::Cache",
+ $Size, sub { $rv = $cache{++$i} } );
+
+
+$i = 0;
+report("deleting $Size elements from normal %hash",
+ $Size, sub { $rv = delete $normal{++$i} } );
+$i = 0;
+report("deleting $Size elements from MaxCount Tie::Cache",
+ $Size, sub { $rv = delete $count_cache{++$i} }
+ );
+report("deleting $Size elements from MaxBytes Tie::Cache",
+ $Size, sub { $rv = delete $cache{++$i} }
+ );
+
+my $over = $Size * 2;
+$i = 0;
+%cache = ();
+report(
+ "$over inserts overflowing MaxBytes Tie::Cache",
+ $over,
+ sub { $cache{++$i} = $i; }
+ );
+
+$i = 0;
+report(
+ "$over reads from overflowed MaxBytes Tie::Cache",
+ $over,
+ sub { $cache{++$i} }
+ );
+
+report(
+ "$over undef inserts, not affecting MaxBytes Tie::Cache",
+ $over,
+ sub { $cache{rand()} = undef; }
+ );
+
+report(
+ "$over undef reads, not affecting MaxBytes Tie::Cache",
+ $over,
+ sub { $cache{rand()}; }
+ );
+
+print "\n++++ Testing for correctness\n\n";
+my @keys = keys %cache;
+test("number of keys in %cache = $Size",
+ sub { @keys == $Size }
+ );
+test("first key in %cache = ".($Size + 1),
+ sub { $keys[0] == $Size + 1 }
+ );
+test("last key in %cache = ".($Size + $Size),
+ sub { $keys[$#keys] == $Size + $Size }
+ );
+test("first key value in %cache = ".($Size + 1),
+ sub { $cache{$keys[0]} == $Size + 1 }
+ );
+
+delete $cache{$keys[0]};
+test("deleting key $keys[0]; no value defined for deleted key",
+ sub { ! defined $cache{$keys[0]} }
+ );
+test("existance of deleted key = ! exists",
+ sub { ! exists $cache{$Size+1} }
+ );
+@keys = keys %cache;
+test("first key in %cache after delete = ".($Size + 2),
+ sub { $keys[0] == $Size + 2 }
+ );
+test("keys in cache after delete = ".($Size-1),
+ sub { keys %cache == $Size - 1 }
+ );
+
+print "\n++++ Stats for %cache\n\n";
+my $obj = tied(%cache);
+print join("\n", map { "$_:\t$obj->{$_}" } 'count', 'hit', 'miss', 'bytes');
+print "\n";
+
+# personalize the Tie::Cache object, by inheriting from it
+package My::Cache;
+use vars qw(@ISA);
+@ISA = qw(Tie::Cache);
+
+my($read_count, $write_count) = (0,0);
+# override the read() and write() member functions
+# these tell the cache what to do with a cache miss or flush
+sub read {
+ my($self, $key) = @_;
+# print "cache miss for $key, read() data\n";
+ $read_count++;
+ rand() * $key;
+}
+sub write {
+ my($self, $key, $value) = @_;
+ $write_count++;
+# print "flushing [$key, $value] from cache, write() data\n";
+}
+
+print "\n++++ Testing TRUE CACHE ++++\n\n";
+
+my $cache_size = 100;
+my %cache;
+
+tie %cache, 'My::Cache', {
+ MaxBytes => $cache_size * 1000,
+ MaxCount => $cache_size,
+ Debug => 0,
+ WriteSync => 0,
+ };
+
+# load the cache with new data, each through its contents,
+# and then reload in reverse order.
+&main::test("read count == 0 pre reads", sub { $read_count == 0 });
+my $count = 0;
+for(1..$cache_size) {
+ my $value = $cache{$_};
+}
+&main::test("read count == $cache_size post reads", sub { $read_count == $cache_size });
+
+for(1..$cache_size) {
+ my $new_value = int(rand() * 10);
+ $cache{$_} = $new_value;
+}
+
+&main::test("write count == 0 pre flush()", sub { $write_count == 0 });
+tied(%cache)->flush();
+&main::test("write count == $cache_size post flush()", sub { $write_count == $cache_size });
+
+%cache = ();
+
+&main::test("write count == $cache_size post CLEAR()", sub { $write_count == $cache_size });
+
+undef %cache;
+
+print "\n";
+
+exit;
+
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-perl/packages/libtie-cache-perl.git
More information about the Pkg-perl-cvs-commits
mailing list