t/25_tie_return_value.t
t/26_scalar_ref.t
t/27_filehandle.t
-t/28_transactions.t
+t/28_audit_trail.t
t/29_freespace_manager.t
t/30_already_tied.t
t/31_references.t
t/32_dash_ell.t
-t/33_audit_trail.t
+t/33_transactions.t
t/34_transaction_arrays.t
+t/35_transaction_multiple.t
\.old$
^#.*#$
^\.#
+^\.DS_Store
+^__MACOSX
$self->{$param} = $args->{$param};
}
- $self->{engine}->setup_fh( $self );
+ $self->_engine->setup_fh( $self );
$self->{fileobj}->set_db( $self );
$self->unlock();
$self->_fileobj->close;
$self->_fileobj->open;
- $self->{engine}->setup_fh( $self );
+ $self->_engine->setup_fh( $self );
return 1;
}
# Accessor methods
##
+sub _engine {
+ my $self = $_[0]->_get_self;
+ return $self->{engine};
+}
+
sub _fileobj {
my $self = $_[0]->_get_self;
return $self->{fileobj};
##
$self->lock( LOCK_EX );
- my $md5 = $self->{engine}{digest}->($key);
+ my $md5 = $self->_engine->{digest}->($key);
- my $tag = $self->{engine}->find_bucket_list( $self->_base_offset, $md5, { create => 1 } );
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5, { create => 1 } );
# User may be storing a hash, in which case we do not want it run
# through the filtering system
##
# Add key/value to bucket list
##
- $self->{engine}->add_bucket( $tag, $md5, $key, $value, undef, $orig_key );
+ $self->_engine->add_bucket( $tag, $md5, $key, $value, undef, $orig_key );
$self->unlock();
my $self = shift->_get_self;
my ($key, $orig_key) = @_;
- my $md5 = $self->{engine}{digest}->($key);
+ my $md5 = $self->_engine->{digest}->($key);
##
# Request shared lock for reading
##
$self->lock( LOCK_SH );
- my $tag = $self->{engine}->find_bucket_list( $self->_base_offset, $md5 );#, { create => 1 } );
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );#, { create => 1 } );
#XXX This needs to autovivify
if (!$tag) {
$self->unlock();
##
# Get value from bucket list
##
- my $result = $self->{engine}->get_bucket_value( $tag, $md5, $orig_key );
+ my $result = $self->_engine->get_bucket_value( $tag, $md5, $orig_key );
$self->unlock();
##
$self->lock( LOCK_EX );
- my $md5 = $self->{engine}{digest}->($key);
+ my $md5 = $self->_engine->{digest}->($key);
- my $tag = $self->{engine}->find_bucket_list( $self->_base_offset, $md5 );
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
if (!$tag) {
$self->unlock();
return;
##
# Delete bucket
##
- my $value = $self->{engine}->get_bucket_value( $tag, $md5 );
+ my $value = $self->_engine->get_bucket_value( $tag, $md5 );
if (defined $value && !ref($value) && $self->_fileobj->{filter_fetch_value}) {
$value = $self->_fileobj->{filter_fetch_value}->($value);
}
- my $result = $self->{engine}->delete_bucket( $tag, $md5, $orig_key );
+ my $result = $self->_engine->delete_bucket( $tag, $md5, $orig_key );
##
# If this object is an array and the key deleted was on the end of the stack,
my $self = shift->_get_self;
my ($key) = @_;
- my $md5 = $self->{engine}{digest}->($key);
+ my $md5 = $self->_engine->{digest}->($key);
##
# Request shared lock for reading
##
$self->lock( LOCK_SH );
- my $tag = $self->{engine}->find_bucket_list( $self->_base_offset, $md5 );
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
if (!$tag) {
$self->unlock();
##
# Check if bucket exists and return 1 or ''
##
- my $result = $self->{engine}->bucket_exists( $tag, $md5 ) || '';
+ my $result = $self->_engine->bucket_exists( $tag, $md5 ) || '';
$self->unlock();
##
$self->lock( LOCK_EX );
+ if ( $self->_type eq TYPE_HASH ) {
+ my $key = $self->first_key;
+ while ( $key ) {
+ my $next_key = $self->next_key( $key );
+ my $md5 = $self->_engine->{digest}->($key);
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
+ $self->_engine->delete_bucket( $tag, $md5, $key );
+ $key = $next_key;
+ }
+ }
+ else {
+ my $size = $self->FETCHSIZE;
+ for my $key ( map { pack ( $self->_engine->{long_pack}, $_ ) } 0 .. $size - 1 ) {
+ my $md5 = $self->_engine->{digest}->($key);
+ my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
+ $self->_engine->delete_bucket( $tag, $md5, $key );
+ }
+ $self->STORESIZE( 0 );
+ }
#XXX This needs updating to use _release_space
- $self->{engine}->write_tag(
- $self->_base_offset, $self->_type,
- chr(0)x$self->{engine}{index_size},
- );
+# $self->_engine->write_tag(
+# $self->_base_offset, $self->_type,
+# chr(0)x$self->_engine->{index_size},
+# );
$self->unlock();
use DBM::Deep;
my $db = DBM::Deep->new( "foo.db" );
- $db->{key} = 'value'; # tie() style
+ $db->{key} = 'value';
print $db->{key};
- $db->put('key' => 'value'); # OO style
+ $db->put('key' => 'value');
print $db->get('key');
# true multi-level support
42, 99,
];
-=head1 DESCRIPTION
+ tie my %db, 'DBM::Deep', 'foo.db';
+ $db{key} = 'value';
+ print $db{key};
-A unique flat-file database module, written in pure perl. True
-multi-level hash/array support (unlike MLDBM, which is faked), hybrid
-OO / tie() interface, cross-platform FTPable files, and quite fast. Can
-handle millions of keys and unlimited hash levels without significant
-slow-down. Written from the ground-up in pure perl -- this is NOT a
-wrapper around a C-based DBM. Out-of-the-box compatibility with Unix,
-Mac OS X and Windows.
+ tied(%db)->put('key' => 'value');
+ print tied(%db)->get('key');
-=head1 VERSION DIFFERENCES
-
-B<NOTE>: 0.99_01 and above have significant file format differences from 0.98 and
-before. While attempts have been made to be backwards compatible, no guarantees.
+=head1 DESCRIPTION
-=head1 INSTALLATION
+A unique flat-file database module, written in pure perl. True multi-level
+hash/array support (unlike MLDBM, which is faked), hybrid OO / tie()
+interface, cross-platform FTPable files, ACID transactions, and is quite fast.
+Can handle millions of keys and unlimited levels without significant
+slow-down. Written from the ground-up in pure perl -- this is NOT a wrapper
+around a C-based DBM. Out-of-the-box compatibility with Unix, Mac OS X and
+Windows.
-Hopefully you are using Perl's excellent CPAN module, which will download
-and install the module for you. If not, get the tarball, and run these
-commands:
+=head1 VERSION DIFFERENCES
- tar zxf DBM-Deep-*
- cd DBM-Deep-*
- perl Makefile.PL
- make
- make test
- make install
+B<NOTE>: 0.99_01 and above have significant file format differences from 0.983 and
+before. There will be a backwards-compatibility layer in 1.00, but that is
+slated for a later 0.99_x release. This version is B<NOT> backwards compatible
+with 0.983 and before.
=head1 SETUP
=head2 OO CONSTRUCTION
The recommended way to construct a DBM::Deep object is to use the new()
-method, which gets you a blessed, tied hash or array reference.
+method, which gets you a blessed I<and> tied hash (or array) reference.
my $db = DBM::Deep->new( "foo.db" );
hash, unless otherwise specified (see L<OPTIONS> below).
You can pass a number of options to the constructor to specify things like
-locking, autoflush, etc. This is done by passing an inline hash:
+locking, autoflush, etc. This is done by passing an inline hash (or hashref):
my $db = DBM::Deep->new(
- file => "foo.db",
- locking => 1,
+ file => "foo.db",
+ locking => 1,
autoflush => 1
);
constructor. This is required if any options are specified.
See L<OPTIONS> below for the complete list.
-
-
You can also start with an array instead of a hash. For this, you must
specify the C<type> parameter:
Alternately, you can create a DBM::Deep handle by using Perl's built-in
tie() function. The object returned from tie() can be used to call methods,
-such as lock() and unlock(), but cannot be used to assign to the DBM::Deep
-file (as expected with most tie'd objects).
+such as lock() and unlock(). (That object can be retrieved from the tied
+variable at any time using tied() - please see L<perltie> for more info.)
my %hash;
my $db = tie %hash, "DBM::Deep", "foo.db";
open it write-only or append-only, an exception will be thrown immediately as DBM::Deep
needs to read from the fh.
+=item * audit_file / audit_fh
+
+These are just like file/fh, except for auditing. Please see L</AUDITING> for
+more information.
+
=item * file_offset
This is the offset within the file that the DBM::Deep db starts. Most of the time, you will
=item * locking
-Specifies whether locking is to be enabled. DBM::Deep uses Perl's Fnctl flock()
-function to lock the database in exclusive mode for writes, and shared mode for
-reads. Pass any true value to enable. This affects the base DB handle I<and
-any child hashes or arrays> that use the same DB file. This is an optional
-parameter, and defaults to 0 (disabled). See L<LOCKING> below for more.
+Specifies whether locking is to be enabled. DBM::Deep uses Perl's flock()
+function to lock the database in exclusive mode for writes, and shared mode
+for reads. Pass any true value to enable. This affects the base DB handle
+I<and any child hashes or arrays> that use the same DB file. This is an
+optional parameter, and defaults to 0 (disabled). See L<LOCKING> below for
+more.
=item * autoflush
Remember that Perl's C<keys()> function extracts I<every> key from the hash and
pushes them onto an array, all before the loop even begins. If you have an
-extra large hash, this may exhaust Perl's memory. Instead, consider using
+extremely large hash, this may exhaust Perl's memory. Instead, consider using
Perl's C<each()> function, which pulls keys/values one at a time, using very
little memory:
In addition to the I<tie()> interface, you can also use a standard OO interface
to manipulate all aspects of DBM::Deep databases. Each type of object (hash or
array) has its own methods, but both types share the following common methods:
-C<put()>, C<get()>, C<exists()>, C<delete()> and C<clear()>.
+C<put()>, C<get()>, C<exists()>, C<delete()> and C<clear()>. C<fetch()> and
+C<store()> are aliases to C<get()> and C<put()>, respectively.
=over
=item * optimize()
-Recover lost disk space.
+Recover lost disk space. This is important to do, especially if you use
+transactions.
=item * import() / export()
level), and works with both hash and array DB types.
B<Note:> Make sure your existing structure has no circular references in it.
-These will cause an infinite loop when importing.
+These will cause an infinite loop when importing. There are plans to fix this
+in a later release.
=head2 EXPORTING
in-memory Perl structure.
B<Note:> Make sure your database has no circular references in it.
-These will cause an infinite loop when exporting.
+These will cause an infinite loop when exporting. There are plans to fix this
+in a later release.
=head1 FILTERS
C<export()> methods) will result in an infinite loop. This will be fixed in
a future release.
+=head1 AUDITING
+
+New in 0.99_01 is the ability to audit your databases actions. By passing in
+audit_file (or audit_fh) to the constructor, all actions will be logged to
+that file. The format is one that is suitable for eval'ing against the
+database to replay the actions. Please see t/28_audit_trail.t for an example
+of how to do this.
+
+=head1 TRANSACTIONS
+
+New in 0.99_01 is ACID transactions. Every DBM::Deep object is completely
+transaction-ready - it is not an option you have to turn on. Three new methods
+have been added to support them. They are:
+
+=over 4
+
+=item * begin_work()
+
+This starts a transaction.
+
+=item * commit()
+
+This applies the changes done within the transaction to the mainline and ends
+the transaction.
+
+=item * rollback()
+
+This discards the changes done within the transaction to the mainline and ends
+the transaction.
+
+=back
+
+Transactions in DBM::Deep are done using the MVCC method, the same method used
+by the InnoDB MySQL table type.
+
=head1 CAVEATS / ISSUES / BUGS
This section describes all the known issues with DBM::Deep. If you have found
=head1 CODE COVERAGE
-We use B<Devel::Cover> to test the code coverage of our tests, below is the
-B<Devel::Cover> report on this module's test suite.
+B<Devel::Cover> is used to test the code coverage of the tests. Below is the
+B<Devel::Cover> report on this distribution's test suite.
- ----------------------------------- ------ ------ ------ ------ ------ ------
- File stmt bran cond sub time total
- ----------------------------------- ------ ------ ------ ------ ------ ------
- blib/lib/DBM/Deep.pm 94.9 80.6 73.0 100.0 37.9 90.4
- blib/lib/DBM/Deep/Array.pm 100.0 91.1 100.0 100.0 18.2 98.1
- blib/lib/DBM/Deep/Engine.pm 98.9 87.3 80.0 100.0 34.2 95.2
- blib/lib/DBM/Deep/Hash.pm 100.0 87.5 100.0 100.0 9.7 97.3
- Total 97.9 85.9 79.7 100.0 100.0 94.3
- ----------------------------------- ------ ------ ------ ------ ------ ------
+ ---------------------------- ------ ------ ------ ------ ------ ------ ------
+ File stmt bran cond sub pod time total
+ ---------------------------- ------ ------ ------ ------ ------ ------ ------
+ blib/lib/DBM/Deep.pm 96.2 89.0 75.0 95.8 89.5 36.0 92.9
+ blib/lib/DBM/Deep/Array.pm 96.1 88.3 100.0 96.4 100.0 15.9 94.7
+ blib/lib/DBM/Deep/Engine.pm 96.6 86.6 89.5 100.0 0.0 20.0 91.0
+ blib/lib/DBM/Deep/File.pm 99.4 88.3 55.6 100.0 0.0 19.6 89.5
+ blib/lib/DBM/Deep/Hash.pm 98.5 83.3 100.0 100.0 100.0 8.5 96.3
+ Total 96.9 87.4 81.2 98.0 38.5 100.0 92.1
+ ---------------------------- ------ ------ ------ ------ ------ ------ ------
=head1 MORE INFORMATION
Check out the DBM::Deep Google Group at L<http://groups.google.com/group/DBM-Deep>
-or send email to L<DBM-Deep@googlegroups.com>.
+or send email to L<DBM-Deep@googlegroups.com>. You can also visit #dbm-deep on
+irc.perl.org.
-=head1 AUTHORS
-
-Joseph Huckaby, L<jhuckaby@cpan.org>
+=head1 MAINTAINERS
Rob Kinyon, L<rkinyon@cpan.org>
+Originally written by Joseph Huckaby, L<jhuckaby@cpan.org>
+
Special thanks to Adam Sah and Rich Gaushell! You know why :-)
=head1 SEE ALSO
}
}
- $key = pack($self->{engine}{long_pack}, $key);
+ $key = pack($self->_engine->{long_pack}, $key);
}
my $rv = $self->SUPER::FETCH( $key, $orig_key );
}
}
- $key = pack($self->{engine}{long_pack}, $key);
+ $key = pack($self->_engine->{long_pack}, $key);
}
my $rv = $self->SUPER::STORE( $key, $value, $orig_key );
}
}
- $key = pack($self->{engine}{long_pack}, $key);
+ $key = pack($self->_engine->{long_pack}, $key);
}
my $rv = $self->SUPER::EXISTS( $key );
}
}
- $key = pack($self->{engine}{long_pack}, $key);
+ $key = pack($self->_engine->{long_pack}, $key);
}
my $rv = $self->SUPER::DELETE( $key, $orig );
$self->unlock;
if ($packed_size) {
- return int(unpack($self->{engine}{long_pack}, $packed_size));
+ return int(unpack($self->_engine->{long_pack}, $packed_size));
}
return 0;
my $SAVE_FILTER = $self->_fileobj->{filter_store_value};
$self->_fileobj->{filter_store_value} = undef;
- my $result = $self->STORE('length', pack($self->{engine}{long_pack}, $new_length), 'length');
+ my $result = $self->STORE('length', pack($self->_engine->{long_pack}, $new_length), 'length');
$self->_fileobj->{filter_store_value} = $SAVE_FILTER;
# The 2**8 here indicates the number of different characters in the
# current hashing algorithm
#XXX Does this need to be updated with different hashing algorithms?
- $self->{index_size} = (2**8) * $self->{long_size};
- $self->{bucket_size} = $self->{hash_size} + $self->{long_size} * 3;
+ $self->{hash_chars_used} = (2**8);
+ $self->{index_size} = $self->{hash_chars_used} * $self->{long_size};
+
+ $self->{bucket_size} = $self->{hash_size} + $self->{long_size} * 2;
$self->{bucket_list_size} = $self->{max_buckets} * $self->{bucket_size};
+ $self->{key_size} = $self->{long_size} * 2;
+ $self->{keyloc_size} = $self->{max_buckets} * $self->{key_size};
+
return;
}
sub write_file_header {
my $self = shift;
- my $loc = $self->_fileobj->request_space( length( SIG_FILE ) + 21 );
+ my $loc = $self->_fileobj->request_space( length( SIG_FILE ) + 33 );
$self->_fileobj->print_at( $loc,
SIG_FILE,
SIG_HEADER,
pack('N', 1), # header version
- pack('N', 12), # header size
- pack('N', 0), # currently running transaction IDs
+ pack('N', 24), # header size
+ pack('N4', 0, 0, 0, 0), # currently running transaction IDs
pack('n', $self->{long_size}),
pack('A', $self->{long_pack}),
pack('n', $self->{data_size}),
}
my $buffer2 = $self->_fileobj->read_at( undef, $size );
- my ($running_transactions, @values) = unpack( 'N n A n A n', $buffer2 );
+ my ($a1, $a2, $a3, $a4, @values) = unpack( 'N4 n A n A n', $buffer2 );
$self->_fileobj->set_transaction_offset( 13 );
return {
signature => $sig,
+ #XXX Is this even used?
size => $size,
offset => $offset + SIG_SIZE + $self->{data_size},
content => $content
return {
signature => $sig,
+ #XXX Is this even used?
size => $size,
offset => $offset + SIG_SIZE + $self->{data_size},
content => $fileobj->read_at( undef, $size ),
};
}
+sub find_keyloc {
+ my $self = shift;
+ my ($tag, $transaction_id) = @_;
+ $transaction_id = $self->_fileobj->transaction_id
+ unless defined $transaction_id;
+
+ for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) {
+ my ($loc, $trans_id, $is_deleted) = unpack(
+ "$self->{long_pack} C C",
+ substr( $tag->{content}, $i * $self->{key_size}, $self->{key_size} ),
+ );
+
+ if ( $loc == 0 ) {
+ return ( $loc, $is_deleted, $i * $self->{key_size} );
+ }
+
+ next if $transaction_id != $trans_id;
+
+ return ( $loc, $is_deleted, $i * $self->{key_size} );
+ }
+
+ return;
+}
+
sub add_bucket {
##
# Adds one key/value pair to bucket list, given offset, MD5 digest of key,
##
my $self = shift;
my ($tag, $md5, $plain_key, $value, $deleted, $orig_key) = @_;
- $deleted ||= 0;
# This verifies that only supported values will be stored.
{
my $fileobj = $self->_fileobj;
- my $actual_length = $self->_length_needed( $value, $plain_key );
-
#ACID - This is a mutation. Must only find the exact transaction
- my ($subloc, $offset, $size,$is_deleted) = $self->_find_in_buckets( $tag, $md5, 1 );
+ my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5, 1 );
my @transactions;
if ( $fileobj->transaction_id == 0 ) {
}
# $self->_release_space( $size, $subloc );
- # Updating a known md5
#XXX This needs updating to use _release_space
+
my $location;
- if ( $subloc ) {
- if ($actual_length <= $size) {
- $location = $subloc;
- }
- else {
- $location = $fileobj->request_space( $actual_length );
+ my $size = $self->_length_needed( $value, $plain_key );
- $fileobj->print_at( $tag->{offset} + $offset + $self->{hash_size},
- pack($self->{long_pack}, $location ),
- pack($self->{long_pack}, $actual_length ),
- pack('n n', $fileobj->transaction_id, $deleted ),
- );
+ # Updating a known md5
+ if ( $keyloc ) {
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
+
+ if ( @transactions ) {
+ my $old_value = $self->read_from_loc( $subloc, $orig_key );
+ my $old_size = $self->_length_needed( $old_value, $plain_key );
+
+ for my $trans_id ( @transactions ) {
+ my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id );
+ unless ($loc) {
+ my $location2 = $fileobj->request_space( $old_size );
+ $fileobj->print_at( $keytag->{offset} + $offset2,
+ pack($self->{long_pack}, $location2 ),
+ pack( 'C C', $trans_id, 0 ),
+ );
+ $self->write_value( $location2, $plain_key, $old_value, $orig_key );
+ }
+ }
}
- my $old_value = $self->read_from_loc( $subloc, $orig_key );
- for ( @transactions ) {
- my $tag2 = $self->load_tag( $tag->{offset} - SIG_SIZE - $self->{data_size} );
- $fileobj->{transaction_id} = $_;
- $self->add_bucket( $tag2, $md5, $orig_key, $old_value, undef, $orig_key );
- $fileobj->{transaction_id} = 0;
- }
- $tag = $self->load_tag( $tag->{offset} - SIG_SIZE - $self->{data_size} );
+ $location = $self->_fileobj->request_space( $size );
+ #XXX This needs to be transactionally-aware in terms of which keytag->{offset} to use
+ $fileobj->print_at( $keytag->{offset} + $offset,
+ pack($self->{long_pack}, $location ),
+ pack( 'C C', $fileobj->transaction_id, 0 ),
+ );
}
# Adding a new md5
- elsif ( defined $offset ) {
- $location = $fileobj->request_space( $actual_length );
+ else {
+ my $keyloc = $fileobj->request_space( $self->tag_size( $self->{keyloc_size} ) );
- $fileobj->print_at( $tag->{offset} + $offset,
- $md5,
- pack($self->{long_pack}, $location ),
- pack($self->{long_pack}, $actual_length ),
- pack('n n', $fileobj->transaction_id, $deleted ),
+ # The bucket fit into list
+ if ( defined $offset ) {
+ $fileobj->print_at( $tag->{offset} + $offset,
+ $md5, pack( $self->{long_pack}, $keyloc ),
+ );
+ }
+ # If bucket didn't fit into list, split into a new index level
+ else {
+ $self->split_index( $tag, $md5, $keyloc );
+ }
+
+ my $keytag = $self->write_tag(
+ $keyloc, SIG_KEYS, chr(0)x$self->{keyloc_size},
);
- for ( @transactions ) {
- my $tag2 = $self->load_tag( $tag->{offset} - SIG_SIZE - $self->{data_size} );
- $fileobj->{transaction_id} = $_;
- $self->add_bucket( $tag2, $md5, '', '', 1, $orig_key );
- $fileobj->{transaction_id} = 0;
+ $location = $self->_fileobj->request_space( $size );
+ $fileobj->print_at( $keytag->{offset},
+ pack( $self->{long_pack}, $location ),
+ pack( 'C C', $fileobj->transaction_id, 0 ),
+ );
+
+ my $offset = 1;
+ for my $trans_id ( @transactions ) {
+ $fileobj->print_at( $keytag->{offset} + $self->{key_size} * $offset++,
+ pack( $self->{long_pack}, -1 ),
+ pack( 'C C', $trans_id, 1 ),
+ );
}
- $tag = $self->load_tag( $tag->{offset} - SIG_SIZE - $self->{data_size} );
- }
- # If bucket didn't fit into list, split into a new index level
- # split_index() will do the $self->_fileobj->request_space() call
- #XXX It also needs to be transactionally aware
- else {
- $location = $self->split_index( $md5, $tag );
}
$self->write_value( $location, $plain_key, $value, $orig_key );
sub split_index {
my $self = shift;
- my ($md5, $tag) = @_;
+ my ($tag, $md5, $keyloc) = @_;
my $fileobj = $self->_fileobj;
chr(0)x$self->{index_size},
);
- my $newtag_loc = $fileobj->request_space(
- $self->tag_size( $self->{bucket_list_size} ),
- );
-
my $keys = $tag->{content}
- . $md5 . pack($self->{long_pack}, $newtag_loc)
- . pack($self->{long_pack}, 0) # size
- . pack($self->{long_pack}, 0); # transaction ID
+ . $md5 . pack($self->{long_pack}, $keyloc);
my @newloc = ();
BUCKET:
# The <= here is deliberate - we have max_buckets+1 keys to iterate
# through, unlike every other loop that uses max_buckets as a stop.
for (my $i = 0; $i <= $self->{max_buckets}; $i++) {
- my ($key, $old_subloc, $size) = $self->_get_key_subloc( $keys, $i );
+ my ($key, $old_subloc) = $self->_get_key_subloc( $keys, $i );
die "[INTERNAL ERROR]: No key in split_index()\n" unless $key;
die "[INTERNAL ERROR]: No subloc in split_index()\n" unless $old_subloc;
my $subkeys = $fileobj->read_at( $newloc[$num], $self->{bucket_list_size} );
# This is looking for the first empty spot
- my ($subloc, $offset, $size) = $self->_find_in_buckets(
+ my ($subloc, $offset) = $self->_find_in_buckets(
{ content => $subkeys }, '',
);
$tag->{offset} - SIG_SIZE - $self->{data_size},
);
- return $newtag_loc;
+ return 1;
}
sub read_from_loc {
my ($tag, $md5, $orig_key) = @_;
#ACID - This is a read. Can find exact or HEAD
- my ($subloc, $offset, $size, $is_deleted) = $self->_find_in_buckets( $tag, $md5 );
+ my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5 );
- if ( !$subloc ) {
+ if ( !$keyloc ) {
#XXX Need to use real key
-# $self->add_bucket( $tag, $md5, $orig_key, undef, undef, $orig_key );
+# $self->add_bucket( $tag, $md5, $orig_key, undef, $orig_key );
# return;
}
- elsif ( !$is_deleted ) {
- return $self->read_from_loc( $subloc, $orig_key );
+# elsif ( !$is_deleted ) {
+ else {
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted) = $self->find_keyloc( $keytag );
+ if (!$subloc) {
+ ($subloc, $is_deleted) = $self->find_keyloc( $keytag, 0 );
+ }
+ if ( $subloc && !$is_deleted ) {
+ return $self->read_from_loc( $subloc, $orig_key );
+ }
}
return;
#ACID - Although this is a mutation, we must find any transaction.
# This is because we need to mark something as deleted that is in the HEAD.
- my ($subloc, $offset, $size,$is_deleted) = $self->_find_in_buckets( $tag, $md5 );
+ my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5 );
- return if !$subloc;
+ return if !$keyloc;
my $fileobj = $self->_fileobj;
}
if ( $fileobj->transaction_id == 0 ) {
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
my $value = $self->read_from_loc( $subloc, $orig_key );
- for (@transactions) {
- $fileobj->{transaction_id} = $_;
- #XXX Need to use real key
- $self->add_bucket( $tag, $md5, $orig_key, $value, undef, $orig_key );
- $fileobj->{transaction_id} = 0;
+ my $size = $self->_length_needed( $value, $orig_key );
+
+ for my $trans_id ( @transactions ) {
+ my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id );
+ unless ($loc) {
+ my $location2 = $fileobj->request_space( $size );
+ $fileobj->print_at( $keytag->{offset} + $offset2,
+ pack($self->{long_pack}, $location2 ),
+ pack( 'C C', $trans_id, 0 ),
+ );
+ $self->write_value( $location2, $orig_key, $value, $orig_key );
+ }
}
- $tag = $self->load_tag( $tag->{offset} - SIG_SIZE - $self->{data_size} );
- #XXX This needs _release_space() for the value and anything below
- $fileobj->print_at(
- $tag->{offset} + $offset,
- substr( $tag->{content}, $offset + $self->{bucket_size} ),
- chr(0) x $self->{bucket_size},
+ $keytag = $self->load_tag( $keyloc );
+ ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
+ $fileobj->print_at( $keytag->{offset} + $offset,
+ substr( $keytag->{content}, $offset + $self->{key_size} ),
+ chr(0) x $self->{key_size},
);
}
else {
- $self->add_bucket( $tag, $md5, '', '', 1, $orig_key );
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
+ $fileobj->print_at( $keytag->{offset} + $offset,
+ pack($self->{long_pack}, -1 ),
+ pack( 'C C', $fileobj->transaction_id, 1 ),
+ );
}
return 1;
my ($tag, $md5) = @_;
#ACID - This is a read. Can find exact or HEAD
- my ($subloc, $offset, $size, $is_deleted) = $self->_find_in_buckets( $tag, $md5 );
+ my ($keyloc) = $self->_find_in_buckets( $tag, $md5 );
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
+ if ( !$subloc ) {
+ ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag, 0 );
+ }
return ($subloc && !$is_deleted) && 1;
}
-sub find_bucket_list {
+sub find_blist {
##
# Locate offset for bucket list, given digested key
##
# Scan index and recursively step into deeper levels, looking for next key.
##
my $self = shift;
- my ($obj, $offset, $ch, $force_return_next) = @_;
+ my ($xxxx, $offset, $ch, $force_return_next) = @_;
my $tag = $self->load_tag( $offset );
if ($tag->{signature} ne SIG_BLIST) {
- my $content = $tag->{content};
- my $start = $obj->{return_next} ? 0 : ord(substr($obj->{prev_md5}, $ch, 1));
+ my $start = $xxxx->{return_next} ? 0 : ord(substr($xxxx->{prev_md5}, $ch, 1));
- for (my $idx = $start; $idx < (2**8); $idx++) {
+ for (my $idx = $start; $idx < $self->{hash_chars_used}; $idx++) {
my $subloc = unpack(
$self->{long_pack},
substr(
- $content,
+ $tag->{content},
$idx * $self->{long_size},
$self->{long_size},
),
if ($subloc) {
my $result = $self->traverse_index(
- $obj, $subloc, $ch + 1, $force_return_next,
+ $xxxx, $subloc, $ch + 1, $force_return_next,
);
- if (defined($result)) { return $result; }
+ if (defined $result) { return $result; }
}
} # index loop
- $obj->{return_next} = 1;
+ $xxxx->{return_next} = 1;
}
# This is the bucket list
else {
my $keys = $tag->{content};
- if ($force_return_next) { $obj->{return_next} = 1; }
+ if ($force_return_next) { $xxxx->{return_next} = 1; }
##
# Iterate through buckets, looking for a key match
##
my $transaction_id = $self->_fileobj->transaction_id;
for (my $i = 0; $i < $self->{max_buckets}; $i++) {
- my ($key, $subloc, $size, $trans_id, $is_deleted) = $self->_get_key_subloc( $keys, $i );
-
- next if $is_deleted;
-#XXX Need to find all the copies of this key to find out if $transaction_id has it
-#XXX marked as deleted, in use, or what.
- next if $trans_id && $trans_id != $transaction_id;
+ my ($key, $keyloc) = $self->_get_key_subloc( $keys, $i );
# End of bucket list -- return to outer loop
- if (!$subloc) {
- $obj->{return_next} = 1;
+ if (!$keyloc) {
+ $xxxx->{return_next} = 1;
last;
}
# Located previous key -- return next one found
- elsif ($key eq $obj->{prev_md5}) {
- $obj->{return_next} = 1;
+ elsif ($key eq $xxxx->{prev_md5}) {
+ $xxxx->{return_next} = 1;
next;
}
# Seek to bucket location and skip over signature
- elsif ($obj->{return_next}) {
+ elsif ($xxxx->{return_next}) {
my $fileobj = $self->_fileobj;
+ my $keytag = $self->load_tag( $keyloc );
+ my ($subloc, $is_deleted) = $self->find_keyloc( $keytag );
+ if ( $subloc == 0 ) {
+ ($subloc, $is_deleted) = $self->find_keyloc( $keytag, 0 );
+ }
+ next if $is_deleted;
+
# Skip over value to get to plain key
my $sig = $fileobj->read_at( $subloc, SIG_SIZE );
}
}
- $obj->{return_next} = 1;
+ $xxxx->{return_next} = 1;
}
return;
my $self = shift;
my ($obj) = @_;
- $obj->{prev_md5} = $_[1] ? $_[1] : undef;
- $obj->{return_next} = 0;
-
##
# If the previous key was not specifed, start at the top and
# return the first one found.
##
- if (!$obj->{prev_md5}) {
- $obj->{prev_md5} = chr(0) x $self->{hash_size};
- $obj->{return_next} = 1;
+ my $temp;
+ if ( @_ > 1 ) {
+ $temp = {
+ prev_md5 => $_[1],
+ return_next => 0,
+ };
+ }
+ else {
+ $temp = {
+ prev_md5 => chr(0) x $self->{hash_size},
+ return_next => 1,
+ };
}
- return $self->traverse_index( $obj, $obj->_base_offset, 0 );
+ return $self->traverse_index( $temp, $obj->_base_offset, 0 );
}
# Utilities
my $self = shift;
my ($keys, $idx) = @_;
- my ($key, $subloc, $size, $transaction_id, $is_deleted) = unpack(
+ return unpack(
# This is 'a', not 'A'. Please read the pack() documentation for the
# difference between the two and why it's important.
- "a$self->{hash_size} $self->{long_pack}2 n2",
+ "a$self->{hash_size} $self->{long_pack}",
substr(
$keys,
($idx * $self->{bucket_size}),
$self->{bucket_size},
),
);
-
- return ($key, $subloc, $size, $transaction_id, $is_deleted);
}
sub _find_in_buckets {
my $self = shift;
- my ($tag, $md5, $exact) = @_;
- $exact ||= 0;
-
- my $trans_id = $self->_fileobj->transaction_id;
-
- my @zero;
+ my ($tag, $md5) = @_;
BUCKET:
for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) {
- my ($key, $subloc, $size, $transaction_id, $is_deleted) = $self->_get_key_subloc(
+ my ($key, $subloc) = $self->_get_key_subloc(
$tag->{content}, $i,
);
- my @rv = ($subloc, $i * $self->{bucket_size}, $size, $is_deleted);
+ my @rv = ($subloc, $i * $self->{bucket_size});
unless ( $subloc ) {
- if ( !$exact && @zero && $trans_id ) {
- @rv = ($zero[2], $zero[0] * $self->{bucket_size},$zero[3],$is_deleted);
- }
return @rv;
}
next BUCKET if $key ne $md5;
- # Save off the HEAD in case we need it.
- @zero = ($i,$key,$subloc,$size,$transaction_id,$is_deleted) if $transaction_id == 0;
-
- next BUCKET if $transaction_id != $trans_id;
-
return @rv;
}
$self->lock;
my $buffer = $self->read_at( $self->{transaction_offset}, 4 );
- my ($next, @trans) = unpack( 'C C C C', $buffer );
+ my ($next, @trans) = unpack( 'C C C C C C C C C C C C C C C C', $buffer );
$self->{transaction_id} = ++$next;
$self->print_at(
$self->{transaction_offset},
- pack( 'C C C C', $next, @trans),
+ pack( 'C C C C C C C C C C C C C C C C', $next, @trans),
);
$self->unlock;
$self->lock;
my $buffer = $self->read_at( $self->{transaction_offset}, 4 );
- my ($next, @trans) = unpack( 'C C C C', $buffer );
+ my ($next, @trans) = unpack( 'C C C C C C C C C C C C C C C C', $buffer );
@trans = grep { $_ != $self->{transaction_id} } @trans;
$self->print_at(
$self->{transaction_offset},
- pack( 'C C C C', $next, @trans),
+ pack( 'C C C C C C C C C C C C C C C C', $next, @trans),
);
#XXX Need to free the space used by the current transaction
$self->lock;
-    my $buffer = $self->read_at( $self->{transaction_offset}, 4 );
+    my $buffer = $self->read_at( $self->{transaction_offset}, 16 );
- my ($next, @trans) = unpack( 'C C C C', $buffer );
+ my ($next, @trans) = unpack( 'C C C C C C C C C C C C C C C C', $buffer );
$self->unlock;
##
$self->lock( $self->LOCK_SH );
- my $result = $self->{engine}->get_next_key($self);
+ my $result = $self->_engine->get_next_key($self);
$self->unlock();
? $self->_fileobj->{filter_store_key}->($_[0])
: $_[0];
- my $prev_md5 = $self->{engine}{digest}->($prev_key);
+ my $prev_md5 = $self->_engine->{digest}->($prev_key);
##
# Request shared lock for reading
##
$self->lock( $self->LOCK_SH );
- my $result = $self->{engine}->get_next_key( $self, $prev_md5 );
+ my $result = $self->_engine->get_next_key( $self, $prev_md5 );
$self->unlock();
# DBM::Deep Test
##
use strict;
-use Test::More tests => 36;
+use Test::More tests => 38;
use Test::Exception;
use t::common qw( new_fh );
is( $db->get("key1"), "value1", "get() works with hash assignment" );
is( $db->fetch("key1"), "value1", "... fetch() works with hash assignment" );
is( $db->{key1}, "value1", "... and hash-access also works" );
-
$db->put("key2", undef);
is( $db->get("key2"), undef, "get() works with put()" );
is( $db->fetch("key2"), undef, "... fetch() works with put()" );
##
# count keys
##
+
is( scalar keys %$db, 3, "keys() works against tied hash" );
##
##
my $temphash = {};
while ( my ($key, $value) = each %$db ) {
- $temphash->{$key} = $value;
+ $temphash->{$key} = $value;
}
is( $temphash->{key1}, 'value1', "First key copied successfully using tied interface" );
$temphash = {};
my $key = $db->first_key();
while ($key) {
- $temphash->{$key} = $db->get($key);
- $key = $db->next_key($key);
+ $temphash->{$key} = $db->get($key);
+ $key = $db->next_key($key);
}
is( $temphash->{key1}, 'value1', "First key copied successfully using OO interface" );
-is( delete $db->{key2}, undef, "delete through tied inteface works" );
-is( $db->delete("key1"), 'value1', "delete through OO inteface works" );
+is( delete $db->{key2}, undef, "delete through tied interface works" );
+is( $db->delete("key1"), 'value1', "delete through OO interface works" );
is( $db->{key3}, 'value3', "The other key is still there" );
+ok( !exists $db->{key1}, "key1 doesn't exist" );
+ok( !exists $db->{key2}, "key2 doesn't exist" );
is( scalar keys %$db, 1, "After deleting two keys, 1 remains" );
my $next_key = $db->next_key($first_key);
ok(
- (($first_key eq "key1") || ($first_key eq "key2")) &&
- (($next_key eq "key1") || ($next_key eq "key2")) &&
- ($first_key ne $next_key)
+ (($first_key eq "key1") || ($first_key eq "key2")) &&
+ (($next_key eq "key1") || ($next_key eq "key2")) &&
+ ($first_key ne $next_key)
,"keys() still works if you replace long values with shorter ones"
);
use_ok( 'DBM::Deep' );
+diag "This test can take up to a minute to run. Please be patient.";
+
my ($fh, $filename) = new_fh();
my $db = DBM::Deep->new(
file => $filename,
}
is( $count, $max_keys, "We read $count keys" );
-
my @keys = sort keys %$db;
cmp_ok( scalar(@keys), '==', $max_keys + 1, "Number of keys is correct" );
my @control = sort map { "hello $_" } 0 .. $max_keys;
$db->[0] = [ 1 .. 3 ];
$db->[1] = { a => 'foo' };
-is( $db->[1]->fetch('a'), 'foo', "Reuse of same space with hash successful" );
is( $db->[0]->length, 3, "Reuse of same space with array successful" );
-
+is( $db->[1]->fetch('a'), 'foo', "Reuse of same space with hash successful" );
# Test autovivification
$db->[9999]{bar} = 1;
use_ok( 'DBM::Deep' );
+diag "This test can take up to a minute to run. Please be patient.";
+
my ($fh, $filename) = new_fh();
my $db = DBM::Deep->new(
file => $filename,
use strict;
-use Test::More tests => 58;
+use Test::More tests => 62;
use Test::Deep;
use t::common qw( new_fh );
is( $db2->{other_x}, 'foo', "DB2 set other_x within DB1's transaction, so DB2 can see it" );
is( $db1->{other_x}, undef, "Since other_x was added after the transaction began, DB1 doesn't see it." );
-TODO: {
- local $TODO = "keys aren't working yet";
cmp_bag( [ keys %$db1 ], [qw( x )], "DB1 keys correct" );
-}
cmp_bag( [ keys %$db2 ], [qw( x other_x )], "DB2 keys correct" );
$db1->rollback;
ok( !exists $db1->{x}, "DB1 deleted X in a transaction, so it can't see it anymore" );
is( $db2->{x}, 'z', "But, DB2 can still see it" );
-TODO: {
- local $TODO = "keys aren't working yet";
cmp_bag( [ keys %$db1 ], [qw( other_x )], "DB1 keys correct" );
-}
cmp_bag( [ keys %$db2 ], [qw( x )], "DB2 keys correct" );
$db1->rollback;
delete $db1->{x};
ok( !exists $db1->{x}, "DB1 deleted X in a transaction, so it can't see it anymore" );
+#__END__
is( $db2->{x}, 'z', "But, DB2 can still see it" );
-TODO: {
- local $TODO = "keys aren't working yet";
cmp_bag( [ keys %$db1 ], [qw()], "DB1 keys correct" );
-}
cmp_bag( [ keys %$db2 ], [qw( x )], "DB2 keys correct" );
$db1->commit;
cmp_bag( [ keys %$db1 ], [qw( foo )], "DB1 keys correct" );
cmp_bag( [ keys %$db2 ], [qw( foo )], "DB2 keys correct" );
-TODO: {
- todo_skip 'Still need to work on clear()', 4;
-
$db1->begin_work;
%$db1 = (); # clear()
cmp_bag( [ keys %$db1 ], [qw( foo )], "DB1 keys correct" );
cmp_bag( [ keys %$db2 ], [qw( foo )], "DB2 keys correct" );
-}
-
$db1->optimize;
+
is( $db1->{foo}, 'bar', 'After optimize, everything is ok' );
is( $db2->{foo}, 'bar', 'After optimize, everything is ok' );
* Two transactions running at the same time
* Doing a clear on the head while a transaction is running
# More than just two keys
-* Arrays (in particular, how is length handled?)
use strict;
-use Test::More tests => 17;
+use Test::More tests => 47;
use Test::Deep;
use t::common qw( new_fh );
cmp_ok( scalar(@$db1), '==', 2, "DB1 now has 2 elements" );
cmp_ok( scalar(@$db2), '==', 2, "DB2 still has 2 elements" );
+$db1->begin_work;
+
+ is( $db1->[0], 'y', "DB1 transaction started, no actions - DB1's 0 is Y" );
+ is( $db2->[0], 'y', "DB1 transaction started, no actions - DB2's 0 is Y" );
+
+ $db1->[2] = 'z';
+ is( $db1->[2], 'z', "Within DB1 transaction, DB1's 2 is Z" );
+ ok( !exists $db2->[2], "Within DB1 transaction, DB2 cannot see 2" );
+
+ cmp_ok( scalar(@$db1), '==', 3, "DB1 has 3 elements" );
+ cmp_ok( scalar(@$db2), '==', 2, "DB2 has 2 elements" );
+
+$db1->commit;
+
+is( $db1->[0], 'y', "After commit, DB1's 0 is Y" );
+is( $db2->[0], 'y', "After commit, DB2's 0 is Y" );
+
+is( $db1->[2], 'z', "After DB1 transaction is over, DB1 can still see 2" );
+is( $db2->[2], 'z', "After DB1 transaction is over, DB2 can now see 2" );
+
+cmp_ok( scalar(@$db1), '==', 3, "DB1 now has 3 elements" );
+cmp_ok( scalar(@$db2), '==', 3, "DB2 now has 3 elements" );
+
+$db1->begin_work;
+
+ push @$db1, 'foo';
+ unshift @$db1, 'bar';
+
+ cmp_ok( scalar(@$db1), '==', 5, "DB1 now has 5 elements" );
+ cmp_ok( scalar(@$db2), '==', 3, "DB2 still has 3 elements" );
+
+ is( $db1->[0], 'bar' );
+ is( $db1->[-1], 'foo' );
+
+$db1->rollback;
+
+cmp_ok( scalar(@$db1), '==', 3, "DB1 is back to 3 elements" );
+cmp_ok( scalar(@$db2), '==', 3, "DB2 still has 3 elements" );
+
+$db1->begin_work;
+
+ push @$db1, 'foo';
+ unshift @$db1, 'bar';
+
+ cmp_ok( scalar(@$db1), '==', 5, "DB1 now has 5 elements" );
+ cmp_ok( scalar(@$db2), '==', 3, "DB2 still has 3 elements" );
+
+$db1->commit;
+
+cmp_ok( scalar(@$db1), '==', 5, "DB1 is still at 5 elements" );
+cmp_ok( scalar(@$db2), '==', 5, "DB2 now has 5 elements" );
+
+is( $db1->[0], 'bar' );
+is( $db1->[-1], 'foo' );
+
+is( $db2->[0], 'bar' );
+is( $db2->[-1], 'foo' );
+
+$db1->begin_work;
+
+ @$db1 = (); # clear()
+
+ cmp_ok( scalar(@$db1), '==', 0, "DB1 now has 0 elements" );
+ cmp_ok( scalar(@$db2), '==', 5, "DB2 still has 5 elements" );
+
+$db1->rollback;
+
+cmp_ok( scalar(@$db1), '==', 5, "DB1 now has 5 elements" );
+cmp_ok( scalar(@$db2), '==', 5, "DB2 still has 5 elements" );
+
--- /dev/null
+use strict;
+use Test::More tests => 51;
+use Test::Deep;
+use t::common qw( new_fh );
+
+use_ok( 'DBM::Deep' );
+
+my ($fh, $filename) = new_fh();
+my $db1 = DBM::Deep->new(
+ file => $filename,
+ locking => 1,
+ autoflush => 1,
+);
+
+my $db2 = DBM::Deep->new(
+ file => $filename,
+ locking => 1,
+ autoflush => 1,
+);
+
+my $db3 = DBM::Deep->new(
+ file => $filename,
+ locking => 1,
+ autoflush => 1,
+);
+
+$db1->{foo} = 'bar';
+is( $db1->{foo}, 'bar', "Before transaction, DB1's foo is bar" );
+is( $db2->{foo}, 'bar', "Before transaction, DB2's foo is bar" );
+is( $db3->{foo}, 'bar', "Before transaction, DB3's foo is bar" );
+
+$db1->begin_work;
+
+is( $db1->{foo}, 'bar', "Before transaction work, DB1's foo is bar" );
+is( $db2->{foo}, 'bar', "Before transaction work, DB2's foo is bar" );
+is( $db3->{foo}, 'bar', "Before transaction work, DB3's foo is bar" );
+
+$db1->{foo} = 'bar2';
+
+is( $db1->{foo}, 'bar2', "After DB1 foo to bar2, DB1's foo is bar2" );
+is( $db2->{foo}, 'bar', "After DB1 foo to bar2, DB2's foo is bar" );
+is( $db3->{foo}, 'bar', "After DB1 foo to bar2, DB3's foo is bar" );
+
+$db1->{bar} = 'foo';
+
+ok( exists $db1->{bar}, "After DB1 set bar to foo, DB1's bar exists" );
+ok( !exists $db2->{bar}, "After DB1 set bar to foo, DB2's bar doesn't exist" );
+ok( !exists $db3->{bar}, "After DB1 set bar to foo, DB3's bar doesn't exist" );
+
+$db2->begin_work;
+
+is( $db1->{foo}, 'bar2', "After DB2 transaction begin, DB1's foo is bar2" );
+is( $db2->{foo}, 'bar', "After DB2 transaction begin, DB2's foo is bar" );
+is( $db3->{foo}, 'bar', "After DB2 transaction begin, DB3's foo is bar" );
+
+ok( exists $db1->{bar}, "After DB2 transaction begin, DB1's bar exists" );
+ok( !exists $db2->{bar}, "After DB2 transaction begin, DB2's bar doesn't exist" );
+ok( !exists $db3->{bar}, "After DB2 transaction begin, DB3's bar doesn't exist" );
+
+$db2->{foo} = 'bar333';
+
+is( $db1->{foo}, 'bar2', "After DB2 foo to bar333, DB1's foo is bar2" );
+is( $db2->{foo}, 'bar333', "After DB2 foo to bar333, DB2's foo is bar333" );
+is( $db3->{foo}, 'bar', "After DB2 foo to bar333, DB3's foo is bar" );
+
+$db2->{bar} = 'mybar';
+
+ok( exists $db1->{bar}, "After DB2 set bar to mybar, DB1's bar exists" );
+ok( exists $db2->{bar}, "After DB2 set bar to mybar, DB2's bar exists" );
+ok( !exists $db3->{bar}, "After DB2 set bar to mybar, DB3's bar doesn't exist" );
+
+is( $db1->{bar}, 'foo', "DB1's bar is still foo" );
+is( $db2->{bar}, 'mybar', "DB2's bar is now mybar" );
+
+$db2->{mykey} = 'myval';
+
+ok( !exists $db1->{mykey}, "After DB2 set mykey to myval, DB1's mykey doesn't exist" );
+ok( exists $db2->{mykey}, "After DB2 set mykey to myval, DB2's mykey exists" );
+ok( !exists $db3->{mykey}, "After DB2 set mykey to myval, DB3's mykey doesn't exist" );
+
+cmp_bag( [ keys %$db1 ], [qw( foo bar )], "DB1 keys correct" );
+cmp_bag( [ keys %$db2 ], [qw( foo bar mykey )], "DB2 keys correct" );
+cmp_bag( [ keys %$db3 ], [qw( foo )], "DB3 keys correct" );
+
+$db1->commit;
+
+is( $db1->{foo}, 'bar2', "After DB1 commit, DB1's foo is bar2" );
+is( $db2->{foo}, 'bar333', "After DB1 commit, DB2's foo is bar333" );
+is( $db3->{foo}, 'bar2', "After DB1 commit, DB3's foo is bar2" );
+
+is( $db1->{bar}, 'foo', "DB1's bar is still foo" );
+is( $db2->{bar}, 'mybar', "DB2's bar is still mybar" );
+is( $db3->{bar}, 'foo', "DB3's bar is now foo" );
+
+cmp_bag( [ keys %$db1 ], [qw( foo bar )], "DB1 keys correct" );
+cmp_bag( [ keys %$db2 ], [qw( foo bar mykey )], "DB2 keys correct" );
+cmp_bag( [ keys %$db3 ], [qw( foo bar )], "DB3 keys correct" );
+
+$db2->commit;
+
+is( $db1->{foo}, 'bar333', "After DB2 commit, DB1's foo is bar333" );
+is( $db2->{foo}, 'bar333', "After DB2 commit, DB2's foo is bar333" );
+is( $db3->{foo}, 'bar333', "After DB2 commit, DB3's foo is bar333" );
+
+is( $db1->{bar}, 'mybar', "DB1's bar is now mybar" );
+is( $db2->{bar}, 'mybar', "DB2's bar is still mybar" );
+is( $db3->{bar}, 'mybar', "DB3's bar is now mybar" );
+
+cmp_bag( [ keys %$db1 ], [qw( foo bar mykey )], "DB1 keys correct" );
+cmp_bag( [ keys %$db2 ], [qw( foo bar mykey )], "DB2 keys correct" );
+cmp_bag( [ keys %$db3 ], [qw( foo bar mykey )], "DB3 keys correct" );