From: rkinyon
Date: Fri, 28 Sep 2007 16:59:28 +0000 (+0000)
Subject: r8199@h460878c2 (orig r10013): rkinyon | 2007-09-28 12:05:34 -0400
X-Git-Tag: 1-0005~2
X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=888453b9638223f4db7bd35d3ac3808fe86c1ab5;p=dbsrgits%2FDBM-Deep.git

r8199@h460878c2 (orig r10013): rkinyon | 2007-09-28 12:05:34 -0400
 r6222@h460878c2 (orig r10003): rkinyon | 2007-09-26 21:30:53 -0400
 Added _dump_file and improved how arrays/hashes clean up after themselves
 r8192@h460878c2 (orig r10004): rkinyon | 2007-09-26 22:25:04 -0400
 Added test that breaks the dumper
 r8193@h460878c2 (orig r10005): rkinyon | 2007-09-27 15:16:18 -0400
 Fixed the bug revealed by making bucketlists properly clean up after themselves
 r8194@h460878c2 (orig r10006): rkinyon | 2007-09-27 15:19:45 -0400
 Fixed hardcoded 0 staleness for HEAD in inc_txn_staleness_counter
 r8195@h460878c2 (orig r10008): rkinyon | 2007-09-27 23:06:25 -0400
 The refcount functions have been refactored a bit
 r8196@h460878c2 (orig r10011): rkinyon | 2007-09-28 09:35:35 -0400
 Added a test for dump_file within the core tests and got all subs to be called at least once in the core tests.
 r8198@h460878c2 (orig r10012): rkinyon | 2007-09-28 11:29:08 -0400
 A raft of minor improvements
 r8200@h460878c2 (orig r10014): rkinyon | 2007-09-28 12:10:04 -0400
 Updated Changes file
---

diff --git a/Changes b/Changes
index 2c39a3c..94c7011 100644
--- a/Changes
+++ b/Changes
@@ -1,6 +1,18 @@
 Revision history for DBM::Deep.
 
-1.0009_01 Sep 24 14:00:00 2007 EDT
+1.0004 Sep 28 12:15:00 2007 EDT
+    - (This version is compatible with 1.0003)
+    - Fixed the Changes file (wrong version was displayed for 1.0003)
+    - Added filter sugar methods to be more API-compatible with other DBMs
+      - This was added to support a patch provided to IO::All so it can
+        use DBM::Deep as a DBM provider.
+    - Implemented _dump_file in order to display the file structure. As a
+      result, the following bugs were fixed:
+      - Arrays and hashes now clean up after themselves better.
+      - Bucketlists now clean up after themselves better.
+      - Reindexing properly clears the old bucketlist before freeing it.
+
+1.0003 Sep 24 14:00:00 2007 EDT
     - THIS VERSION IS INCOMPATIBLE WITH FILES FROM ALL OTHER PRIOR VERSIONS.
     - Further fixes for unshift/shift/splice and references (RT# 29583)
       - To fix that, I had to put support for real references in.
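The filter sugar methods (filter_store_key, filter_store_value, filter_fetch_key,
filter_fetch_value) and the _dump_file hook described above can be exercised
roughly as in the sketch below. The file name and the filter subs are
illustrative only and not part of this commit; the method names come from the
diff that follows and from the updated t/14_filter.t.

    use DBM::Deep;

    # Illustrative example only -- "example.db" and the MYFILTER subs are
    # made up; the methods themselves are the ones added by this commit.
    my $db = DBM::Deep->new( "example.db" );

    # Sugar wrappers around set_filter(), for API compatibility with other DBMs.
    $db->filter_store_key( sub { "MYFILTER$_[0]" } );
    $db->filter_fetch_key( sub { ( my $k = $_[0] ) =~ s/^MYFILTER//; $k } );

    $db->{key1} = 'value1';            # key is filtered on store and fetch

    $db->filter_store_key( undef );    # passing undef clears a filter
    $db->filter_fetch_key( undef );

    # _dump_file returns a textual dump of the file's sector layout.
    print $db->_dump_file;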
diff --git a/MANIFEST b/MANIFEST
index 6b774db..8862a0f 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -59,6 +59,7 @@ t/42_transaction_indexsector.t
 t/43_transaction_maximum.t
 t/44_upgrade_db.t
 t/45_references.t
+t/97_dump_file.t
 t/98_pod.t
 t/99_pod_coverage.t
 t/common.pm
diff --git a/lib/DBM/Deep.pm b/lib/DBM/Deep.pm
index b307009..a72833d 100644
--- a/lib/DBM/Deep.pm
+++ b/lib/DBM/Deep.pm
@@ -5,7 +5,7 @@ use 5.006_000;
 use strict;
 use warnings;
 
-our $VERSION = q(1.0003);
+our $VERSION = q(1.0004);
 
 use Fcntl qw( :flock );
 
@@ -245,12 +245,13 @@ sub optimize {
         type => $self->_type,
 
         # Bring over all the parameters that we need to bring over
-        num_txns => $self->_engine->num_txns,
-        byte_size => $self->_engine->byte_size,
-        max_buckets => $self->_engine->max_buckets,
+        ( map { $_ => $self->_engine->$_ } qw(
+            byte_size max_buckets data_sector_size num_txns
+        )),
     );
 
     $self->lock();
+    #DBM::Deep::Engine::Sector::Reference->_clear_cache;
     $self->_copy_node( $db_temp );
     undef $db_temp;
 
@@ -319,9 +320,6 @@ sub clone {
     );
 
     sub set_filter {
-        ##
-        # Setup filter function for storing or fetching the key or value
-        ##
        my $self = shift->_get_self;
        my $type = lc shift;
        my $func = shift;
@@ -333,6 +331,11 @@ sub clone {
 
         return;
     }
+
+    sub filter_store_key { $_[0]->set_filter( store_key => $_[1] ); }
+    sub filter_store_value { $_[0]->set_filter( store_value => $_[1] ); }
+    sub filter_fetch_key { $_[0]->set_filter( fetch_key => $_[1] ); }
+    sub filter_fetch_value { $_[0]->set_filter( fetch_value => $_[1] ); }
 }
 
 sub begin_work {
@@ -389,14 +392,12 @@ sub _fh {
 ##
 
 sub _throw_error {
-    die "DBM::Deep: $_[1]\n";
     my $n = 0;
     while( 1 ) {
         my @caller = caller( ++$n );
         next if $caller[0] =~ m/^DBM::Deep/;
 
         die "DBM::Deep: $_[1] at $0 line $caller[2]\n";
-        last;
     }
 }
 
@@ -552,5 +553,7 @@ sub delete { (shift)->DELETE( @_ ) }
 sub exists { (shift)->EXISTS( @_ ) }
 sub clear { (shift)->CLEAR( @_ ) }
 
+sub _dump_file {shift->_get_self->_engine->_dump_file;}
+
 1;
 __END__
diff --git a/lib/DBM/Deep.pod b/lib/DBM/Deep.pod
index eccea9e..8b4b689 100644
--- a/lib/DBM/Deep.pod
+++ b/lib/DBM/Deep.pod
@@ -1023,16 +1023,16 @@ reference to be imported in order to explicitly leave it untied.
 
 B<Devel::Cover> is used to test the code coverage of the tests. Below is the
 B<Devel::Cover> report on this distribution's test suite.
 
-  ----------------------------------- ------ ------ ------ ------ ------ ------
-  File                                  stmt   bran   cond    sub   time  total
-  ----------------------------------- ------ ------ ------ ------ ------ ------
-  blib/lib/DBM/Deep.pm                  94.4   85.0   90.5  100.0    5.0   93.4
-  blib/lib/DBM/Deep/Array.pm           100.0   94.6  100.0  100.0    4.7   98.8
-  blib/lib/DBM/Deep/Engine.pm           97.2   85.8   82.4  100.0   51.3   93.8
-  blib/lib/DBM/Deep/File.pm             97.2   81.6   66.7  100.0   36.5   91.9
-  blib/lib/DBM/Deep/Hash.pm            100.0  100.0  100.0  100.0    2.5  100.0
-  Total                                 97.2   87.4   83.9  100.0  100.0   94.6
-  ----------------------------------- ------ ------ ------ ------ ------ ------
+  ------------------------------------------ ------ ------ ------ ------ ------
+  File                                          stmt   bran   cond    sub  total
+  ------------------------------------------ ------ ------ ------ ------ ------
+  blib/lib/DBM/Deep.pm                          94.5   85.0   90.5  100.0   93.6
+  blib/lib/DBM/Deep/Array.pm                   100.0   94.3  100.0  100.0   98.7
+  blib/lib/DBM/Deep/Engine.pm                   95.9   84.9   81.7  100.0   92.8
+  blib/lib/DBM/Deep/File.pm                     97.2   81.6   66.7  100.0   91.9
+  blib/lib/DBM/Deep/Hash.pm                    100.0  100.0  100.0  100.0  100.0
+  Total                                         96.5   86.5   83.5  100.0   94.0
+  ------------------------------------------ ------ ------ ------ ------ ------
 
 =head1 MORE INFORMATION
diff --git a/lib/DBM/Deep/Array.pm b/lib/DBM/Deep/Array.pm
index 500473b..f9b9af2 100644
--- a/lib/DBM/Deep/Array.pm
+++ b/lib/DBM/Deep/Array.pm
@@ -5,7 +5,7 @@ use 5.006_000;
 use strict;
 use warnings;
 
-our $VERSION = q(1.0003);
+our $VERSION = q(1.0004);
 
 # This is to allow DBM::Deep::Array to handle negative indices on
 # its own. Otherwise, Perl would intercept the call to negative
@@ -47,6 +47,7 @@ sub FETCH {
     $self->lock( $self->LOCK_SH );
 
     if ( !defined $key ) {
+        $self->unlock;
         DBM::Deep->_throw_error( "Cannot use an undefined array index." );
     }
     elsif ( $key =~ /^-?\d+$/ ) {
@@ -79,6 +80,7 @@ sub STORE {
     my $size;
     my $idx_is_numeric;
     if ( !defined $key ) {
+        $self->unlock;
         DBM::Deep->_throw_error( "Cannot use an undefined array index." );
     }
     elsif ( $key =~ /^-?\d+$/ ) {
@@ -117,6 +119,7 @@ sub EXISTS {
     $self->lock( $self->LOCK_SH );
 
     if ( !defined $key ) {
+        $self->unlock;
         DBM::Deep->_throw_error( "Cannot use an undefined array index." );
     }
     elsif ( $key =~ /^-?\d+$/ ) {
@@ -148,6 +151,7 @@ sub DELETE {
     my $size = $self->FETCHSIZE;
 
     if ( !defined $key ) {
+        $self->unlock;
         DBM::Deep->_throw_error( "Cannot use an undefined array index." );
     }
     elsif ( $key =~ /^-?\d+$/ ) {
diff --git a/lib/DBM/Deep/Engine.pm b/lib/DBM/Deep/Engine.pm
index 4441278..99198fe 100644
--- a/lib/DBM/Deep/Engine.pm
+++ b/lib/DBM/Deep/Engine.pm
@@ -5,7 +5,7 @@ use 5.006_000;
 use strict;
 use warnings;
 
-our $VERSION = q(1.0003);
+our $VERSION = q(1.0004);
 
 use Scalar::Util ();
 
@@ -564,8 +564,8 @@ sub get_txn_staleness_counter {
     return unpack( $StP{$STALE_SIZE},
         $self->storage->read_at(
-            $self->trans_loc + 4 + $STALE_SIZE * ($trans_id - 1),
-            4,
+            $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
+            $STALE_SIZE,
         )
     );
 }
 
@@ -575,10 +575,10 @@ sub inc_txn_staleness_counter {
     my ($trans_id) = @_;
 
     # Hardcode staleness of 0 for the HEAD
-    return unless $trans_id;
+    return 0 unless $trans_id;
 
     $self->storage->print_at(
-        $self->trans_loc + 4 + $STALE_SIZE * ($trans_id - 1),
+        $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
         pack( $StP{$STALE_SIZE}, $self->get_txn_staleness_counter( $trans_id ) + 1 ),
     );
 }
@@ -875,6 +875,99 @@ sub set_trans_loc { $_[0]{trans_loc} = $_[1] }
 sub chains_loc { $_[0]{chains_loc} }
 sub set_chains_loc { $_[0]{chains_loc} = $_[1] }
 
+sub _dump_file {
+    my $self = shift;
+
+    # Read the header
+    my $spot = $self->_read_file_header();
+
+    my %types = (
+        0 => 'B',
+        1 => 'D',
+        2 => 'I',
+    );
+
+    my %sizes = (
+        'D' => $self->data_sector_size,
+        'B' => DBM::Deep::Engine::Sector::BucketList->new({engine=>$self,offset=>1})->size,
+        'I' => DBM::Deep::Engine::Sector::Index->new({engine=>$self,offset=>1})->size,
+    );
+
+    my $return = "";
+    # Read the free sector chains
+    my %sectors;
+    foreach my $multiple ( 0 .. 2 ) {
+        $return .= "Chains($types{$multiple}):";
+        my $old_loc = $self->chains_loc + $multiple * $self->byte_size;
+        while ( 1 ) {
+            my $loc = unpack(
+                $StP{$self->byte_size},
+                $self->storage->read_at( $old_loc, $self->byte_size ),
+            );
+
+            # We're now out of free sectors of this kind.
+            unless ( $loc ) {
+                last;
+            }
+
+            $sectors{ $types{$multiple} }{ $loc } = undef;
+            $old_loc = $loc + SIG_SIZE + $STALE_SIZE;
+            $return .= " $loc";
+        }
+        $return .= $/;
+    }
+
+    SECTOR:
+    while ( $spot < $self->storage->{end} ) {
+        # Read each sector in order.
+        my $sector = $self->_load_sector( $spot );
+        if ( !$sector ) {
+            # Find it in the free-sectors that were found already
+            foreach my $type ( keys %sectors ) {
+                if ( exists $sectors{$type}{$spot} ) {
+                    my $size = $sizes{$type};
+                    $return .= sprintf "%08d: %s %04d\n", $spot, 'F' . $type, $size;
+                    $spot += $size;
+                    next SECTOR;
+                }
+            }
+
+            die "********\n$return\nDidn't find free sector for $spot in chains\n********\n";
+        }
+        else {
+            $return .= sprintf "%08d: %s %04d", $spot, $sector->type, $sector->size;
+            if ( $sector->type eq 'D' ) {
+                $return .= ' ' . $sector->data;
+            }
+            elsif ( $sector->type eq 'A' || $sector->type eq 'H' ) {
+                $return .= ' REF: ' . $sector->get_refcount;
+            }
+            elsif ( $sector->type eq 'B' ) {
+                foreach my $bucket ( $sector->chopped_up ) {
+                    $return .= "\n    ";
+                    $return .= sprintf "%08d", unpack($StP{$self->byte_size},
+                        substr( $bucket->[-1], $self->hash_size, $self->byte_size),
+                    );
+                    foreach my $txn ( 0 .. $self->num_txns - 1 ) {
+                        my $l = unpack( $StP{$self->byte_size},
+                            substr( $bucket->[-1],
+                                $self->hash_size + $self->byte_size + $txn * ($self->byte_size + $STALE_SIZE),
+                                $self->byte_size,
+                            ),
+                        );
+                        $return .= sprintf " %08d", $l;
+                    }
+                }
+            }
+            $return .= $/;
+
+            $spot += $sector->size;
+        }
+    }
+
+    return $return;
+}
+
 ################################################################################
 
 package DBM::Deep::Iterator;
@@ -1279,23 +1372,6 @@ sub _init {
     return;
 }
 
-sub free {
-    my $self = shift;
-
-    # We're not ready to be removed yet.
-    if ( $self->decrement_refcount > 0 ) {
-        return;
-    }
-
-    my $blist_loc = $self->get_blist_loc;
-    $self->engine->_load_sector( $blist_loc )->free if $blist_loc;
-
-    my $class_loc = $self->get_class_offset;
-    $self->engine->_load_sector( $class_loc )->free if $class_loc;
-
-    $self->SUPER::free();
-}
-
 sub staleness { $_[0]{staleness} }
 
 sub get_data_for {
@@ -1405,6 +1481,8 @@ sub delete_key {
 
     my @trans_ids = $self->engine->get_running_txn_ids;
 
+    # If we're the HEAD and there are running txns, then we need to clone this value to the other
+    # transactions to preserve Isolation.
     if ( $self->engine->trans_id == 0 ) {
         if ( @trans_ids ) {
             foreach my $other_trans_id ( @trans_ids ) {
@@ -1563,6 +1641,7 @@ sub get_bucket_list {
             );
         }
 
+        $sector->clear;
         $sector->free;
 
         $sector = $blist_cache{ ord( substr( $args->{key_md5}, $i, 1 ) ) };
@@ -1595,44 +1674,64 @@ sub get_classname {
 }
 
 #XXX Add singleton handling here
-sub data {
-    my $self = shift;
+{
+    my %cache;
+    # XXX This is insufficient
+#    sub _clear_cache { %cache = (); }
+    sub data {
+        my $self = shift;
 
-    my $new_obj = DBM::Deep->new({
-        type => $self->type,
-        base_offset => $self->offset,
-        staleness => $self->staleness,
-        storage => $self->engine->storage,
-        engine => $self->engine,
-    });
+#        unless ( $cache{ $self->offset } ) {
+            my $new_obj = DBM::Deep->new({
+                type => $self->type,
+                base_offset => $self->offset,
+                staleness => $self->staleness,
+                storage => $self->engine->storage,
+                engine => $self->engine,
+            });
 
-    if ( $self->engine->storage->{autobless} ) {
-        my $classname = $self->get_classname;
-        if ( defined $classname ) {
-            bless $new_obj, $classname;
-        }
+            if ( $self->engine->storage->{autobless} ) {
+                my $classname = $self->get_classname;
+                if ( defined $classname ) {
+                    bless $new_obj, $classname;
+                }
+            }
+
+            $cache{$self->offset} = $new_obj;
+#        }
+        return $cache{$self->offset};
     }
 
-    return $new_obj;
+    sub free {
+        my $self = shift;
+
+        # We're not ready to be removed yet.
+        if ( $self->decrement_refcount > 0 ) {
+            return;
+        }
+
+        # Rebless the object into DBM::Deep::Null.
+#        %{$cache{ $self->offset }} = ();
+#        bless $cache{$self->offset}, 'DBM::Deep::Null';
+
+        my $blist_loc = $self->get_blist_loc;
+        $self->engine->_load_sector( $blist_loc )->free if $blist_loc;
+
+        my $class_loc = $self->get_class_offset;
+        $self->engine->_load_sector( $class_loc )->free if $class_loc;
+
+        $self->SUPER::free();
+    }
 }
 
 sub increment_refcount {
     my $self = shift;
 
-    my $e = $self->engine;
-    my $refcount = unpack(
-        $StP{$e->byte_size},
-        $e->storage->read_at(
-            $self->offset + $self->base_size + 2 * $e->byte_size, $e->byte_size,
-        ),
-    );
+    my $refcount = $self->get_refcount;
 
     $refcount++;
 
-    $e->storage->print_at(
-        $self->offset + $self->base_size + 2 * $e->byte_size,
-        pack( $StP{$e->byte_size}, $refcount ),
-    );
+    $self->write_refcount( $refcount );
 
     return $refcount;
 }
@@ -1640,20 +1739,11 @@ sub increment_refcount {
 sub decrement_refcount {
     my $self = shift;
 
-    my $e = $self->engine;
-    my $refcount = unpack(
-        $StP{$e->byte_size},
-        $e->storage->read_at(
-            $self->offset + $self->base_size + 2 * $e->byte_size, $e->byte_size,
-        ),
-    );
+    my $refcount = $self->get_refcount;
 
     $refcount--;
 
-    $e->storage->print_at(
-        $self->offset + $self->base_size + 2 * $e->byte_size,
-        pack( $StP{$e->byte_size}, $refcount ),
-    );
+    $self->write_refcount( $refcount );
 
     return $refcount;
 }
@@ -1670,6 +1760,17 @@ sub get_refcount {
     );
 }
 
+sub write_refcount {
+    my $self = shift;
+    my ($num) = @_;
+
+    my $e = $self->engine;
+    $e->storage->print_at(
+        $self->offset + $self->base_size + 2 * $e->byte_size,
+        pack( $StP{$e->byte_size}, $num ),
+    );
+}
+
 package DBM::Deep::Engine::Sector::BucketList;
 
 our @ISA = qw( DBM::Deep::Engine::Sector );
@@ -1697,6 +1798,13 @@ sub _init {
     return $self;
 }
 
+sub clear {
+    my $self = shift;
+    $self->engine->storage->print_at( $self->offset + $self->base_size,
+        chr(0) x ($self->size - $self->base_size), # Zero-fill the data
+    );
+}
+
 sub size {
     my $self = shift;
     unless ( $self->{size} ) {
@@ -1709,6 +1817,31 @@ sub size {
 
 sub free_meth { return '_add_free_blist_sector' }
 
+sub free {
+    my $self = shift;
+
+    my $e = $self->engine;
+    foreach my $bucket ( $self->chopped_up ) {
+        my $rest = $bucket->[-1];
+
+        # Delete the keysector
+        my $l = unpack( $StP{$e->byte_size}, substr( $rest, $e->hash_size, $e->byte_size ) );
+        my $s = $e->_load_sector( $l ); $s->free if $s;
+
+        foreach my $txn ( 0 .. $e->num_txns - 1 ) {
+            my $l = unpack( $StP{$e->byte_size},
+                substr( $rest,
+                    $e->hash_size + $e->byte_size + $txn * ($e->byte_size + $STALE_SIZE),
+                    $e->byte_size,
+                ),
+            );
+            my $s = $e->_load_sector( $l ); $s->free if $s;
+        }
+    }
+
+    $self->SUPER::free();
+}
+
 sub bucket_size {
     my $self = shift;
     unless ( $self->{bucket_size} ) {
@@ -2070,5 +2203,17 @@ sub set_entry {
     );
 }
 
+# This was copied from MARCEL's Class::Null. However, I couldn't use it because
+# I need an undef value, not an implementation of the Null Class pattern.
+package DBM::Deep::Null;
+
+use overload
+    'bool' => sub { undef},
+    '""' => sub { undef },
+    '0+' => sub { undef},
+    fallback => 1;
+
+sub AUTOLOAD { return; }
+
 1;
 __END__
diff --git a/lib/DBM/Deep/File.pm b/lib/DBM/Deep/File.pm
index c62d81f..ccedf04 100644
--- a/lib/DBM/Deep/File.pm
+++ b/lib/DBM/Deep/File.pm
@@ -5,7 +5,7 @@ use 5.006_000;
 use strict;
 use warnings;
 
-our $VERSION = q(1.0003);
+our $VERSION = q(1.0004);
 
 use Fcntl qw( :DEFAULT :flock :seek );
 
diff --git a/lib/DBM/Deep/Hash.pm b/lib/DBM/Deep/Hash.pm
index 22a7acc..b703705 100644
--- a/lib/DBM/Deep/Hash.pm
+++ b/lib/DBM/Deep/Hash.pm
@@ -5,7 +5,7 @@ use 5.006_000;
 use strict;
 use warnings;
 
-our $VERSION = q(1.0003);
+our $VERSION = q(1.0004);
 
 use base 'DBM::Deep';
 
diff --git a/t/04_array.t b/t/04_array.t
index 01eb346..e4616ee 100644
--- a/t/04_array.t
+++ b/t/04_array.t
@@ -2,7 +2,7 @@
 # DBM::Deep Test
 ##
 use strict;
-use Test::More tests => 125;
+use Test::More tests => 128;
 use Test::Exception;
 use t::common qw( new_fh );
 
@@ -197,6 +197,11 @@ is($db->[0], "elem first");
 is($db->[1], "elem last");
 is($returned[0], "middle ABC");
 
+@returned = $db->splice;
+is( $db->length, 0 );
+is( $returned[0], "elem first" );
+is( $returned[1], "elem last" );
+
 $db->[0] = [ 1 .. 3 ];
 $db->[1] = { a => 'foo' };
 is( $db->[0]->length, 3, "Reuse of same space with array successful" );
diff --git a/t/14_filter.t b/t/14_filter.t
index 240e96d..fbff9b1 100644
--- a/t/14_filter.t
+++ b/t/14_filter.t
@@ -53,10 +53,10 @@ ok( exists $db->{key2}, "Key2 exists" );
 ##
 # Now clear all filters, and make sure all is unfiltered
 ##
-ok( $db->set_filter( 'store_key', undef ), "Unset store_key filter" );
-ok( $db->set_filter( 'store_value', undef ), "Unset store_value filter" );
-ok( $db->set_filter( 'fetch_key', undef ), "Unset fetch_key filter" );
-ok( $db->set_filter( 'fetch_value', undef ), "Unset fetch_value filter" );
+ok( $db->filter_store_key( undef ), "Unset store_key filter" );
+ok( $db->filter_store_value( undef ), "Unset store_value filter" );
+ok( $db->filter_fetch_key( undef ), "Unset fetch_key filter" );
+ok( $db->filter_fetch_value( undef ), "Unset fetch_value filter" );
 
 is( $db->{MYFILTERkey2}, "MYFILTERvalue2", "We get the right unfiltered value" );
 
diff --git a/t/27_filehandle.t b/t/27_filehandle.t
index 810154d..11f9eca 100644
--- a/t/27_filehandle.t
+++ b/t/27_filehandle.t
@@ -20,16 +20,14 @@ use_ok( 'DBM::Deep' );
 {
     open(my $fh, '<', $filename) || die("Can't open '$filename' for reading: $!\n");
 
-    my $db;
-
     # test if we can open and read a db using its filehandle
-    ok(($db = DBM::Deep->new(fh => $fh)), "open db in filehandle");
-    ok($db->{hash}->{foo}->[1] eq 'b', "and get at stuff in the database");
+    my $db;
+    ok( ($db = DBM::Deep->new( fh => $fh )), "open db in filehandle" );
+    ok( $db->{hash}{foo}[1] eq 'b', "and get at stuff in the database" );
 
     throws_ok {
         $db->{foo} = 1;
-    } qr/Cannot write to a readonly filehandle/,
-        "Can't write to a read-only filehandle";
+    } qr/Cannot write to a readonly filehandle/, "Can't write to a read-only filehandle";
     ok( !$db->exists( 'foo' ), "foo doesn't exist" );
 
     my $db_obj = $db->_get_self;
diff --git a/t/31_references.t b/t/31_references.t
index ebeb811..0184795 100644
--- a/t/31_references.t
+++ b/t/31_references.t
@@ -1,7 +1,5 @@
-##
-# DBM::Deep Test
-##
 use strict;
+
 use Test::More tests => 16;
 use Test::Exception;
 use t::common qw( new_fh );
@@ -55,9 +53,6 @@ is( $db->{array}[2]{b}, 'floober' );
 my %hash2 = ( abc => [ 1 .. 3 ] );
 $array[3] = \%hash2;
 
-SKIP: {
-    skip "Internal references are not supported right now", 1;
-    $hash2{ def } = \%hash;
-    is( $array[3]{def}{foo}, 2 );
-}
+$hash2{ def } = \%hash;
+is( $array[3]{def}{foo}, 2 );
 
diff --git a/t/39_singletons.t b/t/39_singletons.t
index f9ff2e1..8a3573e 100644
--- a/t/39_singletons.t
+++ b/t/39_singletons.t
@@ -1,5 +1,5 @@
 use strict;
-use Test::More tests => 2;
+use Test::More tests => 5;
 use Test::Deep;
 use t::common qw( new_fh );
 
@@ -19,6 +19,11 @@ my $y = $db->{foo};
 print "$x -> $y\n";
 
 TODO: {
-    local $TODO = "Singletons aren't working yet";
-is( $x, $y, "The references are the same" );
+    local $TODO = "Singletons are unimplmeneted yet";
+    is( $x, $y, "The references are the same" );
+
+    delete $db->{foo};
+    is( $x, undef );
+    is( $y, undef );
 }
+is( $db->{foo}, undef );
diff --git a/t/44_upgrade_db.t b/t/44_upgrade_db.t
index 04be1c9..c39d153 100644
--- a/t/44_upgrade_db.t
+++ b/t/44_upgrade_db.t
@@ -13,7 +13,7 @@ BEGIN {
     }
 }
 
-plan tests => 192;
+plan tests => 202;
 
 use t::common qw( new_fh );
 use File::Spec;
@@ -63,7 +63,7 @@ my @output_versions = (
     '0.981', '0.982', '0.983',
     '0.99_01', '0.99_02', '0.99_03', '0.99_04',
     '1.00', '1.000', '1.0000', '1.0001', '1.0002',
-    '1.0003',
+    '1.0003', '1.0004',
 );
 
 foreach my $input_filename (
@@ -117,7 +117,7 @@ foreach my $input_filename (
         eval "use DBM::Deep::10002";
         $db = DBM::Deep::10002->new( $output_filename );
     }
-    elsif ( $v =~ /^1\.000[3]/ ) {
+    elsif ( $v =~ /^1\.000[34]/ ) {
         push @INC, 'lib';
         eval "use DBM::Deep";
         $db = DBM::Deep->new( $output_filename );
diff --git a/t/45_references.t b/t/45_references.t
index 1cd157f..d39ba0a 100644
--- a/t/45_references.t
+++ b/t/45_references.t
@@ -2,7 +2,7 @@
 # DBM::Deep Test
 ##
 use strict;
-use Test::More tests => 10;
+use Test::More tests => 15;
 use Test::Exception;
 use t::common qw( new_fh );
 
@@ -10,7 +10,17 @@ use_ok( 'DBM::Deep' );
 
 my ($fh, $filename) = new_fh();
 my $db = DBM::Deep->new(
-    file => $filename,
+    file      => $filename,
+    locking   => 1,
+    autoflush => 1,
+    num_txns  => 16,
+);
+
+my $db2 = DBM::Deep->new(
+    file      => $filename,
+    locking   => 1,
+    autoflush => 1,
+    num_txns  => 16,
 );
 
 $db->{foo} = 5;
@@ -37,3 +47,37 @@ is( $db->{bar}[3], 42, "Bar[3] is also 42" );
 delete $db->{foo};
 
 is( $db->{bar}[3], 42, "After delete Foo, Bar[3] is still 42" );
+
+$db->{foo} = $db->{bar};
+$db2->begin_work;
+
+    delete $db2->{bar};
+    delete $db2->{foo};
+
+    is( $db2->{bar}, undef, "It's deleted in the transaction" );
+    is( $db->{bar}[3], 42, "... but not in the main" );
+
+$db2->rollback;
+
+# Why hasn't this failed!? Is it because stuff isn't getting deleted as expected?
+# I need a test that walks the sectors
+is( $db->{bar}[3], 42, "After delete Foo, Bar[3] is still 42" );
+is( $db2->{bar}[3], 42, "After delete Foo, Bar[3] is still 42" );
+
+delete $db->{foo};
+
+is( $db->{bar}[3], 42, "After delete Foo, Bar[3] is still 42" );
+
+__END__
+warn "-2\n";
+$db2->begin_work;
+
+warn "-1\n";
+    delete $db2->{bar};
+
+warn "0\n";
+$db2->commit;
+
+warn "1\n";
+ok( !exists $db->{bar}, "After commit, bar is gone" );
+warn "2\n";
diff --git a/t/97_dump_file.t b/t/97_dump_file.t
new file mode 100644
index 0000000..931cb07
--- /dev/null
+++ b/t/97_dump_file.t
@@ -0,0 +1,32 @@
+use strict;
+use Test::More tests => 3;
+use Test::Deep;
+use t::common qw( new_fh );
+
+use_ok( 'DBM::Deep' );
+
+my ($fh, $filename) = new_fh();
+my $db = DBM::Deep->new(
+    file => $filename,
+);
+
+is( $db->_dump_file, <<"__END_DUMP__", "Dump of initial file correct" );
+Chains(B):
+Chains(D):
+Chains(I):
+00000030: H 0064 REF: 1
+__END_DUMP__
+
+$db->{foo} = 'bar';
+
+is( $db->_dump_file, <<"__END_DUMP__", "Dump of initial file correct" );
+Chains(B):
+Chains(D):
+Chains(I):
+00000030: H 0064 REF: 1
+00000094: D 0064 bar
+00000158: B 0387
+    00000545 00000094
+00000545: D 0064 foo
+__END_DUMP__
+
diff --git a/utils/upgrade_db.pl b/utils/upgrade_db.pl
index 9b64ced..960abce 100755
--- a/utils/upgrade_db.pl
+++ b/utils/upgrade_db.pl
@@ -28,7 +28,7 @@ my %is_dev = (
 my %opts = (
     man => 0,
     help => 0,
-    version => '1.0003',
+    version => '1.0004',
     autobless => 1,
 );
 GetOptions( \%opts,
@@ -77,7 +77,7 @@ my %db;
     elsif ( $ver =~ /^1\.000?[0-2]?/) {
         $ver = 2;
     }
-    elsif ( $ver =~ /^1\.000[3]/) {
+    elsif ( $ver =~ /^1\.000[34]/) {
        $ver = 3;
    }
    else {