module_name => 'DBM::Deep',
license => 'perl',
requires => {
+ perl => '5.6.0',
'Digest::MD5' => '1.00',
'Scalar::Util' => '1.14',
},
Revision history for DBM::Deep.
0.99_01 ??? ?? ??:??:?? 2006 Pacific
+ - Provided explicit dependency on Perl 5.6.0
+ - Digest::MD5 requires 5.6.0
+ - Sub::Uplevel (dep of Test::Exception) requires 5.6.0
- Removed error()/clear_error()
+ - Broke out DBM::Deep's code into DBM::Deep::Engine
0.98 Feb 28 11:00:00 2006 Pacific
- Added in patch by David Cantrell to allow use of DATA filehandle
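A note on the 0.99_01 entry above: with error() and clear_error() removed, failures are raised as exceptions (the throws_ok tests later in this patch exercise exactly that), so callers wrap risky operations in eval. A minimal sketch under that assumption; the file name is illustrative:

    use strict;
    use warnings;
    use DBM::Deep;

    my $db = DBM::Deep->new( "example.db" );   # illustrative file name

    # Errors are raised as exceptions now, not stashed for error(), so wrap
    # risky operations in eval and inspect $@ afterwards.
    eval { $db->{key} = "value"; };
    if ( my $err = $@ ) {
        warn "DBM::Deep operation failed: $err";
    }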
##
# Setup constants for users to pass to new()
##
-sub TYPE_HASH () { DBM::Deep::Engine::SIG_HASH }
-sub TYPE_ARRAY () { DBM::Deep::Engine::SIG_ARRAY }
-sub TYPE_SCALAR () { DBM::Deep::Engine::SIG_SCALAR }
+sub TYPE_HASH () { DBM::Deep::Engine->SIG_HASH }
+sub TYPE_ARRAY () { DBM::Deep::Engine->SIG_ARRAY }
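For context, a minimal sketch of how a user passes these constants to new(); the file name is illustrative:

    use DBM::Deep;

    # Request an array-based database instead of the default hash type.
    my $db = DBM::Deep->new(
        file => "array.db",              # illustrative file name
        type => DBM::Deep->TYPE_ARRAY,
    );
    push @$db, "first element";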
sub _get_args {
my $proto = shift;
my $self = bless {
type => TYPE_HASH,
engine => DBM::Deep::Engine->new,
+ base_offset => undef,
}, $class;
- $self->{base_offset} = length( $self->{engine}->SIG_FILE );
foreach my $param ( keys %$self ) {
next unless exists $args->{$param};
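The fragment above seeds the object with defaults at bless time and then copies over only the parameters the caller actually supplied. A standalone, hypothetical illustration of that merge pattern (the default values here are made up):

    # Hypothetical, self-contained illustration of the defaults-then-override merge.
    my %defaults = ( type => 'hash', locking => 0 );   # made-up defaults
    my $args     = { locking => 1 };                   # caller-supplied args

    my $self = { %defaults };
    for my $param ( keys %$self ) {
        next unless exists $args->{$param};
        $self->{$param} = $args->{$param};   # caller-supplied value wins
    }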
file => $self->_root->{file} . '.tmp',
type => $self->_type
);
- if (!$db_temp) {
- $self->_throw_error("Cannot optimize: failed to open temp file: $!");
- }
$self->lock();
$self->_copy_node( $db_temp );
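optimize() copies the live data into the '.tmp' file shown above while holding the lock, then replaces the original file. A minimal usage sketch, assuming optimize() returns true on success; the file name is illustrative:

    use DBM::Deep;

    my $db = DBM::Deep->new(
        file    => "example.db",   # illustrative file name
        locking => 1,
    );

    # Reclaim space left behind by deleted or overwritten entries.
    $db->optimize()
        or warn "optimize() did not complete";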
# Fetch single value or element given plain key or array index
##
my $self = shift->_get_self;
- my $key = shift;
+ my ($key) = @_;
my $md5 = $self->{engine}{digest}->($key);
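As the fragment shows, a plain key is first run through the engine's digest function to locate its bucket. A sketch of what that looks like in isolation, assuming the engine's default digest is Digest::MD5's binary md5() (the Digest::MD5 requirement in Build.PL points that way):

    use Digest::MD5 qw( md5 );

    my $key = "some_key";                       # hypothetical key
    my $md5 = md5( $key );                      # 16-byte binary digest
    printf "bucket digest: %s\n", unpack( "H*", $md5 );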
$self->open( $obj ) if !defined $obj->_fh;
+ $obj->{base_offset} = length( SIG_FILE )
+ unless defined $obj->{base_offset};
+
#XXX We have to make sure we don't mess up when autoflush isn't turned on
unless ( $obj->_root->{inode} ) {
my @stats = stat($obj->_fh);
return 1;
}
+ $obj->{base_offset} = $bytes_read
+ unless defined $obj->{base_offset};
+
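base_offset records where the DBM::Deep signature starts inside the handle, which is what makes the 0.98 DATA-filehandle patch possible: the database image can sit after other content rather than at byte 0. A hedged sketch, assuming new() accepts the fh and file_offset parameters implied by the engine code below; the file name and offset are illustrative:

    use DBM::Deep;

    open my $fh, '+<', 'wrapped.db' or die $!;   # illustrative container file

    my $db = DBM::Deep->new(
        fh          => $fh,
        file_offset => 42,    # hypothetical offset of the DBM::Deep image
    );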
##
# Check signature was valid
##
my $self = shift;
my ($obj, $offset) = @_;
+# print join(':',map{$_||''}caller(1)), $/;
+
my $fh = $obj->_fh;
seek($fh, $offset + $obj->_root->{file_offset}, SEEK_SET);
for (my $idx = $start; $idx < (2**8); $idx++) {
my $subloc = unpack(
$self->{long_pack},
- substr($content, $idx * $self->{long_size}, $self->{long_size}),
+ substr(
+ $content,
+ $idx * $self->{long_size},
+ $self->{long_size},
+ ),
);
if ($subloc) {
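Each slot in the bucket list above holds a file offset packed with long_pack/long_size. A standalone sketch of that unpacking, assuming a long_pack of 'N' (32-bit big-endian) and a long_size of 4; both values are assumptions here, not read from the engine:

    my $long_pack = 'N';    # assumed pack template
    my $long_size = 4;      # bytes per packed offset

    # A fake bucket-list payload holding offsets 0, 4096 and 8192.
    my $content = pack( "$long_pack*", 0, 4096, 8192 );

    for my $idx ( 0 .. 2 ) {
        my $subloc = unpack(
            $long_pack,
            substr( $content, $idx * $long_size, $long_size ),
        );
        next unless $subloc;    # zero means an empty slot
        print "slot $idx points at offset $subloc\n";
    }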
is( $db->{key1}, 'value1', "key1's value is still there after optimize" );
is( $db->{a}{c}, 'value2', "key2's value is still there after optimize" );
+#print keys %{$db->{a}}, $/;
+
##
# now for the tricky one -- try to store a new key while the file is being
# optimized and locked by another process. the filehandle should be
# invalidated; afterwards, check whether the key was stored successfully
is( $db->{parentfork}, "hello", "stored key while optimize took place" );
+
+# undef $db;
+# $db = DBM::Deep->new(
+# file => $filename,
+# autoflush => 1,
+# locking => 1
+# );
# now check some existing values from before
is( $db->{key1}, 'value1', "key1's value is still there after optimize" );
ok($db->{hash}->{foo}->[1] eq 'b', "and get at stuff in the database");
throws_ok {
$db->{foo} = 1;
- } qr/Cannot write to a readonly filehandle/, "Can't write to a read-only filehandle";
+ } qr/Cannot write to a readonly filehandle/,
+ "Can't write to a read-only filehandle";
ok( !$db->exists( 'foo' ), "foo doesn't exist" );
my $db_obj = $db->_get_self;
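The forked-optimize test above relies on locking => 1 together with the lock() call seen in the optimize fragment earlier. A minimal sketch of guarding a multi-step update with the documented lock()/unlock() methods; the file name is illustrative:

    use DBM::Deep;

    my $db = DBM::Deep->new(
        file      => "shared.db",    # illustrative file name
        locking   => 1,
        autoflush => 1,
    );

    # Hold the lock across a read-modify-write cycle so another process
    # (for example, one running optimize) cannot interleave with us.
    $db->lock();
    $db->{counter} = ( $db->{counter} || 0 ) + 1;
    $db->unlock();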