From: rkinyon Date: Wed, 7 Jun 2006 18:35:29 +0000 (+0000) Subject: r14010@rob-kinyons-powerbook58: rob | 2006-06-07 14:35:06 -0400 X-Git-Tag: 0-99_03~27 X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=83371fe3400b9ebc3197b4aff468a2061d33db89;p=dbsrgits%2FDBM-Deep.git r14010@rob-kinyons-powerbook58: rob | 2006-06-07 14:35:06 -0400 Converted to use _storage instead of _fileobj and laid out the new code for using key-to-me pointers --- diff --git a/Build.PL b/Build.PL index 420b96d..ce77101 100644 --- a/Build.PL +++ b/Build.PL @@ -24,7 +24,7 @@ my $build = Module::Build->new( }, create_makefile_pl => 'traditional', add_to_cleanup => [ - 'META.yml', '*.bak', '*.gz', 'Makefile.PL', 't/test*.db', 'cover_db' + 'META.yml', '*.bak', '*.gz', 'Makefile.PL', 't/test*.db', 'cover_db', ], test_files => 't/??_*.t', ); diff --git a/lib/DBM/Deep.pm b/lib/DBM/Deep.pm index 3f8d128..aa23179 100644 --- a/lib/DBM/Deep.pm +++ b/lib/DBM/Deep.pm @@ -108,8 +108,8 @@ sub _init { my $class = shift; my ($args) = @_; - $args->{fileobj} = DBM::Deep::File->new( $args ) - unless exists $args->{fileobj}; + $args->{storage} = DBM::Deep::File->new( $args ) + unless exists $args->{storage}; # locking implicitly enables autoflush if ($args->{locking}) { $args->{autoflush} = 1; } @@ -122,7 +122,7 @@ sub _init { parent => undef, parent_key => undef, - fileobj => undef, + storage => undef, }, $class; $self->{engine} = DBM::Deep::Engine->new( { %{$args}, obj => $self } ); @@ -134,7 +134,7 @@ sub _init { $self->_engine->setup_fh( $self ); - $self->_fileobj->set_db( $self ); + $self->_storage->set_db( $self ); return $self; } @@ -153,12 +153,12 @@ sub TIEARRAY { sub lock { my $self = shift->_get_self; - return $self->_fileobj->lock( $self, @_ ); + return $self->_storage->lock( $self, @_ ); } sub unlock { my $self = shift->_get_self; - return $self->_fileobj->unlock( $self, @_ ); + return $self->_storage->unlock( $self, @_ ); } sub _copy_value { @@ -259,14 +259,14 @@ sub optimize { my $self = shift->_get_self; #XXX Need to create a new test for this -# if ($self->_fileobj->{links} > 1) { +# if ($self->_storage->{links} > 1) { # $self->_throw_error("Cannot optimize: reference count is greater than 1"); # } #XXX Do we have to lock the tempfile? my $db_temp = DBM::Deep->new( - file => $self->_fileobj->{file} . '.tmp', + file => $self->_storage->{file} . '.tmp', type => $self->_type ); @@ -281,8 +281,8 @@ sub optimize { my $perms = $stats[2] & 07777; my $uid = $stats[4]; my $gid = $stats[5]; - chown( $uid, $gid, $self->_fileobj->{file} . '.tmp' ); - chmod( $perms, $self->_fileobj->{file} . '.tmp' ); + chown( $uid, $gid, $self->_storage->{file} . '.tmp' ); + chmod( $perms, $self->_storage->{file} . '.tmp' ); # q.v. perlport for more information on this variable if ( $^O eq 'MSWin32' || $^O eq 'cygwin' ) { @@ -293,18 +293,18 @@ sub optimize { # with a soft copy. ## $self->unlock(); - $self->_fileobj->close; + $self->_storage->close; } - if (!rename $self->_fileobj->{file} . '.tmp', $self->_fileobj->{file}) { - unlink $self->_fileobj->{file} . '.tmp'; + if (!rename $self->_storage->{file} . '.tmp', $self->_storage->{file}) { + unlink $self->_storage->{file} . 
'.tmp'; $self->unlock(); $self->_throw_error("Optimize failed: Cannot copy temp file over original: $!"); } $self->unlock(); - $self->_fileobj->close; - $self->_fileobj->open; + $self->_storage->close; + $self->_storage->open; $self->_engine->setup_fh( $self ); return 1; @@ -319,7 +319,7 @@ sub clone { return DBM::Deep->new( type => $self->_type, base_offset => $self->_base_offset, - fileobj => $self->_fileobj, + storage => $self->_storage, parent => $self->{parent}, parent_key => $self->{parent_key}, ); @@ -342,7 +342,7 @@ sub clone { my $func = shift; if ( $is_legal_filter{$type} ) { - $self->_fileobj->{"filter_$type"} = $func; + $self->_storage->{"filter_$type"} = $func; return 1; } @@ -352,17 +352,17 @@ sub clone { sub begin_work { my $self = shift->_get_self; - return $self->_fileobj->begin_transaction; + return $self->_storage->begin_transaction; } sub rollback { my $self = shift->_get_self; - return $self->_fileobj->end_transaction; + return $self->_storage->end_transaction; } sub commit { my $self = shift->_get_self; - return $self->_fileobj->commit_transaction; + return $self->_storage->commit_transaction; } ## @@ -374,9 +374,9 @@ sub _engine { return $self->{engine}; } -sub _fileobj { +sub _storage { my $self = $_[0]->_get_self; - return $self->{fileobj}; + return $self->{storage}; } sub _type { @@ -391,7 +391,7 @@ sub _base_offset { sub _fh { my $self = $_[0]->_get_self; - return $self->_fileobj->{fh}; + return $self->_storage->{fh}; } ## @@ -478,7 +478,7 @@ sub STORE { $lhs = "\$db->put(q{$orig_key},$rhs);"; } - $self->_fileobj->audit($lhs); + $self->_storage->audit($lhs); } ## @@ -488,8 +488,8 @@ sub STORE { # User may be storing a complex value, in which case we do not want it run # through the filtering system. - if ( !ref($value) && $self->_fileobj->{filter_store_value} ) { - $value = $self->_fileobj->{filter_store_value}->( $value ); + if ( !ref($value) && $self->_storage->{filter_store_value} ) { + $value = $self->_storage->{filter_store_value}->( $value ); } $self->_engine->write_value( $self->_base_offset, $key, $value, $orig_key ); @@ -518,8 +518,8 @@ sub FETCH { # Filters only apply to scalar values, so the ref check is making # sure the fetched bucket is a scalar, not a child hash or array. - return ($result && !ref($result) && $self->_fileobj->{filter_fetch_value}) - ? $self->_fileobj->{filter_fetch_value}->($result) + return ($result && !ref($result) && $self->_storage->{filter_fetch_value}) + ? $self->_storage->{filter_fetch_value}->($result) : $result; } @@ -538,10 +538,10 @@ sub DELETE { if ( defined $orig_key ) { my $lhs = $self->_find_parent; if ( $lhs ) { - $self->_fileobj->audit( "delete $lhs;" ); + $self->_storage->audit( "delete $lhs;" ); } else { - $self->_fileobj->audit( "\$db->delete('$orig_key');" ); + $self->_storage->audit( "\$db->delete('$orig_key');" ); } } @@ -555,8 +555,8 @@ sub DELETE { ## my $value = $self->_engine->delete_key( $self->_base_offset, $key, $orig_key ); - if (defined $value && !ref($value) && $self->_fileobj->{filter_fetch_value}) { - $value = $self->_fileobj->{filter_fetch_value}->($value); + if (defined $value && !ref($value) && $self->_storage->{filter_fetch_value}) { + $value = $self->_storage->{filter_fetch_value}->($value); } $self->unlock(); @@ -603,7 +603,7 @@ sub CLEAR { $lhs = '@{' . $lhs . '}'; } - $self->_fileobj->audit( "$lhs = ();" ); + $self->_storage->audit( "$lhs = ();" ); } ## @@ -1418,9 +1418,9 @@ This method can be called on the root level of the datbase, or any child hashes or arrays. 
All levels share a I structure, which contains things like the filehandle, a reference counter, and all the options specified when you created the object. You can get access to this file object by -calling the C<_fileobj()> method. +calling the C<_storage()> method. - my $file_obj = $db->_fileobj(); + my $file_obj = $db->_storage(); This is useful for changing options after the object has already been created, such as enabling/disabling locking. You can also store your own temporary user diff --git a/lib/DBM/Deep/Array.pm b/lib/DBM/Deep/Array.pm index 74dad6c..de78ec9 100644 --- a/lib/DBM/Deep/Array.pm +++ b/lib/DBM/Deep/Array.pm @@ -161,12 +161,12 @@ sub FETCHSIZE { $self->lock( $self->LOCK_SH ); - my $SAVE_FILTER = $self->_fileobj->{filter_fetch_value}; - $self->_fileobj->{filter_fetch_value} = undef; + my $SAVE_FILTER = $self->_storage->{filter_fetch_value}; + $self->_storage->{filter_fetch_value} = undef; my $packed_size = $self->FETCH('length'); - $self->_fileobj->{filter_fetch_value} = $SAVE_FILTER; + $self->_storage->{filter_fetch_value} = $SAVE_FILTER; $self->unlock; @@ -183,12 +183,12 @@ sub STORESIZE { $self->lock( $self->LOCK_EX ); - my $SAVE_FILTER = $self->_fileobj->{filter_store_value}; - $self->_fileobj->{filter_store_value} = undef; + my $SAVE_FILTER = $self->_storage->{filter_store_value}; + $self->_storage->{filter_store_value} = undef; my $result = $self->STORE('length', pack($self->_engine->{long_pack}, $new_length), 'length'); - $self->_fileobj->{filter_store_value} = $SAVE_FILTER; + $self->_storage->{filter_store_value} = $SAVE_FILTER; $self->unlock; diff --git a/lib/DBM/Deep/Engine.pm b/lib/DBM/Deep/Engine.pm index e0e9fc2..e863c9e 100644 --- a/lib/DBM/Deep/Engine.pm +++ b/lib/DBM/Deep/Engine.pm @@ -16,7 +16,7 @@ use Scalar::Util (); # - _get_key_subloc() # - add_bucket() - where the buckets are printed # -# * Every method in here assumes that the _fileobj has been appropriately +# * Every method in here assumes that the _storage has been appropriately # safeguarded. This can be anything from flock() to some sort of manual # mutex. But, it's the caller's responsability to make sure that this has # been done. @@ -43,15 +43,6 @@ sub SIG_SIZE () { 1 } # ################################################################################ -sub write_value { - my $self = shift; - my ($offset, $key, $value, $orig_key) = @_; - - my $dig_key = $self->apply_digest( $key ); - my $tag = $self->find_blist( $offset, $dig_key, { create => 1 } ); - return $self->add_bucket( $tag, $dig_key, $key, $value, undef, $orig_key ); -} - sub read_value { my $self = shift; my ($offset, $key, $orig_key) = @_; @@ -61,16 +52,39 @@ sub read_value { return $self->get_bucket_value( $tag, $dig_key, $orig_key ); } -sub delete_key { +=pod +sub read_value { my $self = shift; - my ($offset, $key, $orig_key) = @_; - - my $dig_key = $self->apply_digest( $key ); - my $tag = $self->find_blist( $offset, $dig_key ) or return; - my $value = $self->get_bucket_value( $tag, $dig_key, $orig_key ); - $self->delete_bucket( $tag, $dig_key, $orig_key ); - return $value; + my ($trans_id, $base_offset, $key) = @_; + + my ($_val_offset, $_is_del) = $self->_find_value_offset({ + offset => $base_offset, + trans_id => $trans_id, + allow_head => 1, + }); + die "Attempt to use a deleted value" if $_is_del; + die "Internal error!" 
if !$_val_offset; + + my ($key_offset) = $self->_find_key_offset({ + offset => $_val_offset, + key_md5 => $self->_apply_digest( $key ), + create => 0, + }); + return if !$key_offset; + + my ($val_offset, $is_del) = $self->_find_value_offset({ + offset => $key_offset, + trans_id => $trans_id, + allow_head => 1, + }); + return if $is_del; + die "Internal error!" if !$val_offset; + + return $self->_read_value({ + offset => $val_offset, + }); } +=cut sub key_exists { my $self = shift; @@ -82,6 +96,39 @@ sub key_exists { return $self->bucket_exists( $tag, $dig_key, $key ); } +=pod +sub key_exists { + my $self = shift; + my ($trans_id, $base_offset, $key) = @_; + + my ($_val_offset, $_is_del) = $self->_find_value_offset({ + offset => $base_offset, + trans_id => $trans_id, + allow_head => 1, + }); + die "Attempt to use a deleted value" if $_is_del; + die "Internal error!" if !$_val_offset; + + my ($key_offset) = $self->_find_key_offset({ + offset => $_val_offset, + key_md5 => $self->_apply_digest( $key ), + create => 0, + }); + return if !$key_offset; + + my ($val_offset, $is_del) = $self->_find_value_offset({ + offset => $key_offset, + trans_id => $trans_id, + allow_head => 1, + }); + + return 1 if $is_del; + + die "Internal error!" if !$_val_offset; + return ''; +} +=cut + sub get_next_key { my $self = shift; my ($offset) = @_; @@ -105,6 +152,95 @@ sub get_next_key { return $self->traverse_index( $temp, $offset, 0 ); } +sub delete_key { + my $self = shift; + my ($offset, $key, $orig_key) = @_; + + my $dig_key = $self->apply_digest( $key ); + my $tag = $self->find_blist( $offset, $dig_key ) or return; + my $value = $self->get_bucket_value( $tag, $dig_key, $orig_key ); + $self->delete_bucket( $tag, $dig_key, $orig_key ); + return $value; +} + +=pod +sub delete_key { + my $self = shift; + my ($trans_id, $base_offset, $key) = @_; + + my ($_val_offset, $_is_del) = $self->_find_value_offset({ + offset => $base_offset, + trans_id => $trans_id, + allow_head => 1, + }); + die "Attempt to use a deleted value" if $_is_del; + die "Internal error!" if !$_val_offset; + + my ($key_offset) = $self->_find_key_offset({ + offset => $_val_offset, + key_md5 => $self->_apply_digest( $key ), + create => 0, + }); + return if !$key_offset; + + if ( $trans_id ) { + $self->_mark_as_deleted({ + offset => $key_offset, + trans_id => $trans_id, + }); + } + else { + my $value = $self->read_value( $trans_id, $base_offset, $key ); + if ( @transactions ) { + foreach my $other_trans_id ( @transactions ) { + #XXX Finish this! + # next if the $trans_id has an entry in the keyloc + # store $value for $other_trans_id + } + } + else { + $self->_remove_key_offset({ + offset => $_val_offset, + key_md5 => $self->_apply_digest( $key ), + }); + } + } +} +=cut + +sub write_value { + my $self = shift; + my ($offset, $key, $value, $orig_key) = @_; + + my $dig_key = $self->apply_digest( $key ); + my $tag = $self->find_blist( $offset, $dig_key, { create => 1 } ); + return $self->add_bucket( $tag, $dig_key, $key, $value, undef, $orig_key ); +} + +=pod +sub write_value { + my $self = shift; + my ($trans_id, $base_offset, $key) = @_; + + my ($_val_offset, $_is_del) = $self->_find_value_offset({ + offset => $base_offset, + trans_id => $trans_id, + allow_head => 1, + }); + die "Attempt to use a deleted value" if $_is_del; + die "Internal error!" 
if !$_val_offset; + + my ($key_offset, $is_new) = $self->_find_key_offset({ + offset => $_val_offset, + key_md5 => $self->_apply_digest( $key ), + create => 1, + }); + die "Cannot find/create new key offset!" if !$key_offset; + + +} +=cut + ################################################################################ # # Below here is the old code. It will be folded into the code above as it can. @@ -132,7 +268,7 @@ sub new { ## max_buckets => 16, - fileobj => undef, + storage => undef, obj => undef, }, $class; @@ -169,7 +305,7 @@ sub new { return $self; } -sub _fileobj { return $_[0]{fileobj} } +sub _storage { return $_[0]{storage} } sub apply_digest { my $self = shift; @@ -197,9 +333,9 @@ sub calculate_sizes { sub write_file_header { my $self = shift; - my $loc = $self->_fileobj->request_space( length( SIG_FILE ) + 33 ); + my $loc = $self->_storage->request_space( length( SIG_FILE ) + 33 ); - $self->_fileobj->print_at( $loc, + $self->_storage->print_at( $loc, SIG_FILE, SIG_HEADER, pack('N', 1), # header version @@ -212,7 +348,7 @@ sub write_file_header { pack('n', $self->{max_buckets}), ); - $self->_fileobj->set_transaction_offset( 13 ); + $self->_storage->set_transaction_offset( 13 ); return; } @@ -220,7 +356,7 @@ sub write_file_header { sub read_file_header { my $self = shift; - my $buffer = $self->_fileobj->read_at( 0, length(SIG_FILE) + 9 ); + my $buffer = $self->_storage->read_at( 0, length(SIG_FILE) + 9 ); return unless length($buffer); my ($file_signature, $sig_header, $header_version, $size) = unpack( @@ -228,22 +364,22 @@ sub read_file_header { ); unless ( $file_signature eq SIG_FILE ) { - $self->_fileobj->close; + $self->_storage->close; $self->_throw_error( "Signature not found -- file is not a Deep DB" ); } unless ( $sig_header eq SIG_HEADER ) { - $self->_fileobj->close; + $self->_storage->close; $self->_throw_error( "Old file version found." 
); } - my $buffer2 = $self->_fileobj->read_at( undef, $size ); + my $buffer2 = $self->_storage->read_at( undef, $size ); my ($a1, $a2, $a3, $a4, @values) = unpack( 'N4 n A n A n', $buffer2 ); - $self->_fileobj->set_transaction_offset( 13 ); + $self->_storage->set_transaction_offset( 13 ); if ( @values < 5 || grep { !defined } @values ) { - $self->_fileobj->close; + $self->_storage->close; $self->_throw_error("Corrupted file - bad header"); } @@ -258,7 +394,7 @@ sub setup_fh { my ($obj) = @_; # Need to remove use of $fh here - my $fh = $self->_fileobj->{fh}; + my $fh = $self->_storage->{fh}; flock $fh, LOCK_EX; #XXX The duplication of calculate_sizes needs to go away @@ -271,11 +407,11 @@ sub setup_fh { # File is empty -- write header and master index ## if (!$bytes_read) { - $self->_fileobj->audit( "# Database created on" ); + $self->_storage->audit( "# Database created on" ); $self->write_file_header; - $obj->{base_offset} = $self->_fileobj->request_space( + $obj->{base_offset} = $self->_storage->request_space( $self->tag_size( $self->{index_size} ), ); @@ -312,7 +448,7 @@ sub setup_fh { } #XXX We have to make sure we don't mess up when autoflush isn't turned on - $self->_fileobj->set_inode; + $self->_storage->set_inode; flock $fh, LOCK_UN; @@ -333,7 +469,7 @@ sub write_tag { my ($offset, $sig, $content) = @_; my $size = length( $content ); - $self->_fileobj->print_at( + $self->_storage->print_at( $offset, $sig, pack($self->{data_pack}, $size), $content, ); @@ -356,25 +492,25 @@ sub load_tag { my $self = shift; my ($offset) = @_; - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; my ($sig, $size) = unpack( "A $self->{data_pack}", - $fileobj->read_at( $offset, SIG_SIZE + $self->{data_size} ), + $storage->read_at( $offset, SIG_SIZE + $self->{data_size} ), ); return { signature => $sig, size => $size, #XXX Is this even used? offset => $offset + SIG_SIZE + $self->{data_size}, - content => $fileobj->read_at( undef, $size ), + content => $storage->read_at( undef, $size ), }; } sub find_keyloc { my $self = shift; my ($tag, $transaction_id) = @_; - $transaction_id = $self->_fileobj->transaction_id + $transaction_id = $self->_storage->transaction_id unless defined $transaction_id; for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) { @@ -416,14 +552,14 @@ sub add_bucket { ); } - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; #ACID - This is a mutation. 
Must only find the exact transaction my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5, 1 ); my @transactions; - if ( $fileobj->transaction_id == 0 ) { - @transactions = $fileobj->current_transactions; + if ( $storage->transaction_id == 0 ) { + @transactions = $storage->current_transactions; } # $self->_release_space( $size, $subloc ); @@ -444,8 +580,8 @@ sub add_bucket { for my $trans_id ( @transactions ) { my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id ); unless ($loc) { - my $location2 = $fileobj->request_space( $old_size ); - $fileobj->print_at( $keytag->{offset} + $offset2, + my $location2 = $storage->request_space( $old_size ); + $storage->print_at( $keytag->{offset} + $offset2, pack($self->{long_pack}, $location2 ), pack( 'C C', $trans_id, 0 ), ); @@ -454,20 +590,20 @@ sub add_bucket { } } - $location = $self->_fileobj->request_space( $size ); + $location = $self->_storage->request_space( $size ); #XXX This needs to be transactionally-aware in terms of which keytag->{offset} to use - $fileobj->print_at( $keytag->{offset} + $offset, + $storage->print_at( $keytag->{offset} + $offset, pack($self->{long_pack}, $location ), - pack( 'C C', $fileobj->transaction_id, 0 ), + pack( 'C C', $storage->transaction_id, 0 ), ); } # Adding a new md5 else { - my $keyloc = $fileobj->request_space( $self->tag_size( $self->{keyloc_size} ) ); + my $keyloc = $storage->request_space( $self->tag_size( $self->{keyloc_size} ) ); # The bucket fit into list if ( defined $offset ) { - $fileobj->print_at( $tag->{offset} + $offset, + $storage->print_at( $tag->{offset} + $offset, $md5, pack( $self->{long_pack}, $keyloc ), ); } @@ -480,15 +616,15 @@ sub add_bucket { $keyloc, SIG_KEYS, chr(0)x$self->{keyloc_size}, ); - $location = $self->_fileobj->request_space( $size ); - $fileobj->print_at( $keytag->{offset}, + $location = $self->_storage->request_space( $size ); + $storage->print_at( $keytag->{offset}, pack( $self->{long_pack}, $location ), - pack( 'C C', $fileobj->transaction_id, 0 ), + pack( 'C C', $storage->transaction_id, 0 ), ); my $offset = 1; for my $trans_id ( @transactions ) { - $fileobj->print_at( $keytag->{offset} + $self->{key_size} * $offset++, + $storage->print_at( $keytag->{offset} + $self->{key_size} * $offset++, pack( $self->{long_pack}, 0 ), pack( 'C C', $trans_id, 1 ), ); @@ -504,10 +640,10 @@ sub _write_value { my $self = shift; my ($location, $key, $value, $orig_key) = @_; - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; my $dbm_deep_obj = _get_dbm_object( $value ); - if ( $dbm_deep_obj && $dbm_deep_obj->_fileobj ne $fileobj ) { + if ( $dbm_deep_obj && $dbm_deep_obj->_storage ne $storage ) { $self->_throw_error( "Cannot cross-reference. Use export() instead" ); } @@ -541,7 +677,7 @@ sub _write_value { ## # Plain key is stored AFTER value, as keys are typically fetched less often. ## - $fileobj->print_at( undef, pack($self->{data_pack}, length($key)) . $key ); + $storage->print_at( undef, pack($self->{data_pack}, length($key)) . $key ); # Internal references don't care about autobless return 1 if $dbm_deep_obj; @@ -549,12 +685,12 @@ sub _write_value { ## # If value is blessed, preserve class name ## - if ( $fileobj->{autobless} ) { + if ( $storage->{autobless} ) { if ( defined( my $c = Scalar::Util::blessed($value) ) ) { - $fileobj->print_at( undef, chr(1), pack($self->{data_pack}, length($c)) . $c ); + $storage->print_at( undef, chr(1), pack($self->{data_pack}, length($c)) . 
$c ); } else { - $fileobj->print_at( undef, chr(0) ); + $storage->print_at( undef, chr(0) ); } } @@ -570,7 +706,7 @@ sub _write_value { my %x = %$value; tie %$value, 'DBM::Deep', { base_offset => $location, - fileobj => $fileobj, + storage => $storage, parent => $self->{obj}, parent_key => $orig_key, }; @@ -581,7 +717,7 @@ sub _write_value { my @x = @$value; tie @$value, 'DBM::Deep', { base_offset => $location, - fileobj => $fileobj, + storage => $storage, parent => $self->{obj}, parent_key => $orig_key, }; @@ -596,13 +732,13 @@ sub split_index { my $self = shift; my ($tag, $md5, $keyloc) = @_; - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; - my $loc = $fileobj->request_space( + my $loc = $storage->request_space( $self->tag_size( $self->{index_size} ), ); - $fileobj->print_at( $tag->{ref_loc}, pack($self->{long_pack}, $loc) ); + $storage->print_at( $tag->{ref_loc}, pack($self->{long_pack}, $loc) ); my $index_tag = $self->write_tag( $loc, SIG_INDEX, @@ -625,14 +761,14 @@ sub split_index { my $num = ord(substr($key, $tag->{ch} + 1, 1)); if ($newloc[$num]) { - my $subkeys = $fileobj->read_at( $newloc[$num], $self->{bucket_list_size} ); + my $subkeys = $storage->read_at( $newloc[$num], $self->{bucket_list_size} ); # This is looking for the first empty spot my ($subloc, $offset) = $self->_find_in_buckets( { content => $subkeys }, '', ); - $fileobj->print_at( + $storage->print_at( $newloc[$num] + $offset, $key, pack($self->{long_pack}, $old_subloc), ); @@ -640,11 +776,11 @@ sub split_index { next; } - my $loc = $fileobj->request_space( + my $loc = $storage->request_space( $self->tag_size( $self->{bucket_list_size} ), ); - $fileobj->print_at( + $storage->print_at( $index_tag->{offset} + ($num * $self->{long_size}), pack($self->{long_pack}, $loc), ); @@ -654,7 +790,7 @@ sub split_index { chr(0)x$self->{bucket_list_size}, ); - $fileobj->print_at( $blist_tag->{offset}, $key . pack($self->{long_pack}, $old_subloc) ); + $storage->print_at( $blist_tag->{offset}, $key . 
pack($self->{long_pack}, $old_subloc) ); $newloc[$num] = $blist_tag->{offset}; } @@ -671,9 +807,9 @@ sub read_from_loc { my $self = shift; my ($subloc, $orig_key) = @_; - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; - my $signature = $fileobj->read_at( $subloc, SIG_SIZE ); + my $signature = $storage->read_at( $subloc, SIG_SIZE ); ## # If value is a hash or array, return new DBM::Deep object with correct offset @@ -686,21 +822,21 @@ sub read_from_loc { # $new_obj = {}; # tie %$new_obj, 'DBM::Deep', { # base_offset => $subloc, -# fileobj => $self->_fileobj, +# storage => $self->_storage, # parent => $self->{obj}, # parent_key => $orig_key, # }; -# $is_autobless = tied(%$new_obj)->_fileobj->{autobless}; +# $is_autobless = tied(%$new_obj)->_storage->{autobless}; # } # else { # $new_obj = []; # tie @$new_obj, 'DBM::Deep', { # base_offset => $subloc, -# fileobj => $self->_fileobj, +# storage => $self->_storage, # parent => $self->{obj}, # parent_key => $orig_key, # }; -# $is_autobless = tied(@$new_obj)->_fileobj->{autobless}; +# $is_autobless = tied(@$new_obj)->_storage->{autobless}; # } # # if ($is_autobless) { @@ -708,31 +844,31 @@ sub read_from_loc { my $new_obj = DBM::Deep->new({ type => $signature, base_offset => $subloc, - fileobj => $self->_fileobj, + storage => $self->_storage, parent => $self->{obj}, parent_key => $orig_key, }); - if ($new_obj->_fileobj->{autobless}) { + if ($new_obj->_storage->{autobless}) { ## # Skip over value and plain key to see if object needs # to be re-blessed ## - $fileobj->increment_pointer( $self->{data_size} + $self->{index_size} ); + $storage->increment_pointer( $self->{data_size} + $self->{index_size} ); - my $size = $fileobj->read_at( undef, $self->{data_size} ); + my $size = $storage->read_at( undef, $self->{data_size} ); $size = unpack($self->{data_pack}, $size); - if ($size) { $fileobj->increment_pointer( $size ); } + if ($size) { $storage->increment_pointer( $size ); } - my $bless_bit = $fileobj->read_at( undef, 1 ); + my $bless_bit = $storage->read_at( undef, 1 ); if ( ord($bless_bit) ) { my $size = unpack( $self->{data_pack}, - $fileobj->read_at( undef, $self->{data_size} ), + $storage->read_at( undef, $self->{data_size} ), ); if ( $size ) { - $new_obj = bless $new_obj, $fileobj->read_at( undef, $size ); + $new_obj = bless $new_obj, $storage->read_at( undef, $size ); } } } @@ -740,11 +876,11 @@ sub read_from_loc { return $new_obj; } elsif ( $signature eq SIG_INTERNAL ) { - my $size = $fileobj->read_at( undef, $self->{data_size} ); + my $size = $storage->read_at( undef, $self->{data_size} ); $size = unpack($self->{data_pack}, $size); if ( $size ) { - my $new_loc = $fileobj->read_at( undef, $size ); + my $new_loc = $storage->read_at( undef, $size ); $new_loc = unpack( $self->{long_pack}, $new_loc ); return $self->read_from_loc( $new_loc, $orig_key ); } @@ -756,10 +892,10 @@ sub read_from_loc { # Otherwise return actual value ## elsif ( $signature eq SIG_DATA ) { - my $size = $fileobj->read_at( undef, $self->{data_size} ); + my $size = $storage->read_at( undef, $self->{data_size} ); $size = unpack($self->{data_pack}, $size); - my $value = $size ? $fileobj->read_at( undef, $size ) : ''; + my $value = $size ? 
$storage->read_at( undef, $size ) : ''; return $value; } @@ -812,14 +948,14 @@ sub delete_bucket { return if !$keyloc; - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; my @transactions; - if ( $fileobj->transaction_id == 0 ) { - @transactions = $fileobj->current_transactions; + if ( $storage->transaction_id == 0 ) { + @transactions = $storage->current_transactions; } - if ( $fileobj->transaction_id == 0 ) { + if ( $storage->transaction_id == 0 ) { my $keytag = $self->load_tag( $keyloc ); my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag ); @@ -832,8 +968,8 @@ sub delete_bucket { for my $trans_id ( @transactions ) { my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id ); unless ($loc) { - my $location2 = $fileobj->request_space( $size ); - $fileobj->print_at( $keytag->{offset} + $offset2, + my $location2 = $storage->request_space( $size ); + $storage->print_at( $keytag->{offset} + $offset2, pack($self->{long_pack}, $location2 ), pack( 'C C', $trans_id, 0 ), ); @@ -843,7 +979,7 @@ sub delete_bucket { $keytag = $self->load_tag( $keyloc ); ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag ); - $fileobj->print_at( $keytag->{offset} + $offset, + $storage->print_at( $keytag->{offset} + $offset, substr( $keytag->{content}, $offset + $self->{key_size} ), chr(0) x $self->{key_size}, ); @@ -853,9 +989,9 @@ sub delete_bucket { my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag ); - $fileobj->print_at( $keytag->{offset} + $offset, + $storage->print_at( $keytag->{offset} + $offset, pack($self->{long_pack}, 0 ), - pack( 'C C', $fileobj->transaction_id, 1 ), + pack( 'C C', $storage->transaction_id, 1 ), ); } @@ -903,11 +1039,11 @@ sub find_blist { if (!$tag) { return if !$args->{create}; - my $loc = $self->_fileobj->request_space( + my $loc = $self->_storage->request_space( $self->tag_size( $self->{bucket_list_size} ), ); - $self->_fileobj->print_at( $ref_loc, pack($self->{long_pack}, $loc) ); + $self->_storage->print_at( $ref_loc, pack($self->{long_pack}, $loc) ); $tag = $self->write_tag( $loc, SIG_BLIST, @@ -989,7 +1125,7 @@ sub traverse_index { ## # Iterate through buckets, looking for a key match ## - my $transaction_id = $self->_fileobj->transaction_id; + my $transaction_id = $self->_storage->transaction_id; for (my $i = 0; $i < $self->{max_buckets}; $i++) { my ($key, $keyloc) = $self->_get_key_subloc( $keys, $i ); @@ -1005,7 +1141,7 @@ sub traverse_index { } # Seek to bucket location and skip over signature elsif ($xxxx->{return_next}) { - my $fileobj = $self->_fileobj; + my $storage = $self->_storage; my $keytag = $self->load_tag( $keyloc ); my ($subloc, $is_deleted) = $self->find_keyloc( $keytag ); @@ -1015,18 +1151,18 @@ sub traverse_index { next if $is_deleted; # Skip over value to get to plain key - my $sig = $fileobj->read_at( $subloc, SIG_SIZE ); + my $sig = $storage->read_at( $subloc, SIG_SIZE ); - my $size = $fileobj->read_at( undef, $self->{data_size} ); + my $size = $storage->read_at( undef, $self->{data_size} ); $size = unpack($self->{data_pack}, $size); - if ($size) { $fileobj->increment_pointer( $size ); } + if ($size) { $storage->increment_pointer( $size ); } # Read in plain key and return as scalar - $size = $fileobj->read_at( undef, $self->{data_size} ); + $size = $storage->read_at( undef, $self->{data_size} ); $size = unpack($self->{data_pack}, $size); my $plain_key; - if ($size) { $plain_key = $fileobj->read_at( undef, $size); } + if ($size) { $plain_key = $storage->read_at( undef, $size); } return 
$plain_key; } } @@ -1085,7 +1221,7 @@ sub _release_space { my $next_loc = 0; - $self->_fileobj->print_at( $loc, + $self->_storage->print_at( $loc, SIG_FREE, pack($self->{long_pack}, $size ), pack($self->{long_pack}, $next_loc ), @@ -1151,12 +1287,12 @@ sub _length_needed { + $self->{data_size} # size for key + length( $key ); # length of key - if ( $is_dbm_deep && $value->_fileobj eq $self->_fileobj ) { + if ( $is_dbm_deep && $value->_storage eq $self->_storage ) { # long_size is for the internal reference return $len + $self->{long_size}; } - if ( $self->_fileobj->{autobless} ) { + if ( $self->_storage->{autobless} ) { # This is for the bit saying whether or not this thing is blessed. $len += 1; } @@ -1173,7 +1309,7 @@ sub _length_needed { # if autobless is enabled, must also take into consideration # the class name as it is stored after the key. - if ( $self->_fileobj->{autobless} ) { + if ( $self->_storage->{autobless} ) { my $c = Scalar::Util::blessed($value); if ( defined $c && !$is_dbm_deep ) { $len += $self->{data_size} + length($c); diff --git a/lib/DBM/Deep/Hash.pm b/lib/DBM/Deep/Hash.pm index ddaae9d..65775b8 100644 --- a/lib/DBM/Deep/Hash.pm +++ b/lib/DBM/Deep/Hash.pm @@ -46,8 +46,8 @@ sub TIEHASH { sub FETCH { my $self = shift->_get_self; - my $key = ($self->_fileobj->{filter_store_key}) - ? $self->_fileobj->{filter_store_key}->($_[0]) + my $key = ($self->_storage->{filter_store_key}) + ? $self->_storage->{filter_store_key}->($_[0]) : $_[0]; return $self->SUPER::FETCH( $key, $_[0] ); @@ -55,8 +55,8 @@ sub FETCH { sub STORE { my $self = shift->_get_self; - my $key = ($self->_fileobj->{filter_store_key}) - ? $self->_fileobj->{filter_store_key}->($_[0]) + my $key = ($self->_storage->{filter_store_key}) + ? $self->_storage->{filter_store_key}->($_[0]) : $_[0]; my $value = $_[1]; @@ -65,8 +65,8 @@ sub STORE { sub EXISTS { my $self = shift->_get_self; - my $key = ($self->_fileobj->{filter_store_key}) - ? $self->_fileobj->{filter_store_key}->($_[0]) + my $key = ($self->_storage->{filter_store_key}) + ? $self->_storage->{filter_store_key}->($_[0]) : $_[0]; return $self->SUPER::EXISTS( $key ); @@ -74,8 +74,8 @@ sub EXISTS { sub DELETE { my $self = shift->_get_self; - my $key = ($self->_fileobj->{filter_store_key}) - ? $self->_fileobj->{filter_store_key}->($_[0]) + my $key = ($self->_storage->{filter_store_key}) + ? $self->_storage->{filter_store_key}->($_[0]) : $_[0]; return $self->SUPER::DELETE( $key, $_[0] ); @@ -96,8 +96,8 @@ sub FIRSTKEY { $self->unlock(); - return ($result && $self->_fileobj->{filter_fetch_key}) - ? $self->_fileobj->{filter_fetch_key}->($result) + return ($result && $self->_storage->{filter_fetch_key}) + ? $self->_storage->{filter_fetch_key}->($result) : $result; } @@ -107,8 +107,8 @@ sub NEXTKEY { ## my $self = shift->_get_self; - my $prev_key = ($self->_fileobj->{filter_store_key}) - ? $self->_fileobj->{filter_store_key}->($_[0]) + my $prev_key = ($self->_storage->{filter_store_key}) + ? $self->_storage->{filter_store_key}->($_[0]) : $_[0]; ## @@ -120,8 +120,8 @@ sub NEXTKEY { $self->unlock(); - return ($result && $self->_fileobj->{filter_fetch_key}) - ? $self->_fileobj->{filter_fetch_key}->($result) + return ($result && $self->_storage->{filter_fetch_key}) + ? 
$self->_storage->{filter_fetch_key}->($result) : $result; } diff --git a/t/23_misc.t b/t/23_misc.t index c2137b8..c46064c 100644 --- a/t/23_misc.t +++ b/t/23_misc.t @@ -16,7 +16,7 @@ is( $db->{key1}, "value1", "Value set correctly" ); # Testing to verify that the close() will occur if open is called on an open DB. #XXX WOW is this hacky ... -$db->_get_self->_fileobj->open; +$db->_get_self->_storage->open; is( $db->{key1}, "value1", "Value still set after re-open" ); throws_ok { @@ -32,7 +32,7 @@ throws_ok { file => $filename, locking => 1, ); - $db->_get_self->_fileobj->close( $db->_get_self ); + $db->_get_self->_storage->close( $db->_get_self ); ok( !$db->lock, "Calling lock() on a closed database returns false" ); } @@ -42,6 +42,6 @@ throws_ok { locking => 1, ); $db->lock; - $db->_get_self->_fileobj->close( $db->_get_self ); + $db->_get_self->_storage->close( $db->_get_self ); ok( !$db->unlock, "Calling unlock() on a closed database returns false" ); } diff --git a/t/27_filehandle.t b/t/27_filehandle.t index 4b7196f..7ae1a52 100644 --- a/t/27_filehandle.t +++ b/t/27_filehandle.t @@ -33,7 +33,7 @@ use_ok( 'DBM::Deep' ); ok( !$db->exists( 'foo' ), "foo doesn't exist" ); my $db_obj = $db->_get_self; - ok( $db_obj->_fileobj->{inode}, "The inode has been set" ); + ok( $db_obj->_storage->{inode}, "The inode has been set" ); close($fh); } diff --git a/t/33_transactions.t b/t/33_transactions.t index 6f813b2..bde1f0e 100644 --- a/t/33_transactions.t +++ b/t/33_transactions.t @@ -147,7 +147,7 @@ SKIP: { $db1->begin_work; - cmp_ok( $db1->_fileobj->transaction_id, '==', 1, "Transaction ID has been reset after optimize" ); + cmp_ok( $db1->_storage->transaction_id, '==', 1, "Transaction ID has been reset after optimize" ); $db1->rollback; }
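
Usage note (illustrative only, not part of the patch): the POD hunk above simply renames C<_fileobj()> to C<_storage()>, so the following is a minimal sketch of how a caller might use the renamed accessor together with the delegated lock()/unlock() calls and the filter slots touched elsewhere in this change. The file name and key are hypothetical.

    use strict;
    use warnings;
    use DBM::Deep;

    # Hypothetical database; locking implicitly enables autoflush.
    my $db = DBM::Deep->new(
        file    => "example.db",
        locking => 1,
    );

    # What used to be $db->_fileobj() is now $db->_storage().
    my $storage = $db->_storage();

    # lock()/unlock() now delegate to the storage object; as the Engine
    # comment notes, every Engine method assumes the caller has
    # safeguarded storage like this.
    $db->lock();

    # Temporarily bypass the fetch filter -- the same save/restore
    # pattern DBM::Deep::Array::FETCHSIZE uses internally.
    my $saved = $storage->{filter_fetch_value};
    $storage->{filter_fetch_value} = undef;
    my $raw = $db->get('some_key');    # hypothetical key
    $storage->{filter_fetch_value} = $saved;

    $db->unlock();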
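
Likewise, begin_work(), rollback() and commit() now route through the storage object's transaction methods (exercised by t/33_transactions.t above). A minimal sketch, assuming the documented hybrid tie/OO interface; the file name is hypothetical.

    use strict;
    use warnings;
    use DBM::Deep;

    my $db = DBM::Deep->new( file => "trans.db", locking => 1 );

    $db->{key} = 'before';

    $db->begin_work;        # delegates to $db->_storage->begin_transaction
    $db->{key} = 'inside';
    $db->rollback;          # delegates to $db->_storage->end_transaction

    # The pre-transaction value should be visible again here.
    print $db->{key}, "\n";

    $db->begin_work;
    $db->{key} = 'after';
    $db->commit;            # delegates to $db->_storage->commit_transaction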