package DBM::Deep::Engine;
-use 5.6.0;
+use 5.006_000;
use strict;
-use warnings;
+use warnings FATAL => 'all';
-our $VERSION = q(0.99_03);
-
-use Fcntl qw( :DEFAULT :flock );
+# Never import symbols into our namespace. We are a class, not a library.
+# -RobK, 2008-05-27
use Scalar::Util ();
+#use Data::Dumper ();
+
# File-wide notes:
-# * To add to bucket_size, make sure you modify the following:
-# - calculate_sizes()
-# - _get_key_subloc()
-# - add_bucket() - where the buckets are printed
-#
-# * Every method in here assumes that the _fileobj has been appropriately
+# * Every method in here assumes that the storage has been appropriately
# safeguarded. This can be anything from flock() to some sort of manual
# mutex. But, it's the caller's responsibility to make sure that this has
# been done.
-##
# Setup file and tag signatures. These should never change.
-##
sub SIG_FILE () { 'DPDB' }
sub SIG_HEADER () { 'h' }
-sub SIG_INTERNAL () { 'i' }
sub SIG_HASH () { 'H' }
sub SIG_ARRAY () { 'A' }
sub SIG_NULL () { 'N' }
sub SIG_INDEX () { 'I' }
sub SIG_BLIST () { 'B' }
sub SIG_FREE () { 'F' }
-sub SIG_KEYS () { 'K' }
sub SIG_SIZE () { 1 }
-################################################################################
-#
-# This is new code. It is a complete rewrite of the engine based on a new API
-#
-################################################################################
-
-sub write_value {
- my $self = shift;
- my ($offset, $key, $value, $orig_key) = @_;
-
- my $dig_key = $self->apply_digest( $key );
- my $tag = $self->find_blist( $offset, $dig_key, { create => 1 } );
- return $self->add_bucket( $tag, $dig_key, $key, $value, undef, $orig_key );
-}
-
-sub read_value {
- my $self = shift;
- my ($offset, $key, $orig_key) = @_;
-
- my $dig_key = $self->apply_digest( $key );
- my $tag = $self->find_blist( $offset, $dig_key ) or return;
- return $self->get_bucket_value( $tag, $dig_key, $orig_key );
-}
-
-sub delete_key {
- my $self = shift;
- my ($offset, $key, $orig_key) = @_;
-
- my $dig_key = $self->apply_digest( $key );
- my $tag = $self->find_blist( $offset, $dig_key ) or return;
- my $value = $self->get_bucket_value( $tag, $dig_key, $orig_key );
- $self->delete_bucket( $tag, $dig_key, $orig_key );
- return $value;
-}
-
-sub key_exists {
- my $self = shift;
- my ($offset, $key) = @_;
-
- my $dig_key = $self->apply_digest( $key );
- # exists() returns the empty string, not undef
- my $tag = $self->find_blist( $offset, $dig_key ) or return '';
- return $self->bucket_exists( $tag, $dig_key, $key );
-}
-
-sub get_next_key {
- my $self = shift;
- my ($offset) = @_;
-
- # If the previous key was not specifed, start at the top and
- # return the first one found.
- my $temp;
- if ( @_ > 1 ) {
- $temp = {
- prev_md5 => $self->apply_digest($_[1]),
- return_next => 0,
- };
- }
- else {
- $temp = {
- prev_md5 => chr(0) x $self->{hash_size},
- return_next => 1,
- };
- }
-
- return $self->traverse_index( $temp, $offset, 0 );
-}
+our $STALE_SIZE = 2;
+
+# Please refer to the pack() documentation for further information
+my %StP = (
+ 1 => 'C', # Unsigned char value (no order needed as it's just one byte)
+ 2 => 'n', # Unsigned short in "network" (big-endian) order
+ 4 => 'N', # Unsigned long in "network" (big-endian) order
+ 8 => 'Q', # Unsigned quad (no order specified, presumably machine-dependent)
+);
+sub StP { $StP{$_[1]} }
+
+# Import these after the SIG_* definitions because those definitions are used
+# in the headers of these classes. -RobK, 2008-06-20
+use DBM::Deep::Engine::Sector::BucketList;
+use DBM::Deep::Engine::Sector::FileHeader;
+use DBM::Deep::Engine::Sector::Index;
+use DBM::Deep::Engine::Sector::Null;
+use DBM::Deep::Engine::Sector::Reference;
+use DBM::Deep::Engine::Sector::Scalar;
+use DBM::Deep::Iterator;
################################################################################
-#
-# Below here is the old code. It will be folded into the code above as it can.
-#
-################################################################################
sub new {
my $class = shift;
my ($args) = @_;
+ $args->{storage} = DBM::Deep::File->new( $args )
+ unless exists $args->{storage};
+
my $self = bless {
- long_size => 4,
- long_pack => 'N',
- data_size => 4,
- data_pack => 'N',
-
- digest => \&Digest::MD5::md5,
- hash_size => 16, # In bytes
-
- ##
- # Number of buckets per blist before another level of indexing is
- # done. Increase this value for slightly greater speed, but larger database
- # files. DO NOT decrease this value below 16, due to risk of recursive
- # reindex overrun.
- ##
+ byte_size => 4,
+
+ digest => undef,
+ hash_size => 16, # In bytes
+ hash_chars => 256, # Number of chars the algorithm uses per byte
max_buckets => 16,
+ num_txns => 1, # The HEAD
+ trans_id => 0, # Default to the HEAD
+
+ data_sector_size => 64, # Size in bytes of each data sector
- fileobj => undef,
- obj => undef,
+ entries => {}, # This is the list of entries for transactions
+ storage => undef,
}, $class;
+ # Never allow byte_size to be set directly.
+ delete $args->{byte_size};
if ( defined $args->{pack_size} ) {
if ( lc $args->{pack_size} eq 'small' ) {
- $args->{long_size} = 2;
- $args->{long_pack} = 'n';
+ $args->{byte_size} = 2;
}
elsif ( lc $args->{pack_size} eq 'medium' ) {
- $args->{long_size} = 4;
- $args->{long_pack} = 'N';
+ $args->{byte_size} = 4;
}
elsif ( lc $args->{pack_size} eq 'large' ) {
- $args->{long_size} = 8;
- $args->{long_pack} = 'Q';
+ $args->{byte_size} = 8;
}
else {
- die "Unknown pack_size value: '$args->{pack_size}'\n";
+ DBM::Deep->_throw_error( "Unknown pack_size value: '$args->{pack_size}'" );
}
}
next unless exists $args->{$param};
$self->{$param} = $args->{$param};
}
- Scalar::Util::weaken( $self->{obj} ) if $self->{obj};
- if ( $self->{max_buckets} < 16 ) {
- warn "Floor of max_buckets is 16. Setting it to 16 from '$self->{max_buckets}'\n";
- $self->{max_buckets} = 16;
+ my %validations = (
+ max_buckets => { floor => 16, ceil => 256 },
+ num_txns => { floor => 1, ceil => 255 },
+ data_sector_size => { floor => 32, ceil => 256 },
+ );
+
+ while ( my ($attr, $c) = each %validations ) {
+ if ( !defined $self->{$attr}
+ || !length $self->{$attr}
+ || $self->{$attr} =~ /\D/
+ || $self->{$attr} < $c->{floor}
+ ) {
+ $self->{$attr} = '(undef)' if !defined $self->{$attr};
+ warn "Floor of $attr is $c->{floor}. Setting it to $c->{floor} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{floor};
+ }
+ elsif ( $self->{$attr} > $c->{ceil} ) {
+ warn "Ceiling of $attr is $c->{ceil}. Setting it to $c->{ceil} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{ceil};
+ }
+ }
+
+ if ( !$self->{digest} ) {
+ require Digest::MD5;
+ $self->{digest} = \&Digest::MD5::md5;
}
return $self;
}
-sub _fileobj { return $_[0]{fileobj} }
-
-sub apply_digest {
- my $self = shift;
- return $self->{digest}->(@_);
-}
+################################################################################
-sub calculate_sizes {
+sub read_value {
my $self = shift;
+ my ($obj, $key) = @_;
- # The 2**8 here indicates the number of different characters in the
- # current hashing algorithm
- #XXX Does this need to be updated with different hashing algorithms?
- $self->{hash_chars_used} = (2**8);
- $self->{index_size} = $self->{hash_chars_used} * $self->{long_size};
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return;
- $self->{bucket_size} = $self->{hash_size} + $self->{long_size} * 2;
- $self->{bucket_list_size} = $self->{max_buckets} * $self->{bucket_size};
-
- $self->{key_size} = $self->{long_size} * 2;
- $self->{keyloc_size} = $self->{max_buckets} * $self->{key_size};
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
- return;
-}
+ my $key_md5 = $self->_apply_digest( $key );
-sub write_file_header {
- my $self = shift;
+ my $value_sector = $sector->get_data_for({
+ key_md5 => $key_md5,
+ allow_head => 1,
+ });
- my $loc = $self->_fileobj->request_space( length( SIG_FILE ) + 33 );
-
- $self->_fileobj->print_at( $loc,
- SIG_FILE,
- SIG_HEADER,
- pack('N', 1), # header version
- pack('N', 24), # header size
- pack('N4', 0, 0, 0, 0), # currently running transaction IDs
- pack('n', $self->{long_size}),
- pack('A', $self->{long_pack}),
- pack('n', $self->{data_size}),
- pack('A', $self->{data_pack}),
- pack('n', $self->{max_buckets}),
- );
+ unless ( $value_sector ) {
+ $value_sector = DBM::Deep::Engine::Sector::Null->new({
+ engine => $self,
+ data => undef,
+ });
- $self->_fileobj->set_transaction_offset( 13 );
+ $sector->write_data({
+ key_md5 => $key_md5,
+ key => $key,
+ value => $value_sector,
+ });
+ }
- return;
+ return $value_sector->data;
}
-sub read_file_header {
+sub get_classname {
my $self = shift;
+ my ($obj) = @_;
- my $buffer = $self->_fileobj->read_at( 0, length(SIG_FILE) + 9 );
- return unless length($buffer);
-
- my ($file_signature, $sig_header, $header_version, $size) = unpack(
- 'A4 A N N', $buffer
- );
-
- unless ( $file_signature eq SIG_FILE ) {
- $self->_fileobj->close;
- $self->_throw_error( "Signature not found -- file is not a Deep DB" );
- }
-
- unless ( $sig_header eq SIG_HEADER ) {
- $self->_fileobj->close;
- $self->_throw_error( "Old file version found." );
- }
-
- my $buffer2 = $self->_fileobj->read_at( undef, $size );
- my ($a1, $a2, $a3, $a4, @values) = unpack( 'N4 n A n A n', $buffer2 );
-
- $self->_fileobj->set_transaction_offset( 13 );
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "How did get_classname fail (no sector for '$obj')?!" );
- if ( @values < 5 || grep { !defined } @values ) {
- $self->_fileobj->close;
- $self->_throw_error("Corrupted file - bad header");
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
}
- #XXX Add warnings if values weren't set right
- @{$self}{qw(long_size long_pack data_size data_pack max_buckets)} = @values;
-
- return length($buffer) + length($buffer2);
+ return $sector->get_classname;
}
-sub setup_fh {
+sub make_reference {
my $self = shift;
- my ($obj) = @_;
+ my ($obj, $old_key, $new_key) = @_;
- # Need to remove use of $fh here
- my $fh = $self->_fileobj->{fh};
- flock $fh, LOCK_EX;
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "How did make_reference fail (no sector for '$obj')?!" );
- #XXX The duplication of calculate_sizes needs to go away
- unless ( $obj->{base_offset} ) {
- my $bytes_read = $self->read_file_header;
-
- $self->calculate_sizes;
-
- ##
- # File is empty -- write header and master index
- ##
- if (!$bytes_read) {
- $self->_fileobj->audit( "# Database created on" );
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
+ }
- $self->write_file_header;
+ my $old_md5 = $self->_apply_digest( $old_key );
- $obj->{base_offset} = $self->_fileobj->request_space(
- $self->tag_size( $self->{index_size} ),
- );
+ my $value_sector = $sector->get_data_for({
+ key_md5 => $old_md5,
+ allow_head => 1,
+ });
- $self->write_tag(
- $obj->_base_offset, $obj->_type,
- chr(0)x$self->{index_size},
- );
+ unless ( $value_sector ) {
+ $value_sector = DBM::Deep::Engine::Sector::Null->new({
+ engine => $self,
+ data => undef,
+ });
- # Flush the filehandle
- my $old_fh = select $fh;
- my $old_af = $|; $| = 1; $| = $old_af;
- select $old_fh;
- }
- else {
- $obj->{base_offset} = $bytes_read;
-
- ##
- # Get our type from master index header
- ##
- my $tag = $self->load_tag($obj->_base_offset);
- unless ( $tag ) {
- flock $fh, LOCK_UN;
- $self->_throw_error("Corrupted file, no master index record");
- }
+ $sector->write_data({
+ key_md5 => $old_md5,
+ key => $old_key,
+ value => $value_sector,
+ });
+ }
- unless ($obj->_type eq $tag->{signature}) {
- flock $fh, LOCK_UN;
- $self->_throw_error("File type mismatch");
- }
- }
+ if ( $value_sector->isa( 'DBM::Deep::Engine::Sector::Reference' ) ) {
+ $sector->write_data({
+ key => $new_key,
+ key_md5 => $self->_apply_digest( $new_key ),
+ value => $value_sector,
+ });
+ $value_sector->increment_refcount;
}
else {
- $self->calculate_sizes;
+ $sector->write_data({
+ key => $new_key,
+ key_md5 => $self->_apply_digest( $new_key ),
+ value => $value_sector->clone,
+ });
}
-
- #XXX We have to make sure we don't mess up when autoflush isn't turned on
- $self->_fileobj->set_inode;
-
- flock $fh, LOCK_UN;
-
- return 1;
}
-sub tag_size {
+sub key_exists {
my $self = shift;
- my ($size) = @_;
- return SIG_SIZE + $self->{data_size} + $size;
-}
+ my ($obj, $key) = @_;
-sub write_tag {
- ##
- # Given offset, signature and content, create tag and write to disk
- ##
- my $self = shift;
- my ($offset, $sig, $content) = @_;
- my $size = length( $content );
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return '';
- $self->_fileobj->print_at(
- $offset,
- $sig, pack($self->{data_pack}, $size), $content,
- );
+ if ( $sector->staleness != $obj->_staleness ) {
+ return '';
+ }
- return unless defined $offset;
+ my $data = $sector->get_data_for({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 1,
+ });
- return {
- signature => $sig,
- #XXX Is this even used?
- size => $size,
- offset => $offset + SIG_SIZE + $self->{data_size},
- content => $content
- };
+ # exists() returns 1 or '' for true/false.
+ return $data ? 1 : '';
}
-sub load_tag {
- ##
- # Given offset, load single tag and return signature, size and data
- ##
- my $self = shift;
- my ($offset) = @_;
-
- my $fileobj = $self->_fileobj;
-
- my ($sig, $size) = unpack(
- "A $self->{data_pack}",
- $fileobj->read_at( $offset, SIG_SIZE + $self->{data_size} ),
- );
-
- return {
- signature => $sig,
- size => $size, #XXX Is this even used?
- offset => $offset + SIG_SIZE + $self->{data_size},
- content => $fileobj->read_at( undef, $size ),
- };
-}
-
-sub find_keyloc {
+sub delete_key {
my $self = shift;
- my ($tag, $transaction_id) = @_;
- $transaction_id = $self->_fileobj->transaction_id
- unless defined $transaction_id;
-
- for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) {
- my ($loc, $trans_id, $is_deleted) = unpack(
- "$self->{long_pack} C C",
- substr( $tag->{content}, $i * $self->{key_size}, $self->{key_size} ),
- );
+ my ($obj, $key) = @_;
- if ( $loc == 0 ) {
- return ( $loc, $is_deleted, $i * $self->{key_size} );
- }
-
- next if $transaction_id != $trans_id;
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or return;
- return ( $loc, $is_deleted, $i * $self->{key_size} );
+ if ( $sector->staleness != $obj->_staleness ) {
+ return;
}
- return;
+ return $sector->delete_key({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 0,
+ });
}
-sub add_bucket {
- ##
- # Adds one key/value pair to bucket list, given offset, MD5 digest of key,
- # plain (undigested) key and value.
- ##
+sub write_value {
my $self = shift;
- my ($tag, $md5, $plain_key, $value, $deleted, $orig_key) = @_;
+ my ($obj, $key, $value) = @_;
- # This verifies that only supported values will be stored.
+ my $r = Scalar::Util::reftype( $value ) || '';
{
- my $r = Scalar::Util::reftype( $value );
-
- last if !defined $r;
+ last if $r eq '';
last if $r eq 'HASH';
last if $r eq 'ARRAY';
- $self->_throw_error(
+ DBM::Deep->_throw_error(
"Storage of references of type '$r' is not supported."
);
}
- my $fileobj = $self->_fileobj;
-
- #ACID - This is a mutation. Must only find the exact transaction
- my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5, 1 );
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "1: Cannot write to a deleted spot in DBM::Deep." );
- my @transactions;
- if ( $fileobj->transaction_id == 0 ) {
- @transactions = $fileobj->current_transactions;
+ if ( $sector->staleness != $obj->_staleness ) {
+ DBM::Deep->_throw_error( "2: Cannot write to a deleted spot in DBM::Deep." );
}
-# $self->_release_space( $size, $subloc );
-#XXX This needs updating to use _release_space
-
- my $location;
- my $size = $self->_length_needed( $value, $plain_key );
-
- # Updating a known md5
- if ( $keyloc ) {
- my $keytag = $self->load_tag( $keyloc );
- my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
-
- if ( $subloc && !$is_deleted && @transactions ) {
- my $old_value = $self->read_from_loc( $subloc, $orig_key );
- my $old_size = $self->_length_needed( $old_value, $plain_key );
-
- for my $trans_id ( @transactions ) {
- my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id );
- unless ($loc) {
- my $location2 = $fileobj->request_space( $old_size );
- $fileobj->print_at( $keytag->{offset} + $offset2,
- pack($self->{long_pack}, $location2 ),
- pack( 'C C', $trans_id, 0 ),
- );
- $self->_write_value( $location2, $plain_key, $old_value, $orig_key );
- }
- }
- }
-
- $location = $self->_fileobj->request_space( $size );
- #XXX This needs to be transactionally-aware in terms of which keytag->{offset} to use
- $fileobj->print_at( $keytag->{offset} + $offset,
- pack($self->{long_pack}, $location ),
- pack( 'C C', $fileobj->transaction_id, 0 ),
- );
+ my ($class, $type);
+ if ( !defined $value ) {
+ $class = 'DBM::Deep::Engine::Sector::Null';
}
- # Adding a new md5
- else {
- my $keyloc = $fileobj->request_space( $self->tag_size( $self->{keyloc_size} ) );
-
- # The bucket fit into list
- if ( defined $offset ) {
- $fileobj->print_at( $tag->{offset} + $offset,
- $md5, pack( $self->{long_pack}, $keyloc ),
- );
- }
- # If bucket didn't fit into list, split into a new index level
- else {
- $self->split_index( $tag, $md5, $keyloc );
+ elsif ( $r eq 'ARRAY' || $r eq 'HASH' ) {
+ my $tmpvar;
+ if ( $r eq 'ARRAY' ) {
+ $tmpvar = tied @$value;
+ } elsif ( $r eq 'HASH' ) {
+ $tmpvar = tied %$value;
}
- my $keytag = $self->write_tag(
- $keyloc, SIG_KEYS, chr(0)x$self->{keyloc_size},
- );
+ if ( $tmpvar ) {
+ my $is_dbm_deep = eval { local $SIG{'__DIE__'}; $tmpvar->isa( 'DBM::Deep' ); };
- $location = $self->_fileobj->request_space( $size );
- $fileobj->print_at( $keytag->{offset},
- pack( $self->{long_pack}, $location ),
- pack( 'C C', $fileobj->transaction_id, 0 ),
- );
-
- my $offset = 1;
- for my $trans_id ( @transactions ) {
- $fileobj->print_at( $keytag->{offset} + $self->{key_size} * $offset++,
- pack( $self->{long_pack}, 0 ),
- pack( 'C C', $trans_id, 1 ),
- );
- }
- }
-
- $self->_write_value( $location, $plain_key, $value, $orig_key );
+ unless ( $is_dbm_deep ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ }
- return 1;
-}
+ unless ( $tmpvar->_engine->storage == $self->storage ) {
+ DBM::Deep->_throw_error( "Cannot store values across DBM::Deep files. Please use export() instead." );
+ }
-sub _write_value {
- my $self = shift;
- my ($location, $key, $value, $orig_key) = @_;
+ # First, verify if we're storing the same thing to this spot. If we are, then
+ # this should be a no-op. -EJS, 2008-05-19
+ my $loc = $sector->get_data_location_for({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 1,
+ });
- my $fileobj = $self->_fileobj;
+ if ( defined($loc) && $loc == $tmpvar->_base_offset ) {
+ return 1;
+ }
- my $dbm_deep_obj = _get_dbm_object( $value );
- if ( $dbm_deep_obj && $dbm_deep_obj->_fileobj ne $fileobj ) {
- $self->_throw_error( "Cannot cross-reference. Use export() instead" );
- }
+ #XXX Can this use $loc?
+ my $value_sector = $self->_load_sector( $tmpvar->_base_offset );
+ $sector->write_data({
+ key => $key,
+ key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+ $value_sector->increment_refcount;
- ##
- # Write signature based on content type, set content length and write
- # actual value.
- ##
- my $r = Scalar::Util::reftype( $value ) || '';
- if ( $dbm_deep_obj ) {
- $self->write_tag( $location, SIG_INTERNAL,pack($self->{long_pack}, $dbm_deep_obj->_base_offset) );
- }
- elsif ($r eq 'HASH') {
- if ( !$dbm_deep_obj && tied %{$value} ) {
- $self->_throw_error( "Cannot store something that is tied" );
+ return 1;
}
- $self->write_tag( $location, SIG_HASH, chr(0)x$self->{index_size} );
- }
- elsif ($r eq 'ARRAY') {
- if ( !$dbm_deep_obj && tied @{$value} ) {
- $self->_throw_error( "Cannot store something that is tied" );
- }
- $self->write_tag( $location, SIG_ARRAY, chr(0)x$self->{index_size} );
- }
- elsif (!defined($value)) {
- $self->write_tag( $location, SIG_NULL, '' );
+
+ $class = 'DBM::Deep::Engine::Sector::Reference';
+ $type = substr( $r, 0, 1 );
}
else {
- $self->write_tag( $location, SIG_DATA, $value );
- }
-
- ##
- # Plain key is stored AFTER value, as keys are typically fetched less often.
- ##
- $fileobj->print_at( undef, pack($self->{data_pack}, length($key)) . $key );
-
- # Internal references don't care about autobless
- return 1 if $dbm_deep_obj;
-
- ##
- # If value is blessed, preserve class name
- ##
- if ( $fileobj->{autobless} ) {
- if ( defined( my $c = Scalar::Util::blessed($value) ) ) {
- $fileobj->print_at( undef, chr(1), pack($self->{data_pack}, length($c)) . $c );
- }
- else {
- $fileobj->print_at( undef, chr(0) );
+ if ( tied($value) ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
}
+ $class = 'DBM::Deep::Engine::Sector::Scalar';
+ }
+
+ # Create this after loading the reference sector in case something bad happens.
+ # This way, we won't allocate value sector(s) needlessly.
+ my $value_sector = $class->new({
+ engine => $self,
+ data => $value,
+ type => $type,
+ });
+
+ $sector->write_data({
+ key => $key,
+ key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+
+ # This code is to make sure we write all the values in the $value to the disk
+ # and to make sure all changes to $value after the assignment are reflected
+ # on disk. This may be counter-intuitive at first, but it is correct dwimmery.
+ # NOTE - simply tying $value won't perform a STORE on each value. Hence, the
+ # copy to a temp value.
+ if ( $r eq 'ARRAY' ) {
+ my @temp = @$value;
+ tie @$value, 'DBM::Deep', {
+ base_offset => $value_sector->offset,
+ staleness => $value_sector->staleness,
+ storage => $self->storage,
+ engine => $self,
+ };
+ @$value = @temp;
+ bless $value, 'DBM::Deep::Array' unless Scalar::Util::blessed( $value );
}
-
- ##
- # Tie the passed in reference so that changes to it are reflected in the
- # datafile. The use of $location as the base_offset will act as the
- # the linkage between parent and child.
- #
- # The overall assignment is a hack around the fact that just tying doesn't
- # store the values. This may not be the wrong thing to do.
- ##
- if ($r eq 'HASH') {
- my %x = %$value;
+ elsif ( $r eq 'HASH' ) {
+ my %temp = %$value;
tie %$value, 'DBM::Deep', {
- base_offset => $location,
- fileobj => $fileobj,
- parent => $self->{obj},
- parent_key => $orig_key,
+ base_offset => $value_sector->offset,
+ staleness => $value_sector->staleness,
+ storage => $self->storage,
+ engine => $self,
};
- %$value = %x;
+
+ %$value = %temp;
bless $value, 'DBM::Deep::Hash' unless Scalar::Util::blessed( $value );
}
- elsif ($r eq 'ARRAY') {
- my @x = @$value;
- tie @$value, 'DBM::Deep', {
- base_offset => $location,
- fileobj => $fileobj,
- parent => $self->{obj},
- parent_key => $orig_key,
- };
- @$value = @x;
- bless $value, 'DBM::Deep::Array' unless Scalar::Util::blessed( $value );
- }
return 1;
}
-sub split_index {
+# XXX Add staleness here
+sub get_next_key {
my $self = shift;
- my ($tag, $md5, $keyloc) = @_;
-
- my $fileobj = $self->_fileobj;
+ my ($obj, $prev_key) = @_;
- my $loc = $fileobj->request_space(
- $self->tag_size( $self->{index_size} ),
- );
-
- $fileobj->print_at( $tag->{ref_loc}, pack($self->{long_pack}, $loc) );
-
- my $index_tag = $self->write_tag(
- $loc, SIG_INDEX,
- chr(0)x$self->{index_size},
- );
-
- my $keys = $tag->{content}
- . $md5 . pack($self->{long_pack}, $keyloc);
+ # XXX Need to add logic about resetting the iterator if any key in the reference has changed
+ unless ( $prev_key ) {
+ $obj->{iterator} = DBM::Deep::Iterator->new({
+ base_offset => $obj->_base_offset,
+ engine => $self,
+ });
+ }
- my @newloc = ();
- BUCKET:
- # The <= here is deliberate - we have max_buckets+1 keys to iterate
- # through, unlike every other loop that uses max_buckets as a stop.
- for (my $i = 0; $i <= $self->{max_buckets}; $i++) {
- my ($key, $old_subloc) = $self->_get_key_subloc( $keys, $i );
+ return $obj->{iterator}->get_next_key( $obj );
+}
- die "[INTERNAL ERROR]: No key in split_index()\n" unless $key;
- die "[INTERNAL ERROR]: No subloc in split_index()\n" unless $old_subloc;
+################################################################################
- my $num = ord(substr($key, $tag->{ch} + 1, 1));
+sub setup_fh {
+ my $self = shift;
+ my ($obj) = @_;
- if ($newloc[$num]) {
- my $subkeys = $fileobj->read_at( $newloc[$num], $self->{bucket_list_size} );
+ return 1 if $obj->_base_offset;
- # This is looking for the first empty spot
- my ($subloc, $offset) = $self->_find_in_buckets(
- { content => $subkeys }, '',
- );
+ my $header = $self->_load_header;
- $fileobj->print_at(
- $newloc[$num] + $offset,
- $key, pack($self->{long_pack}, $old_subloc),
- );
+ # Creating a new file
+ if ( $header->is_new ) {
+ # 1) Create Array/Hash entry
+ my $sector = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ type => $obj->_type,
+ });
+ $obj->{base_offset} = $sector->offset;
+ $obj->{staleness} = $sector->staleness;
- next;
+ $self->flush;
+ }
+ # Reading from an existing file
+ else {
+ $obj->{base_offset} = $header->size;
+ my $sector = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ offset => $obj->_base_offset,
+ });
+ unless ( $sector ) {
+ DBM::Deep->_throw_error("Corrupted file, no master index record");
}
- my $loc = $fileobj->request_space(
- $self->tag_size( $self->{bucket_list_size} ),
- );
-
- $fileobj->print_at(
- $index_tag->{offset} + ($num * $self->{long_size}),
- pack($self->{long_pack}, $loc),
- );
-
- my $blist_tag = $self->write_tag(
- $loc, SIG_BLIST,
- chr(0)x$self->{bucket_list_size},
- );
-
- $fileobj->print_at( $blist_tag->{offset}, $key . pack($self->{long_pack}, $old_subloc) );
+ unless ($obj->_type eq $sector->type) {
+ DBM::Deep->_throw_error("File type mismatch");
+ }
- $newloc[$num] = $blist_tag->{offset};
+ $obj->{staleness} = $sector->staleness;
}
- $self->_release_space(
- $self->tag_size( $self->{bucket_list_size} ),
- $tag->{offset} - SIG_SIZE - $self->{data_size},
- );
+ $self->storage->set_inode;
return 1;
}
-sub read_from_loc {
+sub begin_work {
my $self = shift;
- my ($subloc, $orig_key) = @_;
-
- my $fileobj = $self->_fileobj;
-
- my $signature = $fileobj->read_at( $subloc, SIG_SIZE );
-
- ##
- # If value is a hash or array, return new DBM::Deep object with correct offset
- ##
- if (($signature eq SIG_HASH) || ($signature eq SIG_ARRAY)) {
- #XXX This needs to be a singleton
-# my $new_obj;
-# my $is_autobless;
-# if ( $signature eq SIG_HASH ) {
-# $new_obj = {};
-# tie %$new_obj, 'DBM::Deep', {
-# base_offset => $subloc,
-# fileobj => $self->_fileobj,
-# parent => $self->{obj},
-# parent_key => $orig_key,
-# };
-# $is_autobless = tied(%$new_obj)->_fileobj->{autobless};
-# }
-# else {
-# $new_obj = [];
-# tie @$new_obj, 'DBM::Deep', {
-# base_offset => $subloc,
-# fileobj => $self->_fileobj,
-# parent => $self->{obj},
-# parent_key => $orig_key,
-# };
-# $is_autobless = tied(@$new_obj)->_fileobj->{autobless};
-# }
-#
-# if ($is_autobless) {
-
- my $new_obj = DBM::Deep->new({
- type => $signature,
- base_offset => $subloc,
- fileobj => $self->_fileobj,
- parent => $self->{obj},
- parent_key => $orig_key,
- });
-
- if ($new_obj->_fileobj->{autobless}) {
- ##
- # Skip over value and plain key to see if object needs
- # to be re-blessed
- ##
- $fileobj->increment_pointer( $self->{data_size} + $self->{index_size} );
-
- my $size = $fileobj->read_at( undef, $self->{data_size} );
- $size = unpack($self->{data_pack}, $size);
- if ($size) { $fileobj->increment_pointer( $size ); }
-
- my $bless_bit = $fileobj->read_at( undef, 1 );
- if ( ord($bless_bit) ) {
- my $size = unpack(
- $self->{data_pack},
- $fileobj->read_at( undef, $self->{data_size} ),
- );
-
- if ( $size ) {
- $new_obj = bless $new_obj, $fileobj->read_at( undef, $size );
- }
- }
- }
+ my ($obj) = @_;
- return $new_obj;
+ if ( $self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot begin_work within an active transaction" );
}
- elsif ( $signature eq SIG_INTERNAL ) {
- my $size = $fileobj->read_at( undef, $self->{data_size} );
- $size = unpack($self->{data_pack}, $size);
-
- if ( $size ) {
- my $new_loc = $fileobj->read_at( undef, $size );
- $new_loc = unpack( $self->{long_pack}, $new_loc );
- return $self->read_from_loc( $new_loc, $orig_key );
- }
- else {
- return;
- }
- }
- ##
- # Otherwise return actual value
- ##
- elsif ( $signature eq SIG_DATA ) {
- my $size = $fileobj->read_at( undef, $self->{data_size} );
- $size = unpack($self->{data_pack}, $size);
-
- my $value = $size ? $fileobj->read_at( undef, $size ) : '';
- return $value;
- }
-
- ##
- # Key exists, but content is null
- ##
- return;
-}
-sub get_bucket_value {
- ##
- # Fetch single value given tag and MD5 digested key.
- ##
- my $self = shift;
- my ($tag, $md5, $orig_key) = @_;
+ my @slots = $self->read_txn_slots;
+ my $found;
+ for my $i ( 0 .. $#slots ) {
+ next if $slots[$i];
- #ACID - This is a read. Can find exact or HEAD
- my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5 );
-
- if ( !$keyloc ) {
- #XXX Need to use real key
-# $self->add_bucket( $tag, $md5, $orig_key, undef, $orig_key );
-# return;
+ $slots[$i] = 1;
+ $self->set_trans_id( $i + 1 );
+ $found = 1;
+ last;
}
-# elsif ( !$is_deleted ) {
- else {
- my $keytag = $self->load_tag( $keyloc );
- my ($subloc, $is_deleted) = $self->find_keyloc( $keytag );
- if (!$subloc && !$is_deleted) {
- ($subloc, $is_deleted) = $self->find_keyloc( $keytag, 0 );
- }
- if ( $subloc && !$is_deleted ) {
- return $self->read_from_loc( $subloc, $orig_key );
- }
+ unless ( $found ) {
+ DBM::Deep->_throw_error( "Cannot allocate transaction ID" );
+ }
+ $self->write_txn_slots( @slots );
+
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot begin_work - no available transactions" );
}
return;
}
-sub delete_bucket {
- ##
- # Delete single key/value pair given tag and MD5 digested key.
- ##
+sub rollback {
my $self = shift;
- my ($tag, $md5, $orig_key) = @_;
-
- #ACID - Although this is a mutation, we must find any transaction.
- # This is because we need to mark something as deleted that is in the HEAD.
- my ($keyloc, $offset) = $self->_find_in_buckets( $tag, $md5 );
-
- return if !$keyloc;
+ my ($obj) = @_;
- my $fileobj = $self->_fileobj;
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot rollback without an active transaction" );
+ }
- my @transactions;
- if ( $fileobj->transaction_id == 0 ) {
- @transactions = $fileobj->current_transactions;
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->rollback( $idx );
}
- if ( $fileobj->transaction_id == 0 ) {
- my $keytag = $self->load_tag( $keyloc );
+ $self->clear_entries;
- my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
- return if !$subloc || $is_deleted;
+ my @slots = $self->read_txn_slots;
+ $slots[$self->trans_id-1] = 0;
+ $self->write_txn_slots( @slots );
+ $self->inc_txn_staleness_counter( $self->trans_id );
+ $self->set_trans_id( 0 );
- my $value = $self->read_from_loc( $subloc, $orig_key );
+ return 1;
+}
- my $size = $self->_length_needed( $value, $orig_key );
+sub commit {
+ my $self = shift;
+ my ($obj) = @_;
- for my $trans_id ( @transactions ) {
- my ($loc, $is_deleted, $offset2) = $self->find_keyloc( $keytag, $trans_id );
- unless ($loc) {
- my $location2 = $fileobj->request_space( $size );
- $fileobj->print_at( $keytag->{offset} + $offset2,
- pack($self->{long_pack}, $location2 ),
- pack( 'C C', $trans_id, 0 ),
- );
- $self->_write_value( $location2, $orig_key, $value, $orig_key );
- }
- }
+ if ( !$self->trans_id ) {
+ DBM::Deep->_throw_error( "Cannot commit without an active transaction" );
+ }
- $keytag = $self->load_tag( $keyloc );
- ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
- $fileobj->print_at( $keytag->{offset} + $offset,
- substr( $keytag->{content}, $offset + $self->{key_size} ),
- chr(0) x $self->{key_size},
- );
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->commit( $idx );
}
- else {
- my $keytag = $self->load_tag( $keyloc );
- my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
+ $self->clear_entries;
- $fileobj->print_at( $keytag->{offset} + $offset,
- pack($self->{long_pack}, 0 ),
- pack( 'C C', $fileobj->transaction_id, 1 ),
- );
- }
+ my @slots = $self->read_txn_slots;
+ $slots[$self->trans_id-1] = 0;
+ $self->write_txn_slots( @slots );
+ $self->inc_txn_staleness_counter( $self->trans_id );
+ $self->set_trans_id( 0 );
return 1;
}
-sub bucket_exists {
- ##
- # Check existence of single key given tag and MD5 digested key.
- ##
+sub read_txn_slots {
my $self = shift;
- my ($tag, $md5) = @_;
-
- #ACID - This is a read. Can find exact or HEAD
- my ($keyloc) = $self->_find_in_buckets( $tag, $md5 );
- my $keytag = $self->load_tag( $keyloc );
- my ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag );
- if ( !$subloc && !$is_deleted ) {
- ($subloc, $is_deleted, $offset) = $self->find_keyloc( $keytag, 0 );
- }
- return ($subloc && !$is_deleted) && 1;
+ return $self->_load_header->read_txn_slots(@_);
}
-sub find_blist {
- ##
- # Locate offset for bucket list, given digested key
- ##
+sub write_txn_slots {
my $self = shift;
- my ($offset, $md5, $args) = @_;
- $args = {} unless $args;
-
- ##
- # Locate offset for bucket list using digest index system
- ##
- my $tag = $self->load_tag( $offset )
- or $self->_throw_error( "INTERNAL ERROR - Cannot find tag" );
+ return $self->_load_header->write_txn_slots(@_);
+}
- my $ch = 0;
- while ($tag->{signature} ne SIG_BLIST) {
- my $num = ord substr($md5, $ch, 1);
+# Return the 1-based IDs of every transaction whose slot is currently
+# marked active in the header's transaction bitfield.
+sub get_running_txn_ids {
+    my $self = shift;
+    my @transactions = $self->read_txn_slots;
+    # Slot indices are 0-based but transaction IDs are 1-based, hence the +1.
+    # NOTE(review): the list is returned only implicitly, as the value of the
+    # final assignment; @trans_ids is otherwise unused. An explicit
+    # "return map { ... } ..." would state the intent more clearly.
+    my @trans_ids = map { $_+1} grep { $transactions[$_] } 0 .. $#transactions;
+}
- my $ref_loc = $tag->{offset} + ($num * $self->{long_size});
- $tag = $self->index_lookup( $tag, $num );
+# Read a transaction's staleness counter. Delegates to the file-header
+# sector; after the shift, the remaining @_ (the transaction ID) is
+# forwarded unchanged.
+sub get_txn_staleness_counter {
+    my $self = shift;
+    return $self->_load_header->get_txn_staleness_counter(@_);
+}
- if (!$tag) {
- return if !$args->{create};
+# Bump a transaction's staleness counter (done when a transaction ends, so
+# sectors written under the old incarnation can be recognized as stale).
+# Delegates to the file-header sector, forwarding the remaining @_.
+sub inc_txn_staleness_counter {
+    my $self = shift;
+    return $self->_load_header->inc_txn_staleness_counter(@_);
+}
- my $loc = $self->_fileobj->request_space(
- $self->tag_size( $self->{bucket_list_size} ),
- );
+# Return an arrayref of the "$loc:$idx" entry keys recorded for the current
+# transaction. The ||= autovivifies an empty hash so a transaction with no
+# entries yields [] rather than dying on an undef deref.
+sub get_entries {
+    my $self = shift;
+    return [ keys %{ $self->{entries}{$self->trans_id} ||= {} } ];
+}
- $self->_fileobj->print_at( $ref_loc, pack($self->{long_pack}, $loc) );
+sub add_entry {
+ my $self = shift;
+ my ($trans_id, $loc, $idx) = @_;
- $tag = $self->write_tag(
- $loc, SIG_BLIST,
- chr(0)x$self->{bucket_list_size},
- );
+ return unless $trans_id;
- $tag->{ref_loc} = $ref_loc;
- $tag->{ch} = $ch;
+ $self->{entries}{$trans_id} ||= {};
+ $self->{entries}{$trans_id}{"$loc:$idx"} = undef;
+}
- last;
+# If the buckets are being relocated because of a reindexing, the entries
+# mechanism needs to be made aware of it.
+sub reindex_entry {
+ my $self = shift;
+ my ($old_loc, $old_idx, $new_loc, $new_idx) = @_;
+
+ TRANS:
+ while ( my ($trans_id, $locs) = each %{ $self->{entries} } ) {
+ if ( exists $locs->{"$old_loc:$old_idx"} ) {
+ delete $locs->{"$old_loc:$old_idx"};
+ $locs->{"$new_loc:$new_idx"} = undef;
+ next TRANS;
}
-
- $tag->{ch} = $ch++;
- $tag->{ref_loc} = $ref_loc;
}
-
- return $tag;
}
-sub index_lookup {
- ##
- # Given index tag, lookup single entry in index and return .
- ##
+sub clear_entries {
my $self = shift;
- my ($tag, $index) = @_;
-
- my $location = unpack(
- $self->{long_pack},
- substr(
- $tag->{content},
- $index * $self->{long_size},
- $self->{long_size},
- ),
- );
+ delete $self->{entries}{$self->trans_id};
+}
- if (!$location) { return; }
+################################################################################
- return $self->load_tag( $location );
+# Run the engine's configured digest coderef (stored in $self->{digest})
+# over the arguments and return the digest. After the shift, the remaining
+# @_ is passed straight through to the coderef.
+sub _apply_digest {
+    my $self = shift;
+    return $self->{digest}->(@_);
}
-sub traverse_index {
- ##
- # Scan index and recursively step into deeper levels, looking for next key.
- ##
- my $self = shift;
- my ($xxxx, $offset, $ch, $force_return_next) = @_;
+# Return a freed sector to the appropriate free-chain. The leading integer
+# (0 = bucket list, 1 = data, 2 = index) selects the chain; the header
+# sector owns the chains, so the generic case delegates to it.
+sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
+sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
+sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
+sub _add_free_sector { shift->_load_header->add_free_sector( @_ ) }
- my $tag = $self->load_tag( $offset );
+# Allocate a sector of the given kind. Mirrors the _add_free_* family:
+# the leading integer (0 = bucket list, 1 = data, 2 = index) selects the
+# free-chain, and the header sector services the request.
+sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
+sub _request_data_sector { shift->_request_sector( 1, @_ ) }
+sub _request_index_sector { shift->_request_sector( 2, @_ ) }
+sub _request_sector { shift->_load_header->request_sector( @_ ) }
- if ($tag->{signature} ne SIG_BLIST) {
- my $start = $xxxx->{return_next} ? 0 : ord(substr($xxxx->{prev_md5}, $ch, 1));
+################################################################################
- for (my $idx = $start; $idx < $self->{hash_chars_used}; $idx++) {
- my $subloc = unpack(
- $self->{long_pack},
- substr(
- $tag->{content},
- $idx * $self->{long_size},
- $self->{long_size},
- ),
- );
+{
+ my %t = (
+ SIG_ARRAY => 'Reference',
+ SIG_HASH => 'Reference',
+ SIG_BLIST => 'BucketList',
+ SIG_INDEX => 'Index',
+ SIG_NULL => 'Null',
+ SIG_DATA => 'Scalar',
+ );
- if ($subloc) {
- my $result = $self->traverse_index(
- $xxxx, $subloc, $ch + 1, $force_return_next,
- );
+ my %class_for;
+ while ( my ($k,$v) = each %t ) {
+ $class_for{ DBM::Deep::Engine->$k } = "DBM::Deep::Engine::Sector::$v";
+ }
- if (defined $result) { return $result; }
- }
- } # index loop
+ sub load_sector {
+ my $self = shift;
+ my ($offset) = @_;
- $xxxx->{return_next} = 1;
+ my $data = $self->get_data( $offset )
+ or return;#die "Cannot read from '$offset'\n";
+ my $type = substr( $$data, 0, 1 );
+ my $class = $class_for{ $type };
+ return $class->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
}
- # This is the bucket list
- else {
- my $keys = $tag->{content};
- if ($force_return_next) { $xxxx->{return_next} = 1; }
-
- ##
- # Iterate through buckets, looking for a key match
- ##
- my $transaction_id = $self->_fileobj->transaction_id;
- for (my $i = 0; $i < $self->{max_buckets}; $i++) {
- my ($key, $keyloc) = $self->_get_key_subloc( $keys, $i );
-
- # End of bucket list -- return to outer loop
- if (!$keyloc) {
- $xxxx->{return_next} = 1;
- last;
- }
- # Located previous key -- return next one found
- elsif ($key eq $xxxx->{prev_md5}) {
- $xxxx->{return_next} = 1;
- next;
- }
- # Seek to bucket location and skip over signature
- elsif ($xxxx->{return_next}) {
- my $fileobj = $self->_fileobj;
-
- my $keytag = $self->load_tag( $keyloc );
- my ($subloc, $is_deleted) = $self->find_keyloc( $keytag );
- if ( $subloc == 0 && !$is_deleted ) {
- ($subloc, $is_deleted) = $self->find_keyloc( $keytag, 0 );
- }
- next if $is_deleted;
+ *_load_sector = \&load_sector;
- # Skip over value to get to plain key
- my $sig = $fileobj->read_at( $subloc, SIG_SIZE );
+ sub load_header {
+ my $self = shift;
- my $size = $fileobj->read_at( undef, $self->{data_size} );
- $size = unpack($self->{data_pack}, $size);
- if ($size) { $fileobj->increment_pointer( $size ); }
+ #XXX Does this mean we make too many objects? -RobK, 2008-06-23
+ return DBM::Deep::Engine::Sector::FileHeader->new({
+ engine => $self,
+ offset => 0,
+ });
+ }
+ *_load_header = \&load_header;
+
+ sub get_data {
+ my $self = shift;
+ my ($offset, $size) = @_;
+ return unless defined $offset;
- # Read in plain key and return as scalar
- $size = $fileobj->read_at( undef, $self->{data_size} );
- $size = unpack($self->{data_pack}, $size);
+ unless ( exists $self->sector_cache->{$offset} ) {
+ # Don't worry about the header sector. It will manage itself.
+ return unless $offset;
+
+ if ( !defined $size ) {
+ my $type = $self->storage->read_at( $offset, 1 )
+ or die "($offset): Cannot read from '$offset' to find the type\n";
+
+ if ( $type eq $self->SIG_FREE ) {
+ return;
+ }
- my $plain_key;
- if ($size) { $plain_key = $fileobj->read_at( undef, $size); }
- return $plain_key;
+ my $class = $class_for{$type}
+ or die "($offset): Cannot find class for '$type'\n";
+ $size = $class->size( $self )
+ or die "($offset): '$class' doesn't return a size\n";
+ $self->sector_cache->{$offset} = $type . $self->storage->read_at( undef, $size - 1 );
+ }
+ else {
+ $self->sector_cache->{$offset} = $self->storage->read_at( $offset, $size )
+ or return;
}
}
- $xxxx->{return_next} = 1;
+ return \$self->sector_cache->{$offset};
}
-
- return;
}
-# Utilities
+# Lazily-created in-memory cache of raw sector bytes, keyed by file offset.
+# Populated by get_data() and written back to storage by flush().
+sub sector_cache {
+    my $self = shift;
+    return $self->{sector_cache} ||= {};
+}
-sub _get_key_subloc {
+sub clear_sector_cache {
my $self = shift;
- my ($keys, $idx) = @_;
-
- return unpack(
- # This is 'a', not 'A'. Please read the pack() documentation for the
- # difference between the two and why it's important.
- "a$self->{hash_size} $self->{long_pack}",
- substr(
- $keys,
- ($idx * $self->{bucket_size}),
- $self->{bucket_size},
- ),
- );
+ $self->{sector_cache} = {};
}
-sub _find_in_buckets {
+sub dirty_sectors {
my $self = shift;
- my ($tag, $md5) = @_;
+ return $self->{dirty_sectors} ||= {};
+}
- BUCKET:
- for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) {
- my ($key, $subloc) = $self->_get_key_subloc(
- $tag->{content}, $i,
- );
+# Forget which sectors are dirty (called by flush() after they have been
+# written out to storage).
+sub clear_dirty_sectors {
+    my $self = shift;
+    $self->{dirty_sectors} = {};
+}
- my @rv = ($subloc, $i * $self->{bucket_size});
+sub add_dirty_sector {
+ my $self = shift;
+ my ($offset) = @_;
- unless ( $subloc ) {
- return @rv;
- }
+ $self->dirty_sectors->{ $offset } = undef;
+}
- next BUCKET if $key ne $md5;
+sub flush {
+ my $self = shift;
- return @rv;
+ my $sectors = $self->dirty_sectors;
+ for my $offset (sort { $a <=> $b } keys %{ $sectors }) {
+ $self->storage->print_at( $offset, $self->sector_cache->{$offset} );
}
- return;
+ # Why do we need to have the storage flush? Shouldn't autoflush take care of things?
+ # -RobK, 2008-06-26
+ $self->storage->flush;
+
+ $self->clear_dirty_sectors;
+
+ $self->clear_sector_cache;
}
-sub _release_space {
+################################################################################
+
+sub lock_exclusive {
my $self = shift;
- my ($size, $loc) = @_;
+ my ($obj) = @_;
+ return $self->storage->lock_exclusive( $obj );
+}
- my $next_loc = 0;
+# Take a shared (read) lock on behalf of $obj. Pure delegation to the
+# storage layer, which owns the actual locking mechanism.
+sub lock_shared {
+    my $self = shift;
+    my ($obj) = @_;
+    return $self->storage->lock_shared( $obj );
+}
- $self->_fileobj->print_at( $loc,
- SIG_FREE,
- pack($self->{long_pack}, $size ),
- pack($self->{long_pack}, $next_loc ),
- );
+sub unlock {
+ my $self = shift;
+ my ($obj) = @_;
- return;
-}
+ my $rv = $self->storage->unlock( $obj );
+
+ $self->flush if $rv;
-sub _throw_error {
- die "DBM::Deep: $_[1]\n";
+ return $rv;
}
-sub _get_dbm_object {
- my $item = shift;
+################################################################################
- my $obj = eval {
- local $SIG{__DIE__};
- if ($item->isa( 'DBM::Deep' )) {
- return $item;
+# Simple read-only accessors for the engine's configuration, stored as
+# plain hash slots on the blessed object. blank_md5 is the all-zero key
+# sentinel: hash_size NUL bytes.
+sub storage { $_[0]{storage} }
+sub byte_size { $_[0]{byte_size} }
+sub hash_size { $_[0]{hash_size} }
+sub hash_chars { $_[0]{hash_chars} }
+sub num_txns { $_[0]{num_txns} }
+sub max_buckets { $_[0]{max_buckets} }
+sub blank_md5 { chr(0) x $_[0]->hash_size }
+sub data_sector_size { $_[0]{data_sector_size} }
+
+# This is a calculated value
+sub txn_bitfield_len {
+ my $self = shift;
+ unless ( exists $self->{txn_bitfield_len} ) {
+ my $temp = ($self->num_txns) / 8;
+ if ( $temp > int( $temp ) ) {
+ $temp = int( $temp ) + 1;
}
- return;
- };
- return $obj if $obj;
-
- my $r = Scalar::Util::reftype( $item ) || '';
- if ( $r eq 'HASH' ) {
- my $obj = eval {
- local $SIG{__DIE__};
- my $obj = tied(%$item);
- if ($obj->isa( 'DBM::Deep' )) {
- return $obj;
- }
- return;
- };
- return $obj if $obj;
+ $self->{txn_bitfield_len} = $temp;
}
- elsif ( $r eq 'ARRAY' ) {
- my $obj = eval {
- local $SIG{__DIE__};
- my $obj = tied(@$item);
- if ($obj->isa( 'DBM::Deep' )) {
- return $obj;
- }
- return;
- };
- return $obj if $obj;
- }
-
- return;
+ return $self->{txn_bitfield_len};
}
-sub _length_needed {
+# Get/set pairs for per-engine transaction state: the active transaction ID
+# (0 means "no transaction", i.e. the HEAD), and the file offsets of the
+# transaction bookkeeping area and the free-sector chains.
+sub trans_id { $_[0]{trans_id} }
+sub set_trans_id { $_[0]{trans_id} = $_[1] }
+
+sub trans_loc { $_[0]{trans_loc} }
+sub set_trans_loc { $_[0]{trans_loc} = $_[1] }
+
+sub chains_loc { $_[0]{chains_loc} }
+sub set_chains_loc { $_[0]{chains_loc} = $_[1] }
+
+# Lazily-created general-purpose cache hash; clear_cache empties it in
+# place rather than replacing it, preserving any outstanding references.
+sub cache { $_[0]{cache} ||= {} }
+sub clear_cache { %{$_[0]->cache} = () }
+
+sub _dump_file {
my $self = shift;
- my ($value, $key) = @_;
+ $self->flush;
- my $is_dbm_deep = eval {
- local $SIG{'__DIE__'};
- $value->isa( 'DBM::Deep' );
- };
+ # Read the header
+ my $header_sector = DBM::Deep::Engine::Sector::FileHeader->new({
+ engine => $self,
+ });
- my $len = SIG_SIZE
- + $self->{data_size} # size for value
- + $self->{data_size} # size for key
- + length( $key ); # length of key
+ my %types = (
+ 0 => 'B',
+ 1 => 'D',
+ 2 => 'I',
+ );
- if ( $is_dbm_deep && $value->_fileobj eq $self->_fileobj ) {
- # long_size is for the internal reference
- return $len + $self->{long_size};
- }
+ my %sizes = (
+ 'D' => $self->data_sector_size,
+ 'B' => DBM::Deep::Engine::Sector::BucketList->new({engine=>$self,offset=>1})->size,
+ 'I' => DBM::Deep::Engine::Sector::Index->new({engine=>$self,offset=>1})->size,
+ );
- if ( $self->_fileobj->{autobless} ) {
- # This is for the bit saying whether or not this thing is blessed.
- $len += 1;
- }
+ my $return = "";
- my $r = Scalar::Util::reftype( $value ) || '';
- unless ( $r eq 'HASH' || $r eq 'ARRAY' ) {
- if ( defined $value ) {
- $len += length( $value );
+ # Filesize
+ $return .= "Size: " . (-s $self->storage->{fh}) . $/;
+
+ # Header values
+ $return .= "NumTxns: " . $self->num_txns . $/;
+
+ # Read the free sector chains
+ my %sectors;
+ foreach my $multiple ( 0 .. 2 ) {
+ $return .= "Chains($types{$multiple}):";
+ my $old_loc = $self->chains_loc + $multiple * $self->byte_size;
+ while ( 1 ) {
+ my $loc = unpack(
+ $StP{$self->byte_size},
+ $self->storage->read_at( $old_loc, $self->byte_size ),
+ );
+
+ # We're now out of free sectors of this kind.
+ unless ( $loc ) {
+ last;
+ }
+
+ $sectors{ $types{$multiple} }{ $loc } = undef;
+ $old_loc = $loc + SIG_SIZE + $STALE_SIZE;
+ $return .= " $loc";
}
- return $len;
- }
+ $return .= $/;
+ }
+
+ my $spot = $header_sector->size;
+ SECTOR:
+ while ( $spot < $self->storage->{end} ) {
+ # Read each sector in order.
+ my $sector = $self->_load_sector( $spot );
+ if ( !$sector ) {
+ # Find it in the free-sectors that were found already
+ foreach my $type ( keys %sectors ) {
+ if ( exists $sectors{$type}{$spot} ) {
+ my $size = $sizes{$type};
+ $return .= sprintf "%08d: %s %04d\n", $spot, 'F' . $type, $size;
+ $spot += $size;
+ next SECTOR;
+ }
+ }
- $len += $self->{index_size};
+ die "********\n$return\nDidn't find free sector for $spot in chains\n********\n";
+ }
+ else {
+ $return .= sprintf "%08d: %s %04d", $spot, $sector->type, $sector->size;
+ if ( $sector->type eq 'D' ) {
+ $return .= ' ' . $sector->data;
+ }
+ elsif ( $sector->type eq 'A' || $sector->type eq 'H' ) {
+ $return .= ' REF: ' . $sector->get_refcount;
+ }
+ elsif ( $sector->type eq 'B' ) {
+ foreach my $bucket ( $sector->chopped_up ) {
+ $return .= "\n ";
+ $return .= sprintf "%08d", unpack($StP{$self->byte_size},
+ substr( $bucket->[-1], $self->hash_size, $self->byte_size),
+ );
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + $self->byte_size,
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ foreach my $txn ( 0 .. $self->num_txns - 2 ) {
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + 2 * $self->byte_size + $txn * ($self->byte_size + $STALE_SIZE),
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ }
+ }
+ }
+ $return .= $/;
- # if autobless is enabled, must also take into consideration
- # the class name as it is stored after the key.
- if ( $self->_fileobj->{autobless} ) {
- my $c = Scalar::Util::blessed($value);
- if ( defined $c && !$is_dbm_deep ) {
- $len += $self->{data_size} + length($c);
+ $spot += $sector->size;
}
}
- return $len;
+ return $return;
}
1;