use 5.006_000;
use strict;
+use warnings FATAL => 'all';
-our $VERSION = q(0.99_04);
-
+# Never import symbols into our namespace. We are a class, not a library.
+# -RobK, 2008-05-27
use Scalar::Util ();
+#use Data::Dumper ();
+
# File-wide notes:
# * Every method in here assumes that the storage has been appropriately
# safeguarded. This can be anything from flock() to some sort of manual
# Setup file and tag signatures. These should never change.
sub SIG_FILE () { 'DPDB' }
sub SIG_HEADER () { 'h' }
-sub SIG_INTERNAL () { 'i' }
sub SIG_HASH () { 'H' }
sub SIG_ARRAY () { 'A' }
sub SIG_NULL () { 'N' }
sub SIG_INDEX () { 'I' }
sub SIG_BLIST () { 'B' }
sub SIG_FREE () { 'F' }
-sub SIG_KEYS () { 'K' }
sub SIG_SIZE () { 1 }
-sub STALE_SIZE () { 1 }
+
+our $STALE_SIZE = 2;
# Please refer to the pack() documentation for further information
my %StP = (
- 1 => 'C', # Unsigned char value (no order specified, presumably ASCII)
+ 1 => 'C', # Unsigned char value (no order needed as it's just one byte)
2 => 'n', # Unsigned short in "network" (big-endian) order
4 => 'N', # Unsigned long in "network" (big-endian) order
8 => 'Q', # Usigned quad (no order specified, presumably machine-dependent)
);
+sub StP { $StP{$_[1]} }
+
+# Import these after the SIG_* definitions because those definitions are used
+# in the headers of these classes. -RobK, 2008-06-20
+use DBM::Deep::Engine::Sector::BucketList;
+use DBM::Deep::Engine::Sector::FileHeader;
+use DBM::Deep::Engine::Sector::Index;
+use DBM::Deep::Engine::Sector::Null;
+use DBM::Deep::Engine::Sector::Reference;
+use DBM::Deep::Engine::Sector::Scalar;
+use DBM::Deep::Iterator;
################################################################################
my $class = shift;
my ($args) = @_;
+ $args->{storage} = DBM::Deep::File->new( $args )
+ unless exists $args->{storage};
+
my $self = bless {
byte_size => 4,
hash_size => 16, # In bytes
hash_chars => 256, # Number of chars the algorithm uses per byte
max_buckets => 16,
- num_txns => 2, # HEAD plus 1 additional transaction for importing
+ num_txns => 1, # The HEAD
trans_id => 0, # Default to the HEAD
+ data_sector_size => 64, # Size in bytes of each data sector
+
entries => {}, # This is the list of entries for transactions
storage => undef,
}, $class;
+ # Never allow byte_size to be set directly.
+ delete $args->{byte_size};
if ( defined $args->{pack_size} ) {
if ( lc $args->{pack_size} eq 'small' ) {
$args->{byte_size} = 2;
$self->{$param} = $args->{$param};
}
- ##
- # Number of buckets per blist before another level of indexing is
- # done. Increase this value for slightly greater speed, but larger database
- # files. DO NOT decrease this value below 16, due to risk of recursive
- # reindex overrun.
- ##
- if ( $self->{max_buckets} < 16 ) {
- warn "Floor of max_buckets is 16. Setting it to 16 from '$self->{max_buckets}'\n";
- $self->{max_buckets} = 16;
+ my %validations = (
+ max_buckets => { floor => 16, ceil => 256 },
+ num_txns => { floor => 1, ceil => 255 },
+ data_sector_size => { floor => 32, ceil => 256 },
+ );
+
+ while ( my ($attr, $c) = each %validations ) {
+ if ( !defined $self->{$attr}
+ || !length $self->{$attr}
+ || $self->{$attr} =~ /\D/
+ || $self->{$attr} < $c->{floor}
+ ) {
+ $self->{$attr} = '(undef)' if !defined $self->{$attr};
+ warn "Floor of $attr is $c->{floor}. Setting it to $c->{floor} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{floor};
+ }
+ elsif ( $self->{$attr} > $c->{ceil} ) {
+ warn "Ceiling of $attr is $c->{ceil}. Setting it to $c->{ceil} from '$self->{$attr}'\n";
+ $self->{$attr} = $c->{ceil};
+ }
}
if ( !$self->{digest} ) {
return $sector->get_classname;
}
+# Copy the value stored under $old_key in the container $obj so that it is
+# also reachable under $new_key. Reference values are shared (refcount is
+# incremented); scalar/null values are cloned so the keys do not alias.
+# Returns nothing (silently) if $obj's sector is stale, i.e. was deleted.
+sub make_reference {
+    my $self = shift;
+    my ($obj, $old_key, $new_key) = @_;
+
+    # This will be a Reference sector
+    my $sector = $self->_load_sector( $obj->_base_offset )
+        or DBM::Deep->_throw_error( "How did make_reference fail (no sector for '$obj')?!" );
+
+    # Staleness mismatch means the sector was freed and reused since $obj
+    # was created -- treat the spot as deleted and do nothing.
+    if ( $sector->staleness != $obj->_staleness ) {
+        return;
+    }
+
+    my $old_md5 = $self->_apply_digest( $old_key );
+
+    # Look up the value currently stored under the old key (HEAD allowed).
+    my $value_sector = $sector->get_data_for({
+        key_md5 => $old_md5,
+        allow_head => 1,
+    });
+
+    # No value under the old key yet: store an explicit Null there first so
+    # both keys end up with a defined slot.
+    unless ( $value_sector ) {
+        $value_sector = DBM::Deep::Engine::Sector::Null->new({
+            engine => $self,
+            data => undef,
+        });
+
+        $sector->write_data({
+            key_md5 => $old_md5,
+            key => $old_key,
+            value => $value_sector,
+        });
+    }
+
+    if ( $value_sector->isa( 'DBM::Deep::Engine::Sector::Reference' ) ) {
+        # Reference sectors are shared between keys; bump the refcount
+        # instead of copying the whole structure.
+        $sector->write_data({
+            key => $new_key,
+            key_md5 => $self->_apply_digest( $new_key ),
+            value => $value_sector,
+        });
+        $value_sector->increment_refcount;
+    }
+    else {
+        # Data sectors are cloned so writes through one key cannot be seen
+        # through the other.
+        $sector->write_data({
+            key => $new_key,
+            key_md5 => $self->_apply_digest( $new_key ),
+            value => $value_sector->clone,
+        });
+    }
+}
+
sub key_exists {
my $self = shift;
my ($obj, $key) = @_;
);
}
+ # This will be a Reference sector
+ my $sector = $self->_load_sector( $obj->_base_offset )
+ or DBM::Deep->_throw_error( "1: Cannot write to a deleted spot in DBM::Deep." );
+
+ if ( $sector->staleness != $obj->_staleness ) {
+ DBM::Deep->_throw_error( "2: Cannot write to a deleted spot in DBM::Deep." );
+ }
+
my ($class, $type);
if ( !defined $value ) {
$class = 'DBM::Deep::Engine::Sector::Null';
}
elsif ( $r eq 'ARRAY' || $r eq 'HASH' ) {
- if ( $r eq 'ARRAY' && tied(@$value) ) {
- DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ my $tmpvar;
+ if ( $r eq 'ARRAY' ) {
+ $tmpvar = tied @$value;
+ } elsif ( $r eq 'HASH' ) {
+ $tmpvar = tied %$value;
}
- if ( $r eq 'HASH' && tied(%$value) ) {
- DBM::Deep->_throw_error( "Cannot store something that is tied." );
+
+ if ( $tmpvar ) {
+ my $is_dbm_deep = eval { local $SIG{'__DIE__'}; $tmpvar->isa( 'DBM::Deep' ); };
+
+ unless ( $is_dbm_deep ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ }
+
+ unless ( $tmpvar->_engine->storage == $self->storage ) {
+ DBM::Deep->_throw_error( "Cannot store values across DBM::Deep files. Please use export() instead." );
+ }
+
+ # First, verify if we're storing the same thing to this spot. If we are, then
+ # this should be a no-op. -EJS, 2008-05-19
+ my $loc = $sector->get_data_location_for({
+ key_md5 => $self->_apply_digest( $key ),
+ allow_head => 1,
+ });
+
+ if ( defined($loc) && $loc == $tmpvar->_base_offset ) {
+ return 1;
+ }
+
+ #XXX Can this use $loc?
+ my $value_sector = $self->_load_sector( $tmpvar->_base_offset );
+ $sector->write_data({
+ key => $key,
+ key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+ $value_sector->increment_refcount;
+
+ return 1;
}
+
$class = 'DBM::Deep::Engine::Sector::Reference';
$type = substr( $r, 0, 1 );
}
else {
+ if ( tied($value) ) {
+ DBM::Deep->_throw_error( "Cannot store something that is tied." );
+ }
$class = 'DBM::Deep::Engine::Sector::Scalar';
}
- # This will be a Reference sector
- my $sector = $self->_load_sector( $obj->_base_offset )
- or DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep." );
-
- if ( $sector->staleness != $obj->_staleness ) {
- DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep.n" );
- }
-
# Create this after loading the reference sector in case something bad happens.
# This way, we won't allocate value sector(s) needlessly.
my $value_sector = $class->new({
my $self = shift;
my ($obj) = @_;
- # We're opening the file.
- unless ( $obj->_base_offset ) {
- my $bytes_read = $self->_read_file_header;
+ return 1 if $obj->_base_offset;
- # Creating a new file
- unless ( $bytes_read ) {
- $self->_write_file_header;
+ my $header = $self->_load_header;
- # 1) Create Array/Hash entry
- my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- type => $obj->_type,
- });
- $obj->{base_offset} = $initial_reference->offset;
- $obj->{staleness} = $initial_reference->staleness;
+ # Creating a new file
+ if ( $header->is_new ) {
+ # 1) Create Array/Hash entry
+ my $sector = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ type => $obj->_type,
+ });
+ $obj->{base_offset} = $sector->offset;
+ $obj->{staleness} = $sector->staleness;
- $self->storage->flush;
+ $self->flush;
+ }
+ # Reading from an existing file
+ else {
+ $obj->{base_offset} = $header->size;
+ my $sector = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ offset => $obj->_base_offset,
+ });
+ unless ( $sector ) {
+ DBM::Deep->_throw_error("Corrupted file, no master index record");
}
- # Reading from an existing file
- else {
- $obj->{base_offset} = $bytes_read;
- my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- offset => $obj->_base_offset,
- });
- unless ( $initial_reference ) {
- DBM::Deep->_throw_error("Corrupted file, no master index record");
- }
-
- unless ($obj->_type eq $initial_reference->type) {
- DBM::Deep->_throw_error("File type mismatch");
- }
- $obj->{staleness} = $initial_reference->staleness;
+ unless ($obj->_type eq $sector->type) {
+ DBM::Deep->_throw_error("File type mismatch");
}
+
+ $obj->{staleness} = $sector->staleness;
}
+ $self->storage->set_inode;
+
return 1;
}
}
my @slots = $self->read_txn_slots;
- for my $i ( 1 .. @slots ) {
+ my $found;
+ for my $i ( 0 .. $#slots ) {
next if $slots[$i];
+
$slots[$i] = 1;
- $self->set_trans_id( $i );
+ $self->set_trans_id( $i + 1 );
+ $found = 1;
last;
}
+ unless ( $found ) {
+ DBM::Deep->_throw_error( "Cannot allocate transaction ID" );
+ }
$self->write_txn_slots( @slots );
if ( !$self->trans_id ) {
DBM::Deep->_throw_error( "Cannot rollback without an active transaction" );
}
- # Each entry is the file location for a bucket that has a modification for
- # this transaction. The entries need to be expunged.
- foreach my $entry (@{ $self->get_entries } ) {
- # Remove the entry here
- my $read_loc = $entry
- + $self->hash_size
- + $self->byte_size
- + $self->trans_id * ( $self->byte_size + 4 );
-
- my $data_loc = $self->storage->read_at( $read_loc, $self->byte_size );
- $data_loc = unpack( $StP{$self->byte_size}, $data_loc );
- $self->storage->print_at( $read_loc, pack( $StP{$self->byte_size}, 0 ) );
-
- if ( $data_loc > 1 ) {
- $self->_load_sector( $data_loc )->free;
- }
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->rollback( $idx );
}
$self->clear_entries;
my @slots = $self->read_txn_slots;
- $slots[$self->trans_id] = 0;
+ $slots[$self->trans_id-1] = 0;
$self->write_txn_slots( @slots );
$self->inc_txn_staleness_counter( $self->trans_id );
$self->set_trans_id( 0 );
DBM::Deep->_throw_error( "Cannot commit without an active transaction" );
}
- foreach my $entry (@{ $self->get_entries } ) {
- # Overwrite the entry in head with the entry in trans_id
- my $base = $entry
- + $self->hash_size
- + $self->byte_size;
-
- my $head_loc = $self->storage->read_at( $base, $self->byte_size );
- $head_loc = unpack( $StP{$self->byte_size}, $head_loc );
- my $trans_loc = $self->storage->read_at(
- $base + $self->trans_id * ( $self->byte_size + 4 ), $self->byte_size,
- );
-
- $self->storage->print_at( $base, $trans_loc );
- $self->storage->print_at(
- $base + $self->trans_id * ( $self->byte_size + 4 ),
- pack( $StP{$self->byte_size} . ' N', (0) x 2 ),
- );
-
- if ( $head_loc > 1 ) {
- $self->_load_sector( $head_loc )->free;
- }
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->commit( $idx );
}
$self->clear_entries;
my @slots = $self->read_txn_slots;
- $slots[$self->trans_id] = 0;
+ $slots[$self->trans_id-1] = 0;
$self->write_txn_slots( @slots );
$self->inc_txn_staleness_counter( $self->trans_id );
$self->set_trans_id( 0 );
sub read_txn_slots {
    my $self = shift;
-    return split '', unpack( 'b32',
-        $self->storage->read_at(
-            $self->trans_loc, 4,
-        )
-    );
+    # Delegated to the FileHeader sector, which now owns the transaction
+    # slot bitfield (formerly read directly out of storage here).
+    return $self->_load_header->read_txn_slots(@_);
}
sub write_txn_slots {
    my $self = shift;
-    $self->storage->print_at( $self->trans_loc,
-        pack( 'b32', join('', @_) ),
-    );
+    # Delegated to the FileHeader sector, which now owns the transaction
+    # slot bitfield (formerly packed and written directly here).
+    return $self->_load_header->write_txn_slots(@_);
}
sub get_running_txn_ids {
    my $self = shift;
    my @transactions = $self->read_txn_slots;
-    my @trans_ids = grep { $transactions[$_] } 0 .. $#transactions;
+    # Bitfield slot 0 corresponds to transaction ID 1 (ID 0 is the HEAD),
+    # hence the +1 mapping over the set slot indices.
+    # NOTE(review): @trans_ids is returned only implicitly as the value of
+    # the final assignment -- an explicit return would be clearer.
+    my @trans_ids = map { $_+1} grep { $transactions[$_] } 0 .. $#transactions;
}
sub get_txn_staleness_counter {
    my $self = shift;
-    my ($trans_id) = @_;
-
-    # Hardcode staleness of 0 for the HEAD
-    return 0 unless $trans_id;
-
-    my $x = unpack( 'N',
-        $self->storage->read_at(
-            $self->trans_loc + 4 * $trans_id,
-            4,
-        )
-    );
-    return $x;
+    # Delegated to the FileHeader sector, which now owns the per-transaction
+    # staleness counters (formerly unpacked directly from storage here).
+    return $self->_load_header->get_txn_staleness_counter(@_);
}
sub inc_txn_staleness_counter {
    my $self = shift;
-    my ($trans_id) = @_;
-
-    # Hardcode staleness of 0 for the HEAD
-    return unless $trans_id;
-
-    $self->storage->print_at(
-        $self->trans_loc + 4 * $trans_id,
-        pack( 'N', $self->get_txn_staleness_counter( $trans_id ) + 1 ),
-    );
+    # Delegated to the FileHeader sector, which now owns the per-transaction
+    # staleness counters (formerly read-modify-written directly here).
+    return $self->_load_header->inc_txn_staleness_counter(@_);
}
sub get_entries {
sub add_entry {
    my $self = shift;
-    my ($trans_id, $loc) = @_;
+    my ($trans_id, $loc, $idx) = @_;
+
+    # The HEAD (trans_id 0) keeps no entry list -- entries only track
+    # modifications made inside an active transaction.
+    return unless $trans_id;
    $self->{entries}{$trans_id} ||= {};
-    $self->{entries}{$trans_id}{$loc} = undef;
+    # Entries are now keyed by "sector_offset:bucket_index" so that commit
+    # and rollback can address the individual bucket, not just the sector.
+    $self->{entries}{$trans_id}{"$loc:$idx"} = undef;
}
# If the buckets are being relocated because of a reindexing, the entries
# mechanism needs to be made aware of it.
sub reindex_entry {
    my $self = shift;
-    my ($old_loc, $new_loc) = @_;
+    my ($old_loc, $old_idx, $new_loc, $new_idx) = @_;
    TRANS:
    while ( my ($trans_id, $locs) = each %{ $self->{entries} } ) {
-        foreach my $orig_loc ( keys %{ $locs } ) {
-            if ( $orig_loc == $old_loc ) {
-                delete $locs->{orig_loc};
-                $locs->{$new_loc} = undef;
-                next TRANS;
-            }
+        # Entries are keyed "sector_offset:bucket_index"; rekey any
+        # transaction that had an entry at the old location. (The removed
+        # code also had a latent bug: delete $locs->{orig_loc} used the
+        # literal bareword key 'orig_loc' instead of $orig_loc.)
+        if ( exists $locs->{"$old_loc:$old_idx"} ) {
+            delete $locs->{"$old_loc:$old_idx"};
+            $locs->{"$new_loc:$new_idx"} = undef;
+            next TRANS;
        }
    }
}
################################################################################
-{
- my $header_fixed = length( SIG_FILE ) + 1 + 4 + 4;
+# Run the configured digest function (set up in $self->{digest}, e.g. MD5)
+# over the given key(s), returning the fixed-width hash used for buckets.
+sub _apply_digest {
+    my $self = shift;
+    return $self->{digest}->(@_);
+}
- sub _write_file_header {
- my $self = shift;
+# Free-sector bookkeeping is now owned by the FileHeader sector; these thin
+# wrappers preserve the old engine-level API. The leading 0/1/2 argument
+# selects the blist/data/index free-chain respectively.
+sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
+sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
+sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
+sub _add_free_sector { shift->_load_header->add_free_sector( @_ ) }
- my $header_var = 1 + 1 + 1 + 4 + 4 * $self->num_txns + 3 * $self->byte_size;
-
- my $loc = $self->storage->request_space( $header_fixed + $header_var );
-
- $self->storage->print_at( $loc,
- SIG_FILE,
- SIG_HEADER,
- pack('N', 1), # header version - at this point, we're at 9 bytes
- pack('N', $header_var), # header size
- # --- Above is $header_fixed. Below is $header_var
- pack('C', $self->byte_size),
- pack('C', $self->max_buckets),
- pack('C', $self->num_txns),
- pack('N', 0 ), # Transaction activeness bitfield
- pack('N' . $self->num_txns, 0 x $self->num_txns ), # Transaction staleness counters
- pack($StP{$self->byte_size}, 0), # Start of free chain (blist size)
- pack($StP{$self->byte_size}, 0), # Start of free chain (data size)
- pack($StP{$self->byte_size}, 0), # Start of free chain (index size)
- );
+# Sector allocation is now owned by the FileHeader sector; these thin
+# wrappers preserve the old engine-level API. The leading 0/1/2 argument
+# selects the blist/data/index free-chain respectively.
+sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
+sub _request_data_sector { shift->_request_sector( 1, @_ ) }
+sub _request_index_sector { shift->_request_sector( 2, @_ ) }
+sub _request_sector { shift->_load_header->request_sector( @_ ) }
+
+################################################################################
- $self->set_trans_loc( $header_fixed + 3 );
- $self->set_chains_loc( $header_fixed + 3 + 4 + 4 * $self->num_txns );
+{
+ my %t = (
+ SIG_ARRAY => 'Reference',
+ SIG_HASH => 'Reference',
+ SIG_BLIST => 'BucketList',
+ SIG_INDEX => 'Index',
+ SIG_NULL => 'Null',
+ SIG_DATA => 'Scalar',
+ );
- return;
+ my %class_for;
+ while ( my ($k,$v) = each %t ) {
+ $class_for{ DBM::Deep::Engine->$k } = "DBM::Deep::Engine::Sector::$v";
}
- sub _read_file_header {
+ sub load_sector {
my $self = shift;
+ my ($offset) = @_;
- my $buffer = $self->storage->read_at( 0, $header_fixed );
- return unless length($buffer);
-
- my ($file_signature, $sig_header, $header_version, $size) = unpack(
- 'A4 A N N', $buffer
- );
+ my $data = $self->get_data( $offset )
+ or return;#die "Cannot read from '$offset'\n";
+ my $type = substr( $$data, 0, 1 );
+ my $class = $class_for{ $type };
+ return $class->new({
+ engine => $self,
+ type => $type,
+ offset => $offset,
+ });
+ }
+ *_load_sector = \&load_sector;
- unless ( $file_signature eq SIG_FILE ) {
- $self->storage->close;
- DBM::Deep->_throw_error( "Signature not found -- file is not a Deep DB" );
- }
+ sub load_header {
+ my $self = shift;
- unless ( $sig_header eq SIG_HEADER ) {
- $self->storage->close;
- DBM::Deep->_throw_error( "Old file version found." );
- }
+ #XXX Does this mean we make too many objects? -RobK, 2008-06-23
+ return DBM::Deep::Engine::Sector::FileHeader->new({
+ engine => $self,
+ offset => 0,
+ });
+ }
+ *_load_header = \&load_header;
- my $buffer2 = $self->storage->read_at( undef, $size );
- my @values = unpack( 'C C C', $buffer2 );
+ sub get_data {
+ my $self = shift;
+ my ($offset, $size) = @_;
+ return unless defined $offset;
- if ( @values != 3 || grep { !defined } @values ) {
- $self->storage->close;
- DBM::Deep->_throw_error("Corrupted file - bad header");
- }
+ unless ( exists $self->sector_cache->{$offset} ) {
+ # Don't worry about the header sector. It will manage itself.
+ return unless $offset;
- $self->set_trans_loc( $header_fixed + scalar(@values) );
- $self->set_chains_loc( $header_fixed + scalar(@values) + 4 + 4 * $self->num_txns );
+ if ( !defined $size ) {
+ my $type = $self->storage->read_at( $offset, 1 )
+ or die "($offset): Cannot read from '$offset' to find the type\n";
- #XXX Add warnings if values weren't set right
- @{$self}{qw(byte_size max_buckets num_txns)} = @values;
+ if ( $type eq $self->SIG_FREE ) {
+ return;
+ }
- my $header_var = scalar(@values) + 4 + 4 * $self->num_txns + 3 * $self->byte_size;
- unless ( $size == $header_var ) {
- $self->storage->close;
- DBM::Deep->_throw_error( "Unexpected size found ($size <-> $header_var)." );
+ my $class = $class_for{$type}
+ or die "($offset): Cannot find class for '$type'\n";
+ $size = $class->size( $self )
+ or die "($offset): '$class' doesn't return a size\n";
+ $self->sector_cache->{$offset} = $type . $self->storage->read_at( undef, $size - 1 );
+ }
+ else {
+ $self->sector_cache->{$offset} = $self->storage->read_at( $offset, $size )
+ or return;
+ }
}
- return length($buffer) + length($buffer2);
+ return \$self->sector_cache->{$offset};
}
}
-sub _load_sector {
+sub sector_cache {
my $self = shift;
- my ($offset) = @_;
-
- # Add a catch for offset of 0 or 1
- return if $offset <= 1;
-
- my $type = $self->storage->read_at( $offset, 1 );
- return if $type eq chr(0);
-
- if ( $type eq $self->SIG_ARRAY || $type eq $self->SIG_HASH ) {
- return DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- # XXX Don't we need key_md5 here?
- elsif ( $type eq $self->SIG_BLIST ) {
- return DBM::Deep::Engine::Sector::BucketList->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_INDEX ) {
- return DBM::Deep::Engine::Sector::Index->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_NULL ) {
- return DBM::Deep::Engine::Sector::Null->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_DATA ) {
- return DBM::Deep::Engine::Sector::Scalar->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- # This was deleted from under us, so just return and let the caller figure it out.
- elsif ( $type eq $self->SIG_FREE ) {
- return;
- }
+ return $self->{sector_cache} ||= {};
+}
- DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );
+sub clear_sector_cache {
+ my $self = shift;
+ $self->{sector_cache} = {};
}
-sub _apply_digest {
+sub dirty_sectors {
my $self = shift;
- return $self->{digest}->(@_);
+ return $self->{dirty_sectors} ||= {};
}
-sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
-sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
-sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
+sub clear_dirty_sectors {
+ my $self = shift;
+ $self->{dirty_sectors} = {};
+}
-sub _add_free_sector {
+sub add_dirty_sector {
my $self = shift;
- my ($multiple, $offset, $size) = @_;
+ my ($offset) = @_;
- my $chains_offset = $multiple * $self->byte_size;
+ $self->dirty_sectors->{ $offset } = undef;
+}
- my $storage = $self->storage;
+sub flush {
+ my $self = shift;
- # Increment staleness.
- # XXX Can this increment+modulo be done by "&= 0x1" ?
- my $staleness = unpack( $StP{STALE_SIZE()}, $storage->read_at( $offset + SIG_SIZE, STALE_SIZE ) );
- $staleness = ($staleness + 1 ) % ( 2 ** ( 8 * STALE_SIZE ) );
- $storage->print_at( $offset + SIG_SIZE, pack( $StP{STALE_SIZE()}, $staleness ) );
+ my $sectors = $self->dirty_sectors;
+ for my $offset (sort { $a <=> $b } keys %{ $sectors }) {
+ $self->storage->print_at( $offset, $self->sector_cache->{$offset} );
+ }
- my $old_head = $storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
+ # Why do we need to have the storage flush? Shouldn't autoflush take care of things?
+ # -RobK, 2008-06-26
+ $self->storage->flush;
- $storage->print_at( $self->chains_loc + $chains_offset,
- pack( $StP{$self->byte_size}, $offset ),
- );
+ $self->clear_dirty_sectors;
- # Record the old head in the new sector after the signature and staleness counter
- $storage->print_at( $offset + SIG_SIZE + STALE_SIZE, $old_head );
+ $self->clear_sector_cache;
}
-sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
-sub _request_data_sector { shift->_request_sector( 1, @_ ) }
-sub _request_index_sector { shift->_request_sector( 2, @_ ) }
+################################################################################
-sub _request_sector {
+sub lock_exclusive {
my $self = shift;
- my ($multiple, $size) = @_;
-
- my $chains_offset = $multiple * $self->byte_size;
-
- my $old_head = $self->storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
- my $loc = unpack( $StP{$self->byte_size}, $old_head );
+ my ($obj) = @_;
+ return $self->storage->lock_exclusive( $obj );
+}
- # We don't have any free sectors of the right size, so allocate a new one.
- unless ( $loc ) {
- my $offset = $self->storage->request_space( $size );
+# Take a shared (read) lock on the underlying storage on behalf of $obj.
+# Returns whatever the storage layer's lock_shared returns.
+sub lock_shared {
+    my $self = shift;
+    my ($obj) = @_;
+    return $self->storage->lock_shared( $obj );
+}
- # Zero out the new sector. This also guarantees correct increases
- # in the filesize.
- $self->storage->print_at( $offset, chr(0) x $size );
+sub unlock {
+ my $self = shift;
+ my ($obj) = @_;
- return $offset;
- }
+ my $rv = $self->storage->unlock( $obj );
- # Read the new head after the signature and the staleness counter
- my $new_head = $self->storage->read_at( $loc + SIG_SIZE + STALE_SIZE, $self->byte_size );
- $self->storage->print_at( $self->chains_loc + $chains_offset, $new_head );
- $self->storage->print_at(
- $loc + SIG_SIZE + STALE_SIZE,
- pack( $StP{$self->byte_size}, 0 ),
- );
+ $self->flush if $rv;
- return $loc;
+ return $rv;
}
################################################################################
sub num_txns { $_[0]{num_txns} }
sub max_buckets { $_[0]{max_buckets} }
sub blank_md5 { chr(0) x $_[0]->hash_size }
+sub data_sector_size { $_[0]{data_sector_size} }
+
+# This is a calculated value
+# Number of bytes needed in the header to hold one bit per transaction,
+# i.e. ceil(num_txns / 8). Computed once and cached on the object.
+sub txn_bitfield_len {
+    my $self = shift;
+    unless ( exists $self->{txn_bitfield_len} ) {
+        my $temp = ($self->num_txns) / 8;
+        # Round any fractional byte count up to a whole byte.
+        if ( $temp > int( $temp ) ) {
+            $temp = int( $temp ) + 1;
+        }
+        $self->{txn_bitfield_len} = $temp;
+    }
+    return $self->{txn_bitfield_len};
+}
sub trans_id { $_[0]{trans_id} }
sub set_trans_id { $_[0]{trans_id} = $_[1] }
sub chains_loc { $_[0]{chains_loc} }
sub set_chains_loc { $_[0]{chains_loc} = $_[1] }
-################################################################################
-
-package DBM::Deep::Iterator;
-
-sub new {
- my $class = shift;
- my ($args) = @_;
-
- my $self = bless {
- breadcrumbs => [],
- engine => $args->{engine},
- base_offset => $args->{base_offset},
- }, $class;
-
- Scalar::Util::weaken( $self->{engine} );
-
- return $self;
-}
-
-sub reset { $_[0]{breadcrumbs} = [] }
-
-sub get_sector_iterator {
- my $self = shift;
- my ($loc) = @_;
-
- my $sector = $self->{engine}->_load_sector( $loc )
- or return;
-
- if ( $sector->isa( 'DBM::Deep::Engine::Sector::Index' ) ) {
- return DBM::Deep::Iterator::Index->new({
- iterator => $self,
- sector => $sector,
- });
- }
- elsif ( $sector->isa( 'DBM::Deep::Engine::Sector::BucketList' ) ) {
- return DBM::Deep::Iterator::BucketList->new({
- iterator => $self,
- sector => $sector,
- });
- }
-
- DBM::Deep->_throw_error( "get_sector_iterator(): Why did $loc make a $sector?" );
-}
+sub cache { $_[0]{cache} ||= {} }
+sub clear_cache { %{$_[0]->cache} = () }
-sub get_next_key {
+sub _dump_file {
my $self = shift;
- my ($obj) = @_;
+ $self->flush;
- my $crumbs = $self->{breadcrumbs};
- my $e = $self->{engine};
+ # Read the header
+ my $header_sector = DBM::Deep::Engine::Sector::FileHeader->new({
+ engine => $self,
+ });
- unless ( @$crumbs ) {
- # This will be a Reference sector
- my $sector = $e->_load_sector( $self->{base_offset} )
- # If no sector is found, thist must have been deleted from under us.
- or return;
+ my %types = (
+ 0 => 'B',
+ 1 => 'D',
+ 2 => 'I',
+ );
- if ( $sector->staleness != $obj->_staleness ) {
- return;
- }
+ my %sizes = (
+ 'D' => $self->data_sector_size,
+ 'B' => DBM::Deep::Engine::Sector::BucketList->new({engine=>$self,offset=>1})->size,
+ 'I' => DBM::Deep::Engine::Sector::Index->new({engine=>$self,offset=>1})->size,
+ );
- my $loc = $sector->get_blist_loc
- or return;
+ my $return = "";
- push @$crumbs, $self->get_sector_iterator( $loc );
- }
+ # Filesize
+ $return .= "Size: " . (-s $self->storage->{fh}) . $/;
- FIND_NEXT_KEY: {
- # We're at the end.
- unless ( @$crumbs ) {
- $self->reset;
- return;
- }
+ # Header values
+ $return .= "NumTxns: " . $self->num_txns . $/;
- my $iterator = $crumbs->[-1];
+ # Read the free sector chains
+ my %sectors;
+ foreach my $multiple ( 0 .. 2 ) {
+ $return .= "Chains($types{$multiple}):";
+ my $old_loc = $self->chains_loc + $multiple * $self->byte_size;
+ while ( 1 ) {
+ my $loc = unpack(
+ $StP{$self->byte_size},
+ $self->storage->read_at( $old_loc, $self->byte_size ),
+ );
- # This level is done.
- if ( $iterator->at_end ) {
- pop @$crumbs;
- redo FIND_NEXT_KEY;
- }
+ # We're now out of free sectors of this kind.
+ unless ( $loc ) {
+ last;
+ }
- if ( $iterator->isa( 'DBM::Deep::Iterator::Index' ) ) {
- # If we don't have any more, it will be caught at the
- # prior check.
- if ( my $next = $iterator->get_next_iterator ) {
- push @$crumbs, $next;
+ $sectors{ $types{$multiple} }{ $loc } = undef;
+ $old_loc = $loc + SIG_SIZE + $STALE_SIZE;
+ $return .= " $loc";
+ }
+ $return .= $/;
+ }
+
+ my $spot = $header_sector->size;
+ SECTOR:
+ while ( $spot < $self->storage->{end} ) {
+ # Read each sector in order.
+ my $sector = $self->_load_sector( $spot );
+ if ( !$sector ) {
+ # Find it in the free-sectors that were found already
+ foreach my $type ( keys %sectors ) {
+ if ( exists $sectors{$type}{$spot} ) {
+ my $size = $sizes{$type};
+ $return .= sprintf "%08d: %s %04d\n", $spot, 'F' . $type, $size;
+ $spot += $size;
+ next SECTOR;
+ }
}
- redo FIND_NEXT_KEY;
- }
- unless ( $iterator->isa( 'DBM::Deep::Iterator::BucketList' ) ) {
- DBM::Deep->_throw_error(
- "Should have a bucketlist iterator here - instead have $iterator"
- );
+ die "********\n$return\nDidn't find free sector for $spot in chains\n********\n";
}
+ else {
+ $return .= sprintf "%08d: %s %04d", $spot, $sector->type, $sector->size;
+ if ( $sector->type eq 'D' ) {
+ $return .= ' ' . $sector->data;
+ }
+ elsif ( $sector->type eq 'A' || $sector->type eq 'H' ) {
+ $return .= ' REF: ' . $sector->get_refcount;
+ }
+ elsif ( $sector->type eq 'B' ) {
+ foreach my $bucket ( $sector->chopped_up ) {
+ $return .= "\n ";
+ $return .= sprintf "%08d", unpack($StP{$self->byte_size},
+ substr( $bucket->[-1], $self->hash_size, $self->byte_size),
+ );
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + $self->byte_size,
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ foreach my $txn ( 0 .. $self->num_txns - 2 ) {
+ my $l = unpack( $StP{$self->byte_size},
+ substr( $bucket->[-1],
+ $self->hash_size + 2 * $self->byte_size + $txn * ($self->byte_size + $STALE_SIZE),
+ $self->byte_size,
+ ),
+ );
+ $return .= sprintf " %08d", $l;
+ }
+ }
+ }
+ $return .= $/;
- # At this point, we have a BucketList iterator
- my $key = $iterator->get_next_key;
- if ( defined $key ) {
- return $key;
+ $spot += $sector->size;
}
- #XXX else { $iterator->set_to_end() } ?
-
- # We hit the end of the bucketlist iterator, so redo
- redo FIND_NEXT_KEY;
}
- DBM::Deep->_throw_error( "get_next_key(): How did we get here?" );
-}
-
-package DBM::Deep::Iterator::Index;
-
-sub new {
- my $self = bless $_[1] => $_[0];
- $self->{curr_index} = 0;
- return $self;
-}
-
-sub at_end {
- my $self = shift;
- return $self->{curr_index} >= $self->{iterator}{engine}->hash_chars;
-}
-
-sub get_next_iterator {
- my $self = shift;
-
- my $loc;
- while ( !$loc ) {
- return if $self->at_end;
- $loc = $self->{sector}->get_entry( $self->{curr_index}++ );
- }
-
- return $self->{iterator}->get_sector_iterator( $loc );
-}
-
-package DBM::Deep::Iterator::BucketList;
-
-sub new {
- my $self = bless $_[1] => $_[0];
- $self->{curr_index} = 0;
- return $self;
-}
-
-sub at_end {
- my $self = shift;
- return $self->{curr_index} >= $self->{iterator}{engine}->max_buckets;
-}
-
-sub get_next_key {
- my $self = shift;
-
- return if $self->at_end;
-
- my $idx = $self->{curr_index}++;
-
- my $data_loc = $self->{sector}->get_data_location_for({
- allow_head => 1,
- idx => $idx,
- }) or return;
-
- #XXX Do we want to add corruption checks here?
- return $self->{sector}->get_key_for( $idx )->data;
-}
-
-package DBM::Deep::Engine::Sector;
-
-sub new {
- my $self = bless $_[1], $_[0];
- Scalar::Util::weaken( $self->{engine} );
- $self->_init;
- return $self;
-}
-
-#sub _init {}
-#sub clone { DBM::Deep->_throw_error( "Must be implemented in the child class" ); }
-
-sub engine { $_[0]{engine} }
-sub offset { $_[0]{offset} }
-sub type { $_[0]{type} }
-
-sub base_size {
- my $self = shift;
- return $self->engine->SIG_SIZE + $self->engine->STALE_SIZE;
-}
-
-sub free {
- my $self = shift;
-
- my $e = $self->engine;
-
- $e->storage->print_at( $self->offset, $e->SIG_FREE );
- # Skip staleness counter
- $e->storage->print_at( $self->offset + $self->base_size,
- chr(0) x ($self->size - $self->base_size),
- );
-
- my $free_meth = $self->free_meth;
- $e->$free_meth( $self->offset, $self->size );
-
- return;
-}
-
-package DBM::Deep::Engine::Sector::Data;
-
-our @ISA = qw( DBM::Deep::Engine::Sector );
-
-# This is in bytes
-sub size { return 256 }
-sub free_meth { return '_add_free_data_sector' }
-
-sub clone {
- my $self = shift;
- return ref($self)->new({
- engine => $self->engine,
- data => $self->data,
- type => $self->type,
- });
-}
-
-package DBM::Deep::Engine::Sector::Scalar;
-
-our @ISA = qw( DBM::Deep::Engine::Sector::Data );
-
-sub free {
- my $self = shift;
-
- my $chain_loc = $self->chain_loc;
-
- $self->SUPER::free();
-
- if ( $chain_loc ) {
- $self->engine->_load_sector( $chain_loc )->free;
- }
-
- return;
-}
-
-sub type { $_[0]{engine}->SIG_DATA }
-sub _init {
- my $self = shift;
-
- my $engine = $self->engine;
-
- unless ( $self->offset ) {
- my $data_section = $self->size - $self->base_size - 1 * $engine->byte_size - 1;
-
- $self->{offset} = $engine->_request_data_sector( $self->size );
-
- my $data = delete $self->{data};
- my $dlen = length $data;
- my $continue = 1;
- my $curr_offset = $self->offset;
- while ( $continue ) {
-
- my $next_offset = 0;
-
- my ($leftover, $this_len, $chunk);
- if ( $dlen > $data_section ) {
- $leftover = 0;
- $this_len = $data_section;
- $chunk = substr( $data, 0, $this_len );
-
- $dlen -= $data_section;
- $next_offset = $engine->_request_data_sector( $self->size );
- $data = substr( $data, $this_len );
- }
- else {
- $leftover = $data_section - $dlen;
- $this_len = $dlen;
- $chunk = $data;
-
- $continue = 0;
- }
-
- $engine->storage->print_at( $curr_offset, $self->type ); # Sector type
- # Skip staleness
- $engine->storage->print_at( $curr_offset + $self->base_size,
- pack( $StP{$engine->byte_size}, $next_offset ), # Chain loc
- pack( $StP{1}, $this_len ), # Data length
- $chunk, # Data to be stored in this sector
- chr(0) x $leftover, # Zero-fill the rest
- );
-
- $curr_offset = $next_offset;
- }
-
- return;
- }
-}
-
-sub data_length {
- my $self = shift;
-
- my $buffer = $self->engine->storage->read_at(
- $self->offset + $self->base_size + $self->engine->byte_size, 1
- );
-
- return unpack( $StP{1}, $buffer );
-}
-
-sub chain_loc {
- my $self = shift;
- return unpack(
- $StP{$self->engine->byte_size},
- $self->engine->storage->read_at(
- $self->offset + $self->base_size,
- $self->engine->byte_size,
- ),
- );
-}
-
-sub data {
- my $self = shift;
-
- my $data;
- while ( 1 ) {
- my $chain_loc = $self->chain_loc;
-
- $data .= $self->engine->storage->read_at(
- $self->offset + $self->base_size + $self->engine->byte_size + 1, $self->data_length,
- );
-
- last unless $chain_loc;
-
- $self = $self->engine->_load_sector( $chain_loc );
- }
-
- return $data;
-}
-
-package DBM::Deep::Engine::Sector::Null;
-
-our @ISA = qw( DBM::Deep::Engine::Sector::Data );
-
-sub type { $_[0]{engine}->SIG_NULL }
-sub data_length { 0 }
-sub data { return }
-
-sub _init {
- my $self = shift;
-
- my $engine = $self->engine;
-
- unless ( $self->offset ) {
- my $leftover = $self->size - $self->base_size - 1 * $engine->byte_size - 1;
-
- $self->{offset} = $engine->_request_data_sector( $self->size );
- $engine->storage->print_at( $self->offset, $self->type ); # Sector type
- # Skip staleness counter
- $engine->storage->print_at( $self->offset + $self->base_size,
- pack( $StP{$engine->byte_size}, 0 ), # Chain loc
- pack( $StP{1}, $self->data_length ), # Data length
- chr(0) x $leftover, # Zero-fill the rest
- );
-
- return;
- }
-}
-
-package DBM::Deep::Engine::Sector::Reference;
-
-our @ISA = qw( DBM::Deep::Engine::Sector::Data );
-
-sub _init {
- my $self = shift;
-
- my $e = $self->engine;
-
- unless ( $self->offset ) {
- my $classname = Scalar::Util::blessed( delete $self->{data} );
- my $leftover = $self->size - $self->base_size - 2 * $e->byte_size;
-
- my $class_offset = 0;
- if ( defined $classname ) {
- my $class_sector = DBM::Deep::Engine::Sector::Scalar->new({
- engine => $e,
- data => $classname,
- });
- $class_offset = $class_sector->offset;
- }
-
- $self->{offset} = $e->_request_data_sector( $self->size );
- $e->storage->print_at( $self->offset, $self->type ); # Sector type
- # Skip staleness counter
- $e->storage->print_at( $self->offset + $self->base_size,
- pack( $StP{$e->byte_size}, 0 ), # Index/BList loc
- pack( $StP{$e->byte_size}, $class_offset ), # Classname loc
- chr(0) x $leftover, # Zero-fill the rest
- );
- }
- else {
- $self->{type} = $e->storage->read_at( $self->offset, 1 );
- }
-
- $self->{staleness} = unpack(
- $StP{$e->STALE_SIZE},
- $e->storage->read_at( $self->offset + $e->SIG_SIZE, $e->STALE_SIZE ),
- );
-
- return;
-}
-
-sub free {
- my $self = shift;
-
- my $blist_loc = $self->get_blist_loc;
- $self->engine->_load_sector( $blist_loc )->free if $blist_loc;
-
- my $class_loc = $self->get_class_offset;
- $self->engine->_load_sector( $class_loc )->free if $class_loc;
-
- $self->SUPER::free();
-}
-
-sub staleness { $_[0]{staleness} }
-
-sub get_data_for {
- my $self = shift;
- my ($args) = @_;
-
- # Assume that the head is not allowed unless otherwise specified.
- $args->{allow_head} = 0 unless exists $args->{allow_head};
-
- # Assume we don't create a new blist location unless otherwise specified.
- $args->{create} = 0 unless exists $args->{create};
-
- my $blist = $self->get_bucket_list({
- key_md5 => $args->{key_md5},
- key => $args->{key},
- create => $args->{create},
- });
- return unless $blist && $blist->{found};
-
- # At this point, $blist knows where the md5 is. What it -doesn't- know yet
- # is whether or not this transaction has this key. That's part of the next
- # function call.
- my $location = $blist->get_data_location_for({
- allow_head => $args->{allow_head},
- }) or return;
-
- return $self->engine->_load_sector( $location );
-}
-
-sub write_data {
- my $self = shift;
- my ($args) = @_;
-
- my $blist = $self->get_bucket_list({
- key_md5 => $args->{key_md5},
- key => $args->{key},
- create => 1,
- }) or DBM::Deep->_throw_error( "How did write_data fail (no blist)?!" );
-
- # Handle any transactional bookkeeping.
- if ( $self->engine->trans_id ) {
- if ( ! $blist->has_md5 ) {
- $blist->mark_deleted({
- trans_id => 0,
- });
- }
- }
- else {
- my @trans_ids = $self->engine->get_running_txn_ids;
- if ( $blist->has_md5 ) {
- if ( @trans_ids ) {
- my $old_value = $blist->get_data_for;
- foreach my $other_trans_id ( @trans_ids ) {
- next if $blist->get_data_location_for({
- trans_id => $other_trans_id,
- allow_head => 0,
- });
- $blist->write_md5({
- trans_id => $other_trans_id,
- key => $args->{key},
- key_md5 => $args->{key_md5},
- value => $old_value->clone,
- });
- }
- }
- }
- else {
- if ( @trans_ids ) {
- foreach my $other_trans_id ( @trans_ids ) {
- #XXX This doesn't seem to possible to ever happen . . .
- next if $blist->get_data_location_for({ trans_id => $other_trans_id, allow_head => 0 });
- $blist->mark_deleted({
- trans_id => $other_trans_id,
- });
- }
- }
- }
- }
-
- #XXX Is this safe to do transactionally?
- # Free the place we're about to write to.
- if ( $blist->get_data_location_for({ allow_head => 0 }) ) {
- $blist->get_data_for({ allow_head => 0 })->free;
- }
-
- $blist->write_md5({
- key => $args->{key},
- key_md5 => $args->{key_md5},
- value => $args->{value},
- });
-}
-
-sub delete_key {
- my $self = shift;
- my ($args) = @_;
-
- # XXX What should happen if this fails?
- my $blist = $self->get_bucket_list({
- key_md5 => $args->{key_md5},
- }) or DBM::Deep->_throw_error( "How did delete_key fail (no blist)?!" );
-
- # Save the location so that we can free the data
- my $location = $blist->get_data_location_for({
- allow_head => 0,
- });
- my $old_value = $location && $self->engine->_load_sector( $location );
-
- my @trans_ids = $self->engine->get_running_txn_ids;
-
- if ( $self->engine->trans_id == 0 ) {
- if ( @trans_ids ) {
- foreach my $other_trans_id ( @trans_ids ) {
- next if $blist->get_data_location_for({ trans_id => $other_trans_id, allow_head => 0 });
- $blist->write_md5({
- trans_id => $other_trans_id,
- key => $args->{key},
- key_md5 => $args->{key_md5},
- value => $old_value->clone,
- });
- }
- }
- }
-
- my $data;
- if ( @trans_ids ) {
- $blist->mark_deleted( $args );
-
- if ( $old_value ) {
- $data = $old_value->data;
- $old_value->free;
- }
- }
- else {
- $data = $blist->delete_md5( $args );
- }
-
- return $data;
-}
-
-sub get_blist_loc {
- my $self = shift;
-
- my $e = $self->engine;
- my $blist_loc = $e->storage->read_at( $self->offset + $self->base_size, $e->byte_size );
- return unpack( $StP{$e->byte_size}, $blist_loc );
-}
-
-sub get_bucket_list {
- my $self = shift;
- my ($args) = @_;
- $args ||= {};
-
- # XXX Add in check here for recycling?
-
- my $engine = $self->engine;
-
- my $blist_loc = $self->get_blist_loc;
-
- # There's no index or blist yet
- unless ( $blist_loc ) {
- return unless $args->{create};
-
- my $blist = DBM::Deep::Engine::Sector::BucketList->new({
- engine => $engine,
- key_md5 => $args->{key_md5},
- });
-
- $engine->storage->print_at( $self->offset + $self->base_size,
- pack( $StP{$engine->byte_size}, $blist->offset ),
- );
-
- return $blist;
- }
-
- my $sector = $engine->_load_sector( $blist_loc )
- or DBM::Deep->_throw_error( "Cannot read sector at $blist_loc in get_bucket_list()" );
- my $i = 0;
- my $last_sector = undef;
- while ( $sector->isa( 'DBM::Deep::Engine::Sector::Index' ) ) {
- $blist_loc = $sector->get_entry( ord( substr( $args->{key_md5}, $i++, 1 ) ) );
- $last_sector = $sector;
- if ( $blist_loc ) {
- $sector = $engine->_load_sector( $blist_loc )
- or DBM::Deep->_throw_error( "Cannot read sector at $blist_loc in get_bucket_list()" );
- }
- else {
- $sector = undef;
- last;
- }
- }
-
- # This means we went through the Index sector(s) and found an empty slot
- unless ( $sector ) {
- return unless $args->{create};
-
- DBM::Deep->_throw_error( "No last_sector when attempting to build a new entry" )
- unless $last_sector;
-
- my $blist = DBM::Deep::Engine::Sector::BucketList->new({
- engine => $engine,
- key_md5 => $args->{key_md5},
- });
-
- $last_sector->set_entry( ord( substr( $args->{key_md5}, $i - 1, 1 ) ) => $blist->offset );
-
- return $blist;
- }
-
- $sector->find_md5( $args->{key_md5} );
-
- # See whether or not we need to reindex the bucketlist
- if ( !$sector->has_md5 && $args->{create} && $sector->{idx} == -1 ) {
- my $new_index = DBM::Deep::Engine::Sector::Index->new({
- engine => $engine,
- });
-
- my %blist_cache;
- #XXX q.v. the comments for this function.
- foreach my $entry ( $sector->chopped_up ) {
- my ($spot, $md5) = @{$entry};
- my $idx = ord( substr( $md5, $i, 1 ) );
-
- # XXX This is inefficient
- my $blist = $blist_cache{$idx}
- ||= DBM::Deep::Engine::Sector::BucketList->new({
- engine => $engine,
- });
-
- $new_index->set_entry( $idx => $blist->offset );
-
- my $new_spot = $blist->write_at_next_open( $md5 );
- $engine->reindex_entry( $spot => $new_spot );
- }
-
- # Handle the new item separately.
- {
- my $idx = ord( substr( $args->{key_md5}, $i, 1 ) );
- my $blist = $blist_cache{$idx}
- ||= DBM::Deep::Engine::Sector::BucketList->new({
- engine => $engine,
- });
-
- $new_index->set_entry( $idx => $blist->offset );
-
- #XXX THIS IS HACKY!
- $blist->find_md5( $args->{key_md5} );
- $blist->write_md5({
- key => $args->{key},
- key_md5 => $args->{key_md5},
- value => DBM::Deep::Engine::Sector::Null->new({
- engine => $engine,
- data => undef,
- }),
- });
- }
-
- if ( $last_sector ) {
- $last_sector->set_entry(
- ord( substr( $args->{key_md5}, $i - 1, 1 ) ),
- $new_index->offset,
- );
- } else {
- $engine->storage->print_at( $self->offset + $self->base_size,
- pack( $StP{$engine->byte_size}, $new_index->offset ),
- );
- }
-
- $sector->free;
-
- $sector = $blist_cache{ ord( substr( $args->{key_md5}, $i, 1 ) ) };
- $sector->find_md5( $args->{key_md5} );
- }
-
- return $sector;
-}
-
-sub get_class_offset {
- my $self = shift;
-
- my $e = $self->engine;
- return unpack(
- $StP{$e->byte_size},
- $e->storage->read_at(
- $self->offset + $self->base_size + 1 * $e->byte_size, $e->byte_size,
- ),
- );
-}
-
-sub get_classname {
- my $self = shift;
-
- my $class_offset = $self->get_class_offset;
-
- return unless $class_offset;
-
- return $self->engine->_load_sector( $class_offset )->data;
-}
-
-#XXX Add singleton handling here
-sub data {
- my $self = shift;
-
- my $new_obj = DBM::Deep->new({
- type => $self->type,
- base_offset => $self->offset,
- staleness => $self->staleness,
- storage => $self->engine->storage,
- engine => $self->engine,
- });
-
- if ( $self->engine->storage->{autobless} ) {
- my $classname = $self->get_classname;
- if ( defined $classname ) {
- bless $new_obj, $classname;
- }
- }
-
- return $new_obj;
-}
-
-package DBM::Deep::Engine::Sector::BucketList;
-
-our @ISA = qw( DBM::Deep::Engine::Sector );
-
-sub _init {
- my $self = shift;
-
- my $engine = $self->engine;
-
- unless ( $self->offset ) {
- my $leftover = $self->size - $self->base_size;
-
- $self->{offset} = $engine->_request_blist_sector( $self->size );
- $engine->storage->print_at( $self->offset, $engine->SIG_BLIST ); # Sector type
- # Skip staleness counter
- $engine->storage->print_at( $self->offset + $self->base_size,
- chr(0) x $leftover, # Zero-fill the data
- );
- }
-
- if ( $self->{key_md5} ) {
- $self->find_md5;
- }
-
- return $self;
-}
-
-sub size {
- my $self = shift;
- unless ( $self->{size} ) {
- my $e = $self->engine;
- # Base + numbuckets * bucketsize
- $self->{size} = $self->base_size + $e->max_buckets * $self->bucket_size;
- }
- return $self->{size};
-}
-
-sub free_meth { return '_add_free_blist_sector' }
-
-sub bucket_size {
- my $self = shift;
- unless ( $self->{bucket_size} ) {
- my $e = $self->engine;
- # Key + head (location) + transactions (location + staleness-counter)
- my $location_size = $e->byte_size + $e->num_txns * ( $e->byte_size + 4 );
- $self->{bucket_size} = $e->hash_size + $location_size;
- }
- return $self->{bucket_size};
-}
-
-# XXX This is such a poor hack. I need to rethink this code.
-sub chopped_up {
- my $self = shift;
-
- my $e = $self->engine;
-
- my @buckets;
- foreach my $idx ( 0 .. $e->max_buckets - 1 ) {
- my $spot = $self->offset + $self->base_size + $idx * $self->bucket_size;
- my $md5 = $e->storage->read_at( $spot, $e->hash_size );
-
- #XXX If we're chopping, why would we ever have the blank_md5?
- last if $md5 eq $e->blank_md5;
-
- my $rest = $e->storage->read_at( undef, $self->bucket_size - $e->hash_size );
- push @buckets, [ $spot, $md5 . $rest ];
- }
-
- return @buckets;
-}
-
-sub write_at_next_open {
- my $self = shift;
- my ($entry) = @_;
-
- #XXX This is such a hack!
- $self->{_next_open} = 0 unless exists $self->{_next_open};
-
- my $spot = $self->offset + $self->base_size + $self->{_next_open}++ * $self->bucket_size;
- $self->engine->storage->print_at( $spot, $entry );
-
- return $spot;
-}
-
-sub has_md5 {
- my $self = shift;
- unless ( exists $self->{found} ) {
- $self->find_md5;
- }
- return $self->{found};
-}
-
-sub find_md5 {
- my $self = shift;
-
- $self->{found} = undef;
- $self->{idx} = -1;
-
- if ( @_ ) {
- $self->{key_md5} = shift;
- }
-
- # If we don't have an MD5, then what are we supposed to do?
- unless ( exists $self->{key_md5} ) {
- DBM::Deep->_throw_error( "Cannot find_md5 without a key_md5 set" );
- }
-
- my $e = $self->engine;
- foreach my $idx ( 0 .. $e->max_buckets - 1 ) {
- my $potential = $e->storage->read_at(
- $self->offset + $self->base_size + $idx * $self->bucket_size, $e->hash_size,
- );
-
- if ( $potential eq $e->blank_md5 ) {
- $self->{idx} = $idx;
- return;
- }
-
- if ( $potential eq $self->{key_md5} ) {
- $self->{found} = 1;
- $self->{idx} = $idx;
- return;
- }
- }
-
- return;
-}
-
-sub write_md5 {
- my $self = shift;
- my ($args) = @_;
-
- DBM::Deep->_throw_error( "write_md5: no key" ) unless exists $args->{key};
- DBM::Deep->_throw_error( "write_md5: no key_md5" ) unless exists $args->{key_md5};
- DBM::Deep->_throw_error( "write_md5: no value" ) unless exists $args->{value};
-
- my $engine = $self->engine;
-
- $args->{trans_id} = $engine->trans_id unless exists $args->{trans_id};
-
- my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
- $engine->add_entry( $args->{trans_id}, $spot );
-
- unless ($self->{found}) {
- my $key_sector = DBM::Deep::Engine::Sector::Scalar->new({
- engine => $engine,
- data => $args->{key},
- });
-
- $engine->storage->print_at( $spot,
- $args->{key_md5},
- pack( $StP{$engine->byte_size}, $key_sector->offset ),
- );
- }
-
- my $loc = $spot
- + $engine->hash_size
- + $engine->byte_size
- + $args->{trans_id} * ( $engine->byte_size + 4 );
-
- $engine->storage->print_at( $loc,
- pack( $StP{$engine->byte_size}, $args->{value}->offset ),
- pack( 'N', $engine->get_txn_staleness_counter( $args->{trans_id} ) ),
- );
-}
-
-sub mark_deleted {
- my $self = shift;
- my ($args) = @_;
- $args ||= {};
-
- my $engine = $self->engine;
-
- $args->{trans_id} = $engine->trans_id unless exists $args->{trans_id};
-
- my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
- $engine->add_entry( $args->{trans_id}, $spot );
-
- my $loc = $spot
- + $engine->hash_size
- + $engine->byte_size
- + $args->{trans_id} * ( $engine->byte_size + 4 );
-
- $engine->storage->print_at( $loc,
- pack( $StP{$engine->byte_size}, 1 ), # 1 is the marker for deleted
- pack( 'N', $engine->get_txn_staleness_counter( $args->{trans_id} ) ),
- );
-}
-
-sub delete_md5 {
- my $self = shift;
- my ($args) = @_;
-
- my $engine = $self->engine;
- return undef unless $self->{found};
-
- # Save the location so that we can free the data
- my $location = $self->get_data_location_for({
- allow_head => 0,
- });
- my $key_sector = $self->get_key_for;
-
- my $spot = $self->offset + $self->base_size + $self->{idx} * $self->bucket_size;
- $engine->storage->print_at( $spot,
- $engine->storage->read_at(
- $spot + $self->bucket_size,
- $self->bucket_size * ( $engine->max_buckets - $self->{idx} - 1 ),
- ),
- chr(0) x $self->bucket_size,
- );
-
- $key_sector->free;
-
- my $data_sector = $self->engine->_load_sector( $location );
- my $data = $data_sector->data;
- $data_sector->free;
-
- return $data;
-}
-
-sub get_data_location_for {
- my $self = shift;
- my ($args) = @_;
- $args ||= {};
-
- $args->{allow_head} = 0 unless exists $args->{allow_head};
- $args->{trans_id} = $self->engine->trans_id unless exists $args->{trans_id};
- $args->{idx} = $self->{idx} unless exists $args->{idx};
-
- my $e = $self->engine;
-
- my $spot = $self->offset + $self->base_size
- + $args->{idx} * $self->bucket_size
- + $e->hash_size
- + $e->byte_size
- + $args->{trans_id} * ( $e->byte_size + 4 );
-
- my $buffer = $e->storage->read_at(
- $spot,
- $e->byte_size + 4,
- );
- my ($loc, $staleness) = unpack( $StP{$e->byte_size} . ' N', $buffer );
-
- # We have found an entry that is old, so get rid of it
- if ( $staleness != (my $s = $e->get_txn_staleness_counter( $args->{trans_id} ) ) ) {
- $e->storage->print_at(
- $spot,
- pack( $StP{$e->byte_size} . ' N', (0) x 2 ),
- );
- $loc = 0;
- }
-
- # If we're in a transaction and we never wrote to this location, try the
- # HEAD instead.
- if ( $args->{trans_id} && !$loc && $args->{allow_head} ) {
- return $self->get_data_location_for({
- trans_id => 0,
- allow_head => 1,
- idx => $args->{idx},
- });
- }
- return $loc <= 1 ? 0 : $loc;
-}
-
-sub get_data_for {
- my $self = shift;
- my ($args) = @_;
- $args ||= {};
-
- return unless $self->{found};
- my $location = $self->get_data_location_for({
- allow_head => $args->{allow_head},
- });
- return $self->engine->_load_sector( $location );
-}
-
-sub get_key_for {
- my $self = shift;
- my ($idx) = @_;
- $idx = $self->{idx} unless defined $idx;
-
- if ( $idx >= $self->engine->max_buckets ) {
- DBM::Deep->_throw_error( "get_key_for(): Attempting to retrieve $idx" );
- }
-
- my $location = $self->engine->storage->read_at(
- $self->offset + $self->base_size + $idx * $self->bucket_size + $self->engine->hash_size,
- $self->engine->byte_size,
- );
- $location = unpack( $StP{$self->engine->byte_size}, $location );
- DBM::Deep->_throw_error( "get_key_for: No location?" ) unless $location;
-
- return $self->engine->_load_sector( $location );
-}
-
-package DBM::Deep::Engine::Sector::Index;
-
-our @ISA = qw( DBM::Deep::Engine::Sector );
-
-sub _init {
- my $self = shift;
-
- my $engine = $self->engine;
-
- unless ( $self->offset ) {
- my $leftover = $self->size - $self->base_size;
-
- $self->{offset} = $engine->_request_index_sector( $self->size );
- $engine->storage->print_at( $self->offset, $engine->SIG_INDEX ); # Sector type
- # Skip staleness counter
- $engine->storage->print_at( $self->offset + $self->base_size,
- chr(0) x $leftover, # Zero-fill the rest
- );
- }
-
- return $self;
-}
-
-sub size {
- my $self = shift;
- unless ( $self->{size} ) {
- my $e = $self->engine;
- $self->{size} = $self->base_size + $e->byte_size * $e->hash_chars;
- }
- return $self->{size};
-}
-
-sub free_meth { return '_add_free_index_sector' }
-
-sub free {
- my $self = shift;
- my $e = $self->engine;
-
- for my $i ( 0 .. $e->hash_chars - 1 ) {
- my $l = $self->get_entry( $i ) or next;
- $e->_load_sector( $l )->free;
- }
-
- $self->SUPER::free();
-}
-
-sub _loc_for {
- my $self = shift;
- my ($idx) = @_;
- return $self->offset + $self->base_size + $idx * $self->engine->byte_size;
-}
-
-sub get_entry {
- my $self = shift;
- my ($idx) = @_;
-
- my $e = $self->engine;
-
- DBM::Deep->_throw_error( "get_entry: Out of range ($idx)" )
- if $idx < 0 || $idx >= $e->hash_chars;
-
- return unpack(
- $StP{$e->byte_size},
- $e->storage->read_at( $self->_loc_for( $idx ), $e->byte_size ),
- );
-}
-
-sub set_entry {
- my $self = shift;
- my ($idx, $loc) = @_;
-
- my $e = $self->engine;
-
- DBM::Deep->_throw_error( "set_entry: Out of range ($idx)" )
- if $idx < 0 || $idx >= $e->hash_chars;
-
- $self->engine->storage->print_at(
- $self->_loc_for( $idx ),
- pack( $StP{$e->byte_size}, $loc ),
- );
+ return $return;
}
1;