return 1 if $obj->_base_offset;
- my $header = DBM::Deep::Engine::Sector::FileHeader->new({
- engine => $self,
- });
+ my $header = $self->_load_header;
# Creating a new file
if ( $header->is_new ) {
DBM::Deep->_throw_error( "Cannot rollback without an active transaction" );
}
- # Each entry is the file location for a bucket that has a modification for
- # this transaction. The entries need to be expunged.
- foreach my $entry (@{ $self->get_entries } ) {
- # Remove the entry here
- my $read_loc = $entry
- + $self->hash_size
- + $self->byte_size
- + $self->byte_size
- + ($self->trans_id - 1) * ( $self->byte_size + $STALE_SIZE );
-
- my $data_loc = $self->storage->read_at( $read_loc, $self->byte_size );
- $data_loc = unpack( $StP{$self->byte_size}, $data_loc );
- $self->storage->print_at( $read_loc, pack( $StP{$self->byte_size}, 0 ) );
-
- if ( $data_loc > 1 ) {
- $self->_load_sector( $data_loc )->free;
- }
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->rollback( $idx );
}
$self->clear_entries;
DBM::Deep->_throw_error( "Cannot commit without an active transaction" );
}
- foreach my $entry (@{ $self->get_entries } ) {
- # Overwrite the entry in head with the entry in trans_id
- my $base = $entry
- + $self->hash_size
- + $self->byte_size;
-
- my $head_loc = $self->storage->read_at( $base, $self->byte_size );
- $head_loc = unpack( $StP{$self->byte_size}, $head_loc );
-
- my $spot = $base + $self->byte_size + ($self->trans_id - 1) * ( $self->byte_size + $STALE_SIZE );
- my $trans_loc = $self->storage->read_at(
- $spot, $self->byte_size,
- );
-
- $self->storage->print_at( $base, $trans_loc );
- $self->storage->print_at(
- $spot,
- pack( $StP{$self->byte_size} . ' ' . $StP{$STALE_SIZE}, (0) x 2 ),
- );
-
- if ( $head_loc > 1 ) {
- $self->_load_sector( $head_loc )->free;
- }
+ foreach my $entry ( @{ $self->get_entries } ) {
+ my ($sector, $idx) = split ':', $entry;
+ $self->_load_sector( $sector )->commit( $idx );
}
$self->clear_entries;
# Read the per-transaction "in use" bitfield. The file header sector owns
# the on-disk layout now, so this is a straight pass-through.
sub read_txn_slots {
    my $self = shift;
    my $header = $self->_load_header;
    return $header->read_txn_slots(@_);
}
# Persist the per-transaction bitfield; the header sector does the actual
# packing and writing. Note: shift strips the invocant from @_ before the
# remaining slot values are forwarded.
sub write_txn_slots {
    return shift->_load_header->write_txn_slots(@_);
}
sub get_running_txn_ids {
# The header sector tracks staleness counters per transaction id; forward
# the query (remaining args after shift are passed through unchanged).
sub get_txn_staleness_counter {
    return shift->_load_header->get_txn_staleness_counter(@_);
}
# Bump the staleness counter for a transaction id. Delegated to the file
# header sector, which owns the counter storage.
sub inc_txn_staleness_counter {
    my $self = shift;
    my $header = $self->_load_header;
    return $header->inc_txn_staleness_counter(@_);
}
sub get_entries {
# Record that bucket slot $idx at file location $loc was modified under
# transaction $trans_id. Entries are keyed "loc:idx" so that commit and
# rollback can later replay or expunge the exact slot.
sub add_entry {
    my ($self, $trans_id, $loc, $idx) = @_;

    my $for_txn = $self->{entries}{$trans_id} ||= {};
    $for_txn->{ join ':', $loc, $idx } = undef;
}
# If the buckets are being relocated because of a reindexing, the entries
# mechanism needs to be made aware of it.
# If the buckets are being relocated because of a reindexing, the entries
# mechanism needs to be made aware of it: rewrite any recorded "loc:idx"
# key for the old location into the key for the new one, in every
# transaction that touched it.
sub reindex_entry {
    my ($self, $old_loc, $old_idx, $new_loc, $new_idx) = @_;

    my $old_key = "$old_loc:$old_idx";
    my $new_key = "$new_loc:$new_idx";

    TRANS:
    while ( my ($trans_id, $locs) = each %{ $self->{entries} } ) {
        next TRANS unless exists $locs->{$old_key};
        delete $locs->{$old_key};
        $locs->{$new_key} = undef;
    }
}
################################################################################
-sub _load_sector {
- my $self = shift;
- my ($offset) = @_;
-
- # Add a catch for offset of 0 or 1
- return if !$offset || $offset <= 1;
-
- unless ( exists $self->sector_cache->{ $offset } ) {
- my $type = $self->storage->read_at( $offset, $self->SIG_SIZE );
-
- # XXX Don't we want to do something more proactive here? -RobK, 2008-06-19
- return if $type eq chr(0);
-
- if ( $type eq $self->SIG_ARRAY || $type eq $self->SIG_HASH ) {
- $self->sector_cache->{$offset} = DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- # XXX Don't we need key_md5 here?
- elsif ( $type eq $self->SIG_BLIST ) {
- $self->sector_cache->{$offset} = DBM::Deep::Engine::Sector::BucketList->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_INDEX ) {
- $self->sector_cache->{$offset} = DBM::Deep::Engine::Sector::Index->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_NULL ) {
- $self->sector_cache->{$offset} = DBM::Deep::Engine::Sector::Null->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- elsif ( $type eq $self->SIG_DATA ) {
- $self->sector_cache->{$offset} = DBM::Deep::Engine::Sector::Scalar->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- # This was deleted from under us, so just return and let the caller figure it out.
- elsif ( $type eq $self->SIG_FREE ) {
- return;
- }
- else {
- DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );
- }
- }
-
- return $self->sector_cache->{$offset};
-}
-
# Run the engine's configured digest coderef over the given key material
# and return the resulting hash.
sub _apply_digest {
    my $self = shift;
    my $hasher = $self->{digest};
    return $hasher->(@_);
}
# Free-list and allocation bookkeeping lives in the file header sector.
# These thin wrappers map the sector kind to its chain index
# (0 = bucket list, 1 = data, 2 = index) and forward to the header.
sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
sub _add_free_data_sector  { shift->_add_free_sector( 1, @_ ) }
sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }

sub _add_free_sector {
    my $self = shift;
    return $self->_load_header->add_free_sector( @_ );
}

sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
sub _request_data_sector  { shift->_request_sector( 1, @_ ) }
sub _request_index_sector { shift->_request_sector( 2, @_ ) }

sub _request_sector {
    my $self = shift;
    return $self->_load_header->request_sector( @_ );
}
################################################################################

{
    # Map each one-byte on-disk sector signature to the class that knows
    # how to inflate it. SIG_ARRAY and SIG_HASH are both references.
    my %t = (
        SIG_ARRAY => 'Reference',
        SIG_HASH  => 'Reference',
        SIG_BLIST => 'BucketList',
        SIG_INDEX => 'Index',
        SIG_NULL  => 'Null',
        SIG_DATA  => 'Scalar',
    );

    my %class_for;
    while ( my ($k, $v) = each %t ) {
        $class_for{ DBM::Deep::Engine->$k } = "DBM::Deep::Engine::Sector::$v";
    }

    # Inflate the sector at $offset into the appropriate Sector object.
    # Returns nothing if the sector cannot be read (e.g. it was freed out
    # from under us); throws on an unrecognized signature byte.
    sub load_sector {
        my $self = shift;
        my ($offset) = @_;

        my $data = $self->get_data( $offset )
            or return;
        my $type = substr( $$data, 0, 1 );

        # get_data() only validates the signature when it has to probe for
        # the sector's size; guard here as well so a corrupt signature
        # produces a useful error instead of "Can't call method on undef".
        my $class = $class_for{ $type }
            or DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );

        return $class->new({
            engine => $self,
            type   => $type,
            offset => $offset,
        });
    }
    *_load_sector = \&load_sector;

    # The file header always lives at offset 0 and manages its own caching.
    sub load_header {
        my $self = shift;

        #XXX Does this mean we make too many objects? -RobK, 2008-06-23
        return DBM::Deep::Engine::Sector::FileHeader->new({
            engine => $self,
            offset => 0,
        });
    }
    *_load_header = \&load_header;

    # Return a reference to the raw bytes of the sector at $offset,
    # reading (and caching) them from storage on first access. If $size is
    # not given, it is derived from the sector's signature byte. Returns
    # nothing for an undef/zero offset or a freed sector.
    sub get_data {
        my $self = shift;
        my ($offset, $size) = @_;

        return unless defined $offset;

        unless ( exists $self->sector_cache->{$offset} ) {
            # Don't worry about the header sector. It will manage itself.
            return unless $offset;

            if ( !defined $size ) {
                my $type = $self->storage->read_at( $offset, 1 )
                    or die "($offset): Cannot read from '$offset' to find the type\n";

                # This sector was deleted from under us; let the caller
                # figure out what to do about it.
                if ( $type eq $self->SIG_FREE ) {
                    return;
                }

                my $class = $class_for{$type}
                    or die "($offset): Cannot find class for '$type'\n";
                $size = $class->size( $self )
                    or die "($offset): '$class' doesn't return a size\n";

                # The signature byte was already consumed, so read the rest
                # of the sector from the current file position.
                $self->sector_cache->{$offset} = $type . $self->storage->read_at( undef, $size - 1 );
            }
            else {
                $self->sector_cache->{$offset} = $self->storage->read_at( $offset, $size )
                    or return;
            }
        }

        return \$self->sector_cache->{$offset};
    }
}
# Lazily-created in-memory cache of raw sector data, keyed by file offset.
sub sector_cache {
    my $self = shift;
    $self->{sector_cache} = {} unless $self->{sector_cache};
    return $self->{sector_cache};
}
# Mark the sector at $offset as needing a write-back on the next flush.
# Stored as a hash key so repeated marks of the same offset collapse.
sub add_dirty_sector {
    my ($self, $offset) = @_;

    $self->dirty_sectors->{ $offset } = undef;
}
sub flush {
my $sectors = $self->dirty_sectors;
for my $offset (sort { $a <=> $b } keys %{ $sectors }) {
- $sectors->{$offset}->flush;
+ $self->storage->print_at( $offset, $self->sector_cache->{$offset} );
}
+ # Why do we need to have the storage flush? Shouldn't autoflush take care of things?
+ # -RobK, 2008-06-26
+ $self->storage->flush;
+
$self->clear_dirty_sectors;
$self->clear_sector_cache;