sub SIG_FREE () { 'F' }
sub SIG_SIZE () { 1 }
-our $STALE_SIZE = 2;
+use DBM::Deep::Storage::File ();
+use DBM::Deep::Iterator ();
+use DBM::Deep::Engine::Sector::Data ();
+use DBM::Deep::Engine::Sector::BucketList ();
+use DBM::Deep::Engine::Sector::Index ();
+use DBM::Deep::Engine::Sector::Null ();
+use DBM::Deep::Engine::Sector::Reference ();
+use DBM::Deep::Engine::Sector::Scalar ();
+use DBM::Deep::Null ();
+
+my $STALE_SIZE = 2;
# Please refer to the pack() documentation for further information
my %StP = (
4 => 'N', # Unsigned long in "network" (big-endian) order
    8 => 'Q', # Unsigned quad (no order specified, presumably machine-dependent)
);
-sub StP { $StP{$_[1]} }
-# Import these after the SIG_* definitions because those definitions are used
-# in the headers of these classes. -RobK, 2008-06-20
-use DBM::Deep::Engine::Sector::BucketList;
-use DBM::Deep::Engine::Sector::FileHeader;
-use DBM::Deep::Engine::Sector::Index;
-use DBM::Deep::Engine::Sector::Null;
-use DBM::Deep::Engine::Sector::Reference;
-use DBM::Deep::Engine::Sector::Scalar;
-use DBM::Deep::Iterator;
+=head1 NAME
-################################################################################
+DBM::Deep::Engine
+
+=head1 PURPOSE
+
+This is an internal-use-only object for L<DBM::Deep>. It mediates the low-level
+mapping between the L<DBM::Deep> objects and the storage medium.
+
+The purpose of this documentation is to provide low-level documentation for
+developers. It is B<not> intended to be used by the general public. This
+documentation and what it documents can and will change without notice.
+
+=head1 OVERVIEW
+
+The engine exposes an API to the DBM::Deep objects (DBM::Deep, DBM::Deep::Array,
+and DBM::Deep::Hash) for their use to access the actual stored values. This API
+is the following:
+
+=over 4
+
+=item * new
+
+=item * read_value
+
+=item * get_classname
+
+=item * make_reference
+
+=item * key_exists
+
+=item * delete_key
+
+=item * write_value
+
+=item * get_next_key
+
+=item * setup_fh
+
+=item * begin_work
+
+=item * commit
+
+=item * rollback
+
+=item * lock_exclusive
+
+=item * lock_shared
+
+=item * unlock
+
+=back
+
+They are explained in their own sections below. These methods, in turn, may
+provide some bounds-checking, but primarily act to instantiate objects in the
+Engine::Sector::* hierarchy and dispatch to them.
+
+=head1 TRANSACTIONS
+
+Transactions in DBM::Deep are implemented using a variant of MVCC. This attempts
+to keep the amount of actual work done against the file low while still providing
+Atomicity, Consistency, and Isolation. Durability, unfortunately, cannot be done
+with only one file.
+
+=head2 STALENESS
+
+If another process uses a transaction slot and writes stuff to it, then terminates,
+the data that process wrote is still within the file. In order to address this,
+there is also a transaction staleness counter associated with every write.
+Each time a transaction is started, that process increments that transaction's
+staleness counter. If, when it reads a value, the staleness counters aren't
+identical, DBM::Deep will consider the value on disk to be stale and discard it.
+
+=head2 DURABILITY
+
+The fourth leg of ACID is Durability, the guarantee that when a commit returns,
+the data will be there the next time you read from it. This should be regardless
+of any crashes or powerdowns in between the commit and subsequent read. DBM::Deep
+does provide that guarantee; once the commit returns, all of the data has been
+transferred from the transaction shadow to the HEAD. The issue arises with partial
+commits - a commit that is interrupted in some fashion. In keeping with DBM::Deep's
+"tradition" of very light error-checking and non-existent error-handling, there is
+no way to recover from a partial commit. (This is probably a failure in Consistency
+as well as Durability.)
+
+Other DBMSes use transaction logs (a separate file, generally) to achieve Durability.
+As DBM::Deep is a single-file, we would have to do something similar to what SQLite
+and BDB do in terms of committing using synchronized writes. To do this, we would have
+to use a much higher RAM footprint and some serious programming that makes my
+head hurt just to think about it.
+
+=head1 EXTERNAL METHODS
+
+=head2 new()
+
+This takes a set of args. These args are described in the documentation for
+L<DBM::Deep/new>.
+
+=cut
sub new {
my $class = shift;
my ($args) = @_;
- $args->{storage} = DBM::Deep::File->new( $args )
+ $args->{storage} = DBM::Deep::Storage::File->new( $args )
unless exists $args->{storage};
my $self = bless {
return $self;
}
-################################################################################
+=head2 read_value( $obj, $key )
+
+This takes an object that provides _base_offset() and a string. It returns the
+value stored in the corresponding Sector::Value's data section.
+
+=cut
sub read_value {
my $self = shift;
return $value_sector->data;
}
+=head2 get_classname( $obj )
+
+This takes an object that provides _base_offset() and returns the classname (if any)
+associated with it.
+
+It delegates to Sector::Reference::get_classname() for the heavy lifting.
+
+It performs a staleness check.
+
+=cut
+
sub get_classname {
my $self = shift;
my ($obj) = @_;
return $sector->get_classname;
}
+=head2 make_reference( $obj, $old_key, $new_key )
+
+This takes an object that provides _base_offset() and two strings. The
+strings correspond to the old key and new key, respectively. This operation
+is equivalent to (given C<< $db->{foo} = []; >>) C<< $db->{bar} = $db->{foo}; >>.
+
+This returns nothing.
+
+=cut
+
sub make_reference {
my $self = shift;
my ($obj, $old_key, $new_key) = @_;
value => $value_sector->clone,
});
}
+
+ return;
}
+=head2 key_exists( $obj, $key )
+
+This takes an object that provides _base_offset() and a string for
+the key to be checked. This returns 1 for true and "" for false.
+
+=cut
+
sub key_exists {
my $self = shift;
my ($obj, $key) = @_;
return $data ? 1 : '';
}
+=head2 delete_key( $obj, $key )
+
+This takes an object that provides _base_offset() and a string for
+the key to be deleted. This returns the result of the Sector::Reference
+delete_key() method.
+
+=cut
+
sub delete_key {
my $self = shift;
my ($obj, $key) = @_;
});
}
+=head2 write_value( $obj, $key, $value )
+
+This takes an object that provides _base_offset(), a string for the
+key, and a value. This value can be anything storable within L<DBM::Deep>.
+
+This returns 1 upon success.
+
+=cut
+
sub write_value {
my $self = shift;
my ($obj, $key, $value) = @_;
# This will be a Reference sector
my $sector = $self->_load_sector( $obj->_base_offset )
- or DBM::Deep->_throw_error( "1: Cannot write to a deleted spot in DBM::Deep." );
+ or DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep." );
if ( $sector->staleness != $obj->_staleness ) {
- DBM::Deep->_throw_error( "2: Cannot write to a deleted spot in DBM::Deep." );
+ DBM::Deep->_throw_error( "Cannot write to a deleted spot in DBM::Deep." );
}
my ($class, $type);
return 1;
}
+=head2 get_next_key( $obj, $prev_key )
+
+This takes an object that provides _base_offset() and an optional string
+representing the prior key returned via a prior invocation of this method.
+
+This method delegates to C<< DBM::Deep::Iterator->get_next_key() >>.
+
+=cut
+
# XXX Add staleness here
sub get_next_key {
my $self = shift;
return $obj->{iterator}->get_next_key( $obj );
}
-################################################################################
+=head2 setup_fh( $obj )
+
+This takes an object that provides _base_offset(). It will do everything needed
+in order to properly initialize all values for necessary functioning. If this is
+called upon an already initialized object, this will also reset the inode.
+
+This returns 1.
+
+=cut
sub setup_fh {
my $self = shift;
my ($obj) = @_;
- return 1 if $obj->_base_offset;
+ # We're opening the file.
+ unless ( $obj->_base_offset ) {
+ my $bytes_read = $self->_read_file_header;
- my $header = $self->_load_header;
+ # Creating a new file
+ unless ( $bytes_read ) {
+ $self->_write_file_header;
- # Creating a new file
- if ( $header->is_new ) {
- # 1) Create Array/Hash entry
- my $sector = DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- type => $obj->_type,
- });
- $obj->{base_offset} = $sector->offset;
- $obj->{staleness} = $sector->staleness;
+ # 1) Create Array/Hash entry
+ my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ type => $obj->_type,
+ });
+ $obj->{base_offset} = $initial_reference->offset;
+ $obj->{staleness} = $initial_reference->staleness;
- $self->flush;
- }
- # Reading from an existing file
- else {
- $obj->{base_offset} = $header->size;
- my $sector = DBM::Deep::Engine::Sector::Reference->new({
- engine => $self,
- offset => $obj->_base_offset,
- });
- unless ( $sector ) {
- DBM::Deep->_throw_error("Corrupted file, no master index record");
+ $self->storage->flush;
}
+ # Reading from an existing file
+ else {
+ $obj->{base_offset} = $bytes_read;
+ my $initial_reference = DBM::Deep::Engine::Sector::Reference->new({
+ engine => $self,
+ offset => $obj->_base_offset,
+ });
+ unless ( $initial_reference ) {
+ DBM::Deep->_throw_error("Corrupted file, no master index record");
+ }
- unless ($obj->_type eq $sector->type) {
- DBM::Deep->_throw_error("File type mismatch");
- }
+ unless ($obj->_type eq $initial_reference->type) {
+ DBM::Deep->_throw_error("File type mismatch");
+ }
- $obj->{staleness} = $sector->staleness;
+ $obj->{staleness} = $initial_reference->staleness;
+ }
}
$self->storage->set_inode;
return 1;
}
+=head2 begin_work( $obj )
+
+This takes an object that provides _base_offset(). It will set up all necessary
+bookkeeping in order to run all work within a transaction.
+
+If $obj is already within a transaction, an error will be thrown. If there are
+no more available transactions, an error will be thrown.
+
+This returns undef.
+
+=cut
+
sub begin_work {
my $self = shift;
my ($obj) = @_;
return;
}
+=head2 rollback( $obj )
+
+This takes an object that provides _base_offset(). It will revert all
+actions taken within the running transaction.
+
+If $obj is not within a transaction, an error will be thrown.
+
+This returns 1.
+
+=cut
+
sub rollback {
my $self = shift;
my ($obj) = @_;
return 1;
}
+=head2 commit( $obj )
+
+This takes an object that provides _base_offset(). It will apply all
+actions taken within the transaction to the HEAD.
+
+If $obj is not within a transaction, an error will be thrown.
+
+This returns 1.
+
+=cut
+
sub commit {
my $self = shift;
my ($obj) = @_;
return 1;
}
+=head2 lock_exclusive()
+
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has taken precautions to be safe for a write.
+
+This returns nothing.
+
+=cut
+
+# Take an exclusive (write) lock. Delegates straight to the storage layer;
+# $obj (which provides _base_offset()) is passed through untouched.
+sub lock_exclusive {
+    my $self = shift;
+    my ($obj) = @_;
+    return $self->storage->lock_exclusive( $obj );
+}
+
+=head2 lock_shared()
+
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has taken precautions to be safe for a read.
+
+This returns nothing.
+
+=cut
+
+# Take a shared (read) lock. Delegates straight to the storage layer;
+# $obj (which provides _base_offset()) is passed through untouched.
+sub lock_shared {
+    my $self = shift;
+    my ($obj) = @_;
+    return $self->storage->lock_shared( $obj );
+}
+
+=head2 unlock()
+
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has released all locks taken.
+
+This returns nothing.
+
+=cut
+
+# Release the storage-level lock for $obj, then flush the engine's buffered
+# state if the storage reports the unlock actually happened ($rv true).
+sub unlock {
+    my $self = shift;
+    my ($obj) = @_;
+
+    my $rv = $self->storage->unlock( $obj );
+
+    # Flush only on a real release so readers see a consistent file.
+    $self->flush if $rv;
+
+    return $rv;
+}
+
+=head1 INTERNAL METHODS
+
+The following methods are internal-use-only to DBM::Deep::Engine.
+
+=cut
+
+=head2 read_txn_slots()
+
+This takes no arguments.
+
+This will return an array with a 1 or 0 in each slot. Each spot represents one
+available transaction. If the slot is 1, that transaction is taken. If it is 0,
+the transaction is available.
+
+=cut
+
sub read_txn_slots {
    my $self = shift;
-    return $self->_load_header->read_txn_slots(@_);
+    # The transaction-activeness bitfield lives at trans_loc in the header.
+    # Unpack it bit-by-bit ('b' = ascending bit order) into one 0/1 element
+    # per transaction slot.
+    my $bl = $self->txn_bitfield_len;
+    my $num_bits = $bl * 8;
+    return split '', unpack( 'b'.$num_bits,
+        $self->storage->read_at(
+            $self->trans_loc, $bl,
+        )
+    );
}
+=head2 write_txn_slots( @slots )
+
+This takes an array of 1's and 0's. This array represents the transaction slots
+returned by L</read_txn_slots()>. In other words, the following is true:
+
+ @x = read_txn_slots( write_txn_slots( @x ) );
+
+(With the obviously missing object referents added back in.)
+
+=cut
+
sub write_txn_slots {
    my $self = shift;
-    return $self->_load_header->write_txn_slots(@_);
+    # Inverse of read_txn_slots(): pack the 0/1 slot list back into the
+    # bitfield and write it over the same header location.
+    my $num_bits = $self->txn_bitfield_len * 8;
+    $self->storage->print_at( $self->trans_loc,
+        pack( 'b'.$num_bits, join('', @_) ),
+    );
}
+=head2 get_running_txn_ids()
+
+This takes no arguments.
+
+This will return an array of taken transaction IDs. This wraps L</read_txn_slots()>.
+
+=cut
+
sub get_running_txn_ids {
my $self = shift;
my @transactions = $self->read_txn_slots;
- my @trans_ids = map { $_+1} grep { $transactions[$_] } 0 .. $#transactions;
+ my @trans_ids = map { $_+1 } grep { $transactions[$_] } 0 .. $#transactions;
}
+=head2 get_txn_staleness_counter( $trans_id )
+
+This will return the staleness counter for the given transaction ID. Please see
+L</STALENESS> for more information.
+
+=cut
+
sub get_txn_staleness_counter {
    my $self = shift;
-    return $self->_load_header->get_txn_staleness_counter(@_);
+    my ($trans_id) = @_;
+
+    # Hardcode staleness of 0 for the HEAD
+    return 0 unless $trans_id;
+
+    # Staleness counters are fixed-width ($STALE_SIZE) slots laid out
+    # immediately after the transaction bitfield; slot 0 belongs to
+    # trans_id 1 (the HEAD has no slot).
+    return unpack( $StP{$STALE_SIZE},
+        $self->storage->read_at(
+            $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
+            $STALE_SIZE,
+        )
+    );
}
+=head2 inc_txn_staleness_counter( $trans_id )
+
+This will increment the staleness counter for the given transaction ID. Please see
+L</STALENESS> for more information.
+
+=cut
+
sub inc_txn_staleness_counter {
    my $self = shift;
-    return $self->_load_header->inc_txn_staleness_counter(@_);
+    my ($trans_id) = @_;
+
+    # Hardcode staleness of 0 for the HEAD
+    return 0 unless $trans_id;
+
+    # Read-increment-write on the same header slot that
+    # get_txn_staleness_counter() reads. NOTE(review): not atomic on its
+    # own; presumably the caller holds the file lock -- confirm.
+    $self->storage->print_at(
+        $self->trans_loc + $self->txn_bitfield_len + $STALE_SIZE * ($trans_id - 1),
+        pack( $StP{$STALE_SIZE}, $self->get_txn_staleness_counter( $trans_id ) + 1 ),
+    );
}
+=head2 get_entries()
+
+This takes no arguments.
+
+This returns a list of all the sectors that have been modified by this transaction.
+
+=cut
+
sub get_entries {
my $self = shift;
return [ keys %{ $self->{entries}{$self->trans_id} ||= {} } ];
}
+=head2 add_entry( $trans_id, $location )
+
+This takes a transaction ID and a file location and marks the sector at that location
+as having been modified by the transaction identified by $trans_id.
+
+This returns nothing.
+
+B<NOTE>: Unlike all the other _entries() methods, there are several cases where
+C<< $trans_id != $self->trans_id >> for this method.
+
+=cut
+
sub add_entry {
my $self = shift;
my ($trans_id, $loc) = @_;
$self->{entries}{$trans_id}{$loc} = undef;
}
-# If the buckets are being relocated because of a reindexing, the entries
-# mechanism needs to be made aware of it.
+=head2 reindex_entry( $old_loc, $new_loc )
+
+This takes two locations (old and new, respectively). If a location that has been
+modified by this transaction is subsequently reindexed due to a bucketlist
+overflowing, then the entries hash needs to be made aware of this change.
+
+This returns nothing.
+
+=cut
+
sub reindex_entry {
my $self = shift;
my ($old_loc, $new_loc) = @_;
}
}
+=head2 clear_entries()
+
+This takes no arguments. It will clear the entries list for the running transaction.
+
+This returns nothing.
+
+=cut
+
sub clear_entries {
my $self = shift;
delete $self->{entries}{$self->trans_id};
}
-################################################################################
+=head2 _write_file_header()
-sub _apply_digest {
- my $self = shift;
- return $self->{digest}->(@_);
-}
+This writes the file header for a new file. This will write the various settings
+that set how the file is interpreted.
-sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
-sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
-sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
-sub _add_free_sector { shift->_load_header->add_free_sector( @_ ) }
+=head2 _read_file_header()
-sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
-sub _request_data_sector { shift->_request_sector( 1, @_ ) }
-sub _request_index_sector { shift->_request_sector( 2, @_ ) }
-sub _request_sector { shift->_load_header->request_sector( @_ ) }
+This reads the file header from an existing file. This will read the various
+settings that set how the file is interpreted.
-################################################################################
+=cut
{
- my %t = (
- SIG_ARRAY => 'Reference',
- SIG_HASH => 'Reference',
- SIG_BLIST => 'BucketList',
- SIG_INDEX => 'Index',
- SIG_NULL => 'Null',
- SIG_DATA => 'Scalar',
- );
-
- my %class_for;
- while ( my ($k,$v) = each %t ) {
- $class_for{ DBM::Deep::Engine->$k } = "DBM::Deep::Engine::Sector::$v";
- }
+ my $header_fixed = length( SIG_FILE ) + 1 + 4 + 4;
+ my $this_file_version = 3;
- sub load_sector {
+ sub _write_file_header {
my $self = shift;
- my ($offset) = @_;
- my $data = $self->get_data( $offset )
- or return;#die "Cannot read from '$offset'\n";
- my $type = substr( $$data, 0, 1 );
- my $class = $class_for{ $type };
- return $class->new({
- engine => $self,
- type => $type,
- offset => $offset,
- });
- }
- *_load_sector = \&load_sector;
+ my $nt = $self->num_txns;
+ my $bl = $self->txn_bitfield_len;
- sub load_header {
- my $self = shift;
+ my $header_var = 1 + 1 + 1 + 1 + $bl + $STALE_SIZE * ($nt - 1) + 3 * $self->byte_size;
- #XXX Does this mean we make too many objects? -RobK, 2008-06-23
- return DBM::Deep::Engine::Sector::FileHeader->new({
- engine => $self,
- offset => 0,
- });
+ my $loc = $self->storage->request_space( $header_fixed + $header_var );
+
+ $self->storage->print_at( $loc,
+ SIG_FILE,
+ SIG_HEADER,
+ pack('N', $this_file_version), # At this point, we're at 9 bytes
+ pack('N', $header_var), # header size
+ # --- Above is $header_fixed. Below is $header_var
+ pack('C', $self->byte_size),
+
+ # These shenanigans are to allow a 256 within a C
+ pack('C', $self->max_buckets - 1),
+ pack('C', $self->data_sector_size - 1),
+
+ pack('C', $nt),
+ pack('C' . $bl, 0 ), # Transaction activeness bitfield
+ pack($StP{$STALE_SIZE}.($nt-1), 0 x ($nt-1) ), # Transaction staleness counters
+ pack($StP{$self->byte_size}, 0), # Start of free chain (blist size)
+ pack($StP{$self->byte_size}, 0), # Start of free chain (data size)
+ pack($StP{$self->byte_size}, 0), # Start of free chain (index size)
+ );
+
+ #XXX Set these less fragilely
+ $self->set_trans_loc( $header_fixed + 4 );
+ $self->set_chains_loc( $header_fixed + 4 + $bl + $STALE_SIZE * ($nt-1) );
+
+ return;
}
- *_load_header = \&load_header;
- sub get_data {
+ sub _read_file_header {
my $self = shift;
- my ($offset, $size) = @_;
- return unless defined $offset;
- unless ( exists $self->sector_cache->{$offset} ) {
- # Don't worry about the header sector. It will manage itself.
- return unless $offset;
+ my $buffer = $self->storage->read_at( 0, $header_fixed );
+ return unless length($buffer);
- if ( !defined $size ) {
- my $type = $self->storage->read_at( $offset, 1 )
- or die "($offset): Cannot read from '$offset' to find the type\n";
+ my ($file_signature, $sig_header, $file_version, $size) = unpack(
+ 'A4 A N N', $buffer
+ );
- if ( $type eq $self->SIG_FREE ) {
- return;
- }
+ unless ( $file_signature eq SIG_FILE ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Signature not found -- file is not a Deep DB" );
+ }
- my $class = $class_for{$type}
- or die "($offset): Cannot find class for '$type'\n";
- $size = $class->size( $self )
- or die "($offset): '$class' doesn't return a size\n";
- $self->sector_cache->{$offset} = $type . $self->storage->read_at( undef, $size - 1 );
- }
- else {
- $self->sector_cache->{$offset} = $self->storage->read_at( $offset, $size )
- or return;
- }
+ unless ( $sig_header eq SIG_HEADER ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Pre-1.00 file version found" );
+ }
+
+ unless ( $file_version == $this_file_version ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error(
+ "Wrong file version found - " . $file_version .
+ " - expected " . $this_file_version
+ );
+ }
+
+ my $buffer2 = $self->storage->read_at( undef, $size );
+ my @values = unpack( 'C C C C', $buffer2 );
+
+ if ( @values != 4 || grep { !defined } @values ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error("Corrupted file - bad header");
+ }
+
+ #XXX Add warnings if values weren't set right
+ @{$self}{qw(byte_size max_buckets data_sector_size num_txns)} = @values;
+
+ # These shenangians are to allow a 256 within a C
+ $self->{max_buckets} += 1;
+ $self->{data_sector_size} += 1;
+
+ my $bl = $self->txn_bitfield_len;
+
+ my $header_var = scalar(@values) + $bl + $STALE_SIZE * ($self->num_txns - 1) + 3 * $self->byte_size;
+ unless ( $size == $header_var ) {
+ $self->storage->close;
+ DBM::Deep->_throw_error( "Unexpected size found ($size <-> $header_var)." );
}
- return \$self->sector_cache->{$offset};
+ $self->set_trans_loc( $header_fixed + scalar(@values) );
+ $self->set_chains_loc( $header_fixed + scalar(@values) + $bl + $STALE_SIZE * ($self->num_txns - 1) );
+
+ return length($buffer) + length($buffer2);
}
}
-sub sector_cache {
- my $self = shift;
- return $self->{sector_cache} ||= {};
-}
+=head2 _load_sector( $offset )
+
+This will instantiate and return the sector object that represents the data found
+at $offset.
-sub clear_sector_cache {
+=cut
+
+sub _load_sector {
    my $self = shift;
-    $self->{sector_cache} = {};
+    my ($offset) = @_;
+
+    # Add a catch for offset of 0 or 1
+    return if !$offset || $offset <= 1;
+
+    # First byte of every sector is its type signature. chr(0) means the
+    # space was allocated but never written (see _request_sector's zeroing).
+    my $type = $self->storage->read_at( $offset, 1 );
+    return if $type eq chr(0);
+
+    # Dispatch on the signature byte to the matching Sector subclass.
+    if ( $type eq $self->SIG_ARRAY || $type eq $self->SIG_HASH ) {
+        return DBM::Deep::Engine::Sector::Reference->new({
+            engine => $self,
+            type => $type,
+            offset => $offset,
+        });
+    }
+    # XXX Don't we need key_md5 here?
+    elsif ( $type eq $self->SIG_BLIST ) {
+        return DBM::Deep::Engine::Sector::BucketList->new({
+            engine => $self,
+            type => $type,
+            offset => $offset,
+        });
+    }
+    elsif ( $type eq $self->SIG_INDEX ) {
+        return DBM::Deep::Engine::Sector::Index->new({
+            engine => $self,
+            type => $type,
+            offset => $offset,
+        });
+    }
+    elsif ( $type eq $self->SIG_NULL ) {
+        return DBM::Deep::Engine::Sector::Null->new({
+            engine => $self,
+            type => $type,
+            offset => $offset,
+        });
+    }
+    elsif ( $type eq $self->SIG_DATA ) {
+        return DBM::Deep::Engine::Sector::Scalar->new({
+            engine => $self,
+            type => $type,
+            offset => $offset,
+        });
+    }
+    # This was deleted from under us, so just return and let the caller figure it out.
+    elsif ( $type eq $self->SIG_FREE ) {
+        return;
+    }
+
+    DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );
}
-sub dirty_sectors {
+=head2 _apply_digest( @stuff )
+
+This will apply the digest method (defaults to Digest::MD5::md5) to the arguments
+passed in and return the result.
+
+=cut
+
+# Apply the configured digest function (stored in $self->{digest}) to the
+# arguments and return its result.
+sub _apply_digest {
    my $self = shift;
-    return $self->{dirty_sectors} ||= {};
+    return $self->{digest}->(@_);
}
-sub clear_dirty_sectors {
+=head2 _add_free_blist_sector( $offset, $size )
+
+=head2 _add_free_data_sector( $offset, $size )
+
+=head2 _add_free_index_sector( $offset, $size )
+
+These methods are all wrappers around _add_free_sector(), providing the proper
+chain offset ($multiple) for the sector type.
+
+=cut
+
+sub _add_free_blist_sector { shift->_add_free_sector( 0, @_ ) }
+sub _add_free_data_sector { shift->_add_free_sector( 1, @_ ) }
+sub _add_free_index_sector { shift->_add_free_sector( 2, @_ ) }
+
+=head2 _add_free_sector( $multiple, $offset, $size )
+
+_add_free_sector() takes the offset into the chains location, the offset of the
+sector, and the size of that sector. It will mark the sector as a free sector
+and put it into the list of sectors that are free of this type for use later.
+
+This returns nothing.
+
+B<NOTE>: $size is unused?
+
+=cut
+
+sub _add_free_sector {
    my $self = shift;
-    $self->{dirty_sectors} = {};
+    my ($multiple, $offset, $size) = @_;
+    # NOTE(review): $size is accepted but never used below.
+
+    # Each sector type has its own free-chain head pointer in the header,
+    # $multiple byte_size-widths past chains_loc.
+    my $chains_offset = $multiple * $self->byte_size;
+
+    my $storage = $self->storage;
+
+    # Increment staleness.
+    # XXX Can this increment+modulo be done by "&= 0x1" ?
+    my $staleness = unpack( $StP{$STALE_SIZE}, $storage->read_at( $offset + SIG_SIZE, $STALE_SIZE ) );
+    $staleness = ($staleness + 1 ) % ( 2 ** ( 8 * $STALE_SIZE ) );
+    $storage->print_at( $offset + SIG_SIZE, pack( $StP{$STALE_SIZE}, $staleness ) );
+
+    # Push this sector onto the front of the free chain: save the current
+    # head, point the head at this sector, then stash the old head inside it.
+    my $old_head = $storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
+
+    $storage->print_at( $self->chains_loc + $chains_offset,
+        pack( $StP{$self->byte_size}, $offset ),
+    );
+
+    # Record the old head in the new sector after the signature and staleness counter
+    $storage->print_at( $offset + SIG_SIZE + $STALE_SIZE, $old_head );
}
-sub add_dirty_sector {
+=head2 _request_blist_sector( $size )
+
+=head2 _request_data_sector( $size )
+
+=head2 _request_index_sector( $size )
+
+These methods are all wrappers around _request_sector(), providing the proper
+chain offset ($multiple) for the sector type.
+
+=cut
+
+sub _request_blist_sector { shift->_request_sector( 0, @_ ) }
+sub _request_data_sector { shift->_request_sector( 1, @_ ) }
+sub _request_index_sector { shift->_request_sector( 2, @_ ) }
+
+=head2 _request_sector( $multiple, $size )
+
+This takes the offset into the chains location and the size of that sector.
+
+This returns the object with the sector. If there is an available free sector of
+that type, then it will be reused. If there isn't one, then a new one will be
+allocated.
+
+=cut
+
+sub _request_sector {
    my $self = shift;
-    my ($offset) = @_;
+    my ($multiple, $size) = @_;
+
+    # Free-chain head pointer for this sector type (see _add_free_sector()).
+    my $chains_offset = $multiple * $self->byte_size;
+
+    my $old_head = $self->storage->read_at( $self->chains_loc + $chains_offset, $self->byte_size );
+    my $loc = unpack( $StP{$self->byte_size}, $old_head );
+
+    # We don't have any free sectors of the right size, so allocate a new one.
+    unless ( $loc ) {
+        my $offset = $self->storage->request_space( $size );
+
+        # Zero out the new sector. This also guarantees correct increases
+        # in the filesize.
+        $self->storage->print_at( $offset, chr(0) x $size );
-    $self->dirty_sectors->{ $offset } = undef;
+        return $offset;
+    }
+
+    # Otherwise pop the first free sector off the chain: its stored "next"
+    # pointer becomes the new chain head, and its own pointer is zeroed.
+    # Read the new head after the signature and the staleness counter
+    my $new_head = $self->storage->read_at( $loc + SIG_SIZE + $STALE_SIZE, $self->byte_size );
+    $self->storage->print_at( $self->chains_loc + $chains_offset, $new_head );
+    $self->storage->print_at(
+        $loc + SIG_SIZE + $STALE_SIZE,
+        pack( $StP{$self->byte_size}, 0 ),
+    );
+
+    return $loc;
}
+=head2 flush()
+
+This takes no arguments. It will do everything necessary to flush all things to
+disk. This is usually called during unlock() and setup_fh().
+
+This returns nothing.
+
+=cut
+
sub flush {
my $self = shift;
- my $sectors = $self->dirty_sectors;
- for my $offset (sort { $a <=> $b } keys %{ $sectors }) {
- $self->storage->print_at( $offset, $self->sector_cache->{$offset} );
- }
-
# Why do we need to have the storage flush? Shouldn't autoflush take care of things?
# -RobK, 2008-06-26
$self->storage->flush;
+}
- $self->clear_dirty_sectors;
+=head2 ACCESSORS
- $self->clear_sector_cache;
-}
+The following are readonly attributes.
-################################################################################
+=over 4
-sub lock_exclusive {
- my $self = shift;
- my ($obj) = @_;
- return $self->storage->lock_exclusive( $obj );
-}
+=item * storage
-sub lock_shared {
- my $self = shift;
- my ($obj) = @_;
- return $self->storage->lock_shared( $obj );
-}
+=item * byte_size
-sub unlock {
- my $self = shift;
- my ($obj) = @_;
+=item * hash_size
- my $rv = $self->storage->unlock( $obj );
+=item * hash_chars
- $self->flush if $rv;
+=item * num_txns
- return $rv;
-}
+=item * max_buckets
+
+=item * blank_md5
+
+=item * data_sector_size
+
+=item * txn_bitfield_len
-################################################################################
+=back
+
+=cut
sub storage { $_[0]{storage} }
sub byte_size { $_[0]{byte_size} }
return $self->{txn_bitfield_len};
}
+=pod
+
+The following are read/write attributes.
+
+=over 4
+
+=item * trans_id / set_trans_id( $new_id )
+
+=item * trans_loc / set_trans_loc( $new_loc )
+
+=item * chains_loc / set_chains_loc( $new_loc )
+
+=back
+
+=cut
+
sub trans_id { $_[0]{trans_id} }
sub set_trans_id { $_[0]{trans_id} = $_[1] }
sub cache { $_[0]{cache} ||= {} }
sub clear_cache { %{$_[0]->cache} = () }
+=head2 _dump_file()
+
+This method takes no arguments. It's used to print out a textual representation of the DBM::Deep
+DB file. It assumes the file is not-corrupted.
+
+=cut
+
sub _dump_file {
my $self = shift;
- $self->flush;
# Read the header
- my $header_sector = DBM::Deep::Engine::Sector::FileHeader->new({
- engine => $self,
- });
+ my $spot = $self->_read_file_header();
my %types = (
0 => 'B',
my $return = "";
- # Filesize
- $return .= "Size: " . (-s $self->storage->{fh}) . $/;
-
# Header values
$return .= "NumTxns: " . $self->num_txns . $/;
$return .= $/;
}
- my $spot = $header_sector->size;
SECTOR:
while ( $spot < $self->storage->{end} ) {
# Read each sector in order.