package DBM::Deep::Engine;
+use 5.006_000;
+
use strict;
+use warnings FATAL => 'all';
-use Fcntl qw( :DEFAULT :flock :seek );
-
-##
-# Setup file and tag signatures. These should never change.
-##
-sub SIG_FILE () { 'DPDB' }
-sub SIG_INTERNAL () { 'i' }
-sub SIG_HASH () { 'H' }
-sub SIG_ARRAY () { 'A' }
-sub SIG_NULL () { 'N' }
-sub SIG_DATA () { 'D' }
-sub SIG_INDEX () { 'I' }
-sub SIG_BLIST () { 'B' }
-sub SIG_FREE () { 'F' }
-sub SIG_SIZE () { 1 }
-
-sub precalc_sizes {
- ##
- # Precalculate index, bucket and bucket list sizes
- ##
- my $self = shift;
+our $VERSION = $DBM::Deep::VERSION;
- $self->{index_size} = (2**8) * $self->{long_size};
- $self->{bucket_size} = $self->{hash_size} + $self->{long_size} * 2;
- $self->{bucket_list_size} = $self->{max_buckets} * $self->{bucket_size};
+use DBM::Deep::Iterator ();
- return 1;
-}
+# File-wide notes:
+# * Every method in here assumes that the storage has been appropriately
+# safeguarded. This can be anything from flock() to some sort of manual
+#   mutex. But, it's the caller's responsibility to make sure that this has
+# been done.
-sub set_pack {
- ##
- # Set pack/unpack modes (see file header for more)
- ##
- my $self = shift;
- my ($long_s, $long_p, $data_s, $data_p) = @_;
-
- ##
- # Set to 4 and 'N' for 32-bit offset tags (default). Theoretical limit of 4
- # GB per file.
- # (Perl must be compiled with largefile support for files > 2 GB)
- #
- # Set to 8 and 'Q' for 64-bit offsets. Theoretical limit of 16 XB per file.
- # (Perl must be compiled with largefile and 64-bit long support)
- ##
- $self->{long_size} = $long_s ? $long_s : 4;
- $self->{long_pack} = $long_p ? $long_p : 'N';
-
- ##
- # Set to 4 and 'N' for 32-bit data length prefixes. Limit of 4 GB for each
- # key/value. Upgrading this is possible (see above) but probably not
- # necessary. If you need more than 4 GB for a single key or value, this
- # module is really not for you :-)
- ##
- $self->{data_size} = $data_s ? $data_s : 4;
- $self->{data_pack} = $data_p ? $data_p : 'N';
-
- return $self->precalc_sizes();
-}
+sub SIG_HASH () { 'H' }
+sub SIG_ARRAY () { 'A' }
-sub set_digest {
- ##
- # Set key digest function (default is MD5)
- ##
- my $self = shift;
- my ($digest_func, $hash_size) = @_;
+=head1 NAME
- $self->{digest} = $digest_func ? $digest_func : \&Digest::MD5::md5;
- $self->{hash_size} = $hash_size ? $hash_size : 16;
+DBM::Deep::Engine
- return $self->precalc_sizes();
-}
+=head1 PURPOSE
-sub new {
- my $class = shift;
- my ($args) = @_;
+This is an internal-use-only object for L<DBM::Deep>. It mediates the low-level
+mapping between the L<DBM::Deep> objects and the storage medium.
- my $self = bless {
- long_size => 4,
- long_pack => 'N',
- data_size => 4,
- data_pack => 'N',
+The purpose of this documentation is to provide low-level documentation for
+developers. It is B<not> intended to be used by the general public. This
+documentation and what it documents can and will change without notice.
- digest => \&Digest::MD5::md5,
- hash_size => 16,
+=head1 OVERVIEW
- ##
- # Maximum number of buckets per list before another level of indexing is
- # done.
- # Increase this value for slightly greater speed, but larger database
- # files. DO NOT decrease this value below 16, due to risk of recursive
- # reindex overrun.
- ##
- max_buckets => 16,
- }, $class;
+The engine exposes an API to the DBM::Deep objects (DBM::Deep, DBM::Deep::Array,
+and DBM::Deep::Hash) for their use to access the actual stored values. This API
+is the following:
- $self->precalc_sizes;
+=over 4
- return $self;
-}
+=item * new
-sub write_file_signature {
- my $self = shift;
- my ($obj) = @_;
+=item * read_value
- my $fh = $obj->_fh;
+=item * get_classname
- my $loc = $self->_request_space(
- $obj, length( SIG_FILE ) + $self->{data_size},
- );
- seek($fh, $loc + $obj->_root->{file_offset}, SEEK_SET);
- print( $fh SIG_FILE, pack($self->{data_pack}, 0) );
+=item * make_reference
- return;
-}
+=item * key_exists
-sub read_file_signature {
- my $self = shift;
- my ($obj) = @_;
+=item * delete_key
- my $fh = $obj->_fh;
+=item * write_value
- seek($fh, 0 + $obj->_root->{file_offset}, SEEK_SET);
- my $buffer;
- my $bytes_read = read(
- $fh, $buffer, length(SIG_FILE) + $self->{data_size},
- );
+=item * get_next_key
- if ( $bytes_read ) {
- my ($signature, $version) = unpack( "A4 $self->{data_pack}", $buffer );
- unless ($signature eq SIG_FILE) {
- $self->close_fh( $obj );
- $obj->_throw_error("Signature not found -- file is not a Deep DB");
- }
- }
+=item * setup
- return $bytes_read;
-}
+=item * begin_work
-sub setup_fh {
- my $self = shift;
- my ($obj) = @_;
+=item * commit
- $self->open( $obj ) if !defined $obj->_fh;
-
- my $fh = $obj->_fh;
- flock $fh, LOCK_EX;
-
- unless ( $obj->{base_offset} ) {
- my $bytes_read = $self->read_file_signature( $obj );
-
- ##
- # File is empty -- write signature and master index
- ##
- if (!$bytes_read) {
- $self->write_file_signature( $obj );
-
- $obj->{base_offset} = $self->_request_space(
- $obj, $self->tag_size( $self->{index_size} ),
- );
-
- $self->write_tag(
- $obj, $obj->_base_offset, $obj->_type,
- chr(0)x$self->{index_size},
- );
-
- # Flush the filehandle
- my $old_fh = select $fh;
- my $old_af = $|; $| = 1; $| = $old_af;
- select $old_fh;
- }
- else {
- $obj->{base_offset} = $bytes_read;
-
- ##
- # Get our type from master index signature
- ##
- my $tag = $self->load_tag($obj, $obj->_base_offset)
- or $obj->_throw_error("Corrupted file, no master index record");
-
- unless ($obj->{type} eq $tag->{signature}) {
- $obj->_throw_error("File type mismatch");
- }
- }
- }
+=item * rollback
- #XXX We have to make sure we don't mess up when autoflush isn't turned on
- unless ( $obj->_root->{inode} ) {
- my @stats = stat($obj->_fh);
- $obj->_root->{inode} = $stats[1];
- $obj->_root->{end} = $stats[7];
- }
+=item * lock_exclusive
- flock $fh, LOCK_UN;
+=item * lock_shared
- return 1;
-}
+=item * unlock
-sub open {
- ##
- # Open a fh to the database, create if nonexistent.
- # Make sure file signature matches DBM::Deep spec.
- ##
- my $self = shift;
- my ($obj) = @_;
+=back
- # Theoretically, adding O_BINARY should remove the need for the binmode
- # Of course, testing it is going to be ... interesting.
- my $flags = O_RDWR | O_CREAT | O_BINARY;
+They are explained in their own sections below. These methods, in turn, may
+provide some bounds-checking, but primarily act to instantiate objects in the
+Engine::Sector::* hierarchy and dispatch to them.
- my $fh;
- my $filename = $obj->_root->{file};
- sysopen( $fh, $filename, $flags )
- or $obj->_throw_error("Cannot sysopen file '$filename': $!");
- $obj->_root->{fh} = $fh;
+=head1 TRANSACTIONS
- # Even though we use O_BINARY, better be safe than sorry.
- binmode $fh;
+Transactions in DBM::Deep are implemented using a variant of MVCC. This attempts
+to keep the amount of actual work done against the file low while still providing
+Atomicity, Consistency, and Isolation. Durability, unfortunately, cannot be done
+with only one file.
- if ($obj->_root->{autoflush}) {
- my $old = select $fh;
- $|=1;
- select $old;
- }
+=head2 STALENESS
- return 1;
-}
+If another process uses a transaction slot and writes stuff to it, then
+terminates, the data that process wrote is still within the file. In order to
+address this, there is also a transaction staleness counter associated with
+every write. Each time a transaction is started, that process increments that
+transaction's staleness counter. If, when it reads a value, the staleness
+counters aren't identical, DBM::Deep will consider the value on disk to be stale
+and discard it.
-sub close_fh {
- my $self = shift;
- my ($obj) = @_;
+=head2 DURABILITY
- if ( my $fh = $obj->_root->{fh} ) {
- close $fh;
- }
- $obj->_root->{fh} = undef;
+The fourth leg of ACID is Durability, the guarantee that when a commit returns,
+the data will be there the next time you read from it. This should be regardless
+of any crashes or powerdowns in between the commit and subsequent read.
+DBM::Deep does provide that guarantee; once the commit returns, all of the data
+has been transferred from the transaction shadow to the HEAD. The issue arises
+with partial commits - a commit that is interrupted in some fashion. In keeping
+with DBM::Deep's "tradition" of very light error-checking and non-existent
+error-handling, there is no way to recover from a partial commit. (This is
+probably a failure in Consistency as well as Durability.)
- return 1;
-}
+Other DBMSes use transaction logs (a separate file, generally) to achieve
+Durability. As DBM::Deep is a single-file, we would have to do something
+similar to what SQLite and BDB do in terms of committing using synchronized
+writes. To do this, we would have to use a much higher RAM footprint and some
+serious programming that makes my head hurt just to think about it.
-sub tag_size {
- my $self = shift;
- my ($size) = @_;
- return SIG_SIZE + $self->{data_size} + $size;
-}
+=cut
-sub write_tag {
- ##
- # Given offset, signature and content, create tag and write to disk
- ##
- my $self = shift;
- my ($obj, $offset, $sig, $content) = @_;
- my $size = length( $content );
+=head2 read_value( $obj, $key )
- my $fh = $obj->_fh;
+This takes an object that provides _base_offset() and a string. It returns the
+value stored in the corresponding Sector::Value's data section.
- if ( defined $offset ) {
- seek($fh, $offset + $obj->_root->{file_offset}, SEEK_SET);
- }
+=cut
- print( $fh $sig . pack($self->{data_pack}, $size) . $content );
+sub read_value { die "read_value must be implemented in a child class" }
- return unless defined $offset;
+=head2 get_classname( $obj )
- return {
- signature => $sig,
- size => $size,
- offset => $offset + SIG_SIZE + $self->{data_size},
- content => $content
- };
-}
+This takes an object that provides _base_offset() and returns the classname (if
+any) associated with it.
-sub load_tag {
- ##
- # Given offset, load single tag and return signature, size and data
- ##
- my $self = shift;
- my ($obj, $offset) = @_;
+It delegates to Sector::Reference::get_classname() for the heavy lifting.
-# print join(':',map{$_||''}caller(1)), $/;
+It performs a staleness check.
- my $fh = $obj->_fh;
+=cut
- seek($fh, $offset + $obj->_root->{file_offset}, SEEK_SET);
+sub get_classname { die "get_classname must be implemented in a child class" }
- #XXX I'm not sure this check will work if autoflush isn't enabled ...
- return if eof $fh;
+=head2 make_reference( $obj, $old_key, $new_key )
- my $b;
- read( $fh, $b, SIG_SIZE + $self->{data_size} );
- my ($sig, $size) = unpack( "A $self->{data_pack}", $b );
+This takes an object that provides _base_offset() and two strings. The
+strings correspond to the old key and new key, respectively. This operation
+is equivalent to (given C<< $db->{foo} = []; >>) C<< $db->{bar} = $db->{foo} >>.
- my $buffer;
- read( $fh, $buffer, $size);
+This returns nothing.
- return {
- signature => $sig,
- size => $size,
- offset => $offset + SIG_SIZE + $self->{data_size},
- content => $buffer
- };
-}
+=cut
-sub _get_dbm_object {
- my $item = shift;
-
- my $obj = eval {
- local $SIG{__DIE__};
- if ($item->isa( 'DBM::Deep' )) {
- return $item;
- }
- return;
- };
- return $obj if $obj;
-
- my $r = Scalar::Util::reftype( $item ) || '';
- if ( $r eq 'HASH' ) {
- my $obj = eval {
- local $SIG{__DIE__};
- my $obj = tied(%$item);
- if ($obj->isa( 'DBM::Deep' )) {
- return $obj;
- }
- return;
- };
- return $obj if $obj;
- }
- elsif ( $r eq 'ARRAY' ) {
- my $obj = eval {
- local $SIG{__DIE__};
- my $obj = tied(@$item);
- if ($obj->isa( 'DBM::Deep' )) {
- return $obj;
- }
- return;
- };
- return $obj if $obj;
- }
+sub make_reference { die "make_reference must be implemented in a child class" }
- return;
-}
+=head2 key_exists( $obj, $key )
-sub _length_needed {
- my $self = shift;
- my ($obj, $value, $key) = @_;
+This takes an object that provides _base_offset() and a string for
+the key to be checked. This returns 1 for true and "" for false.
- my $is_dbm_deep = eval {
- local $SIG{'__DIE__'};
- $value->isa( 'DBM::Deep' );
- };
+=cut
- my $len = SIG_SIZE + $self->{data_size}
- + $self->{data_size} + length( $key );
+sub key_exists { die "key_exists must be implemented in a child class" }
- if ( $is_dbm_deep && $value->_root eq $obj->_root ) {
- return $len + $self->{long_size};
- }
+=head2 delete_key( $obj, $key )
- my $r = Scalar::Util::reftype( $value ) || '';
- if ( $obj->_root->{autobless} ) {
- # This is for the bit saying whether or not this thing is blessed.
- $len += 1;
- }
+This takes an object that provides _base_offset() and a string for
+the key to be deleted. This returns the result of the Sector::Reference
+delete_key() method.
- unless ( $r eq 'HASH' || $r eq 'ARRAY' ) {
- if ( defined $value ) {
- $len += length( $value );
- }
- return $len;
- }
+=cut
- $len += $self->{index_size};
+sub delete_key { die "delete_key must be implemented in a child class" }
- # if autobless is enabled, must also take into consideration
- # the class name as it is stored after the key.
- if ( $obj->_root->{autobless} ) {
- my $c = Scalar::Util::blessed($value);
- if ( defined $c && !$is_dbm_deep ) {
- $len += $self->{data_size} + length($c);
- }
- }
+=head2 write_value( $obj, $key, $value )
- return $len;
-}
+This takes an object that provides _base_offset(), a string for the
+key, and a value. This value can be anything storable within L<DBM::Deep>.
-sub add_bucket {
- ##
- # Adds one key/value pair to bucket list, given offset, MD5 digest of key,
- # plain (undigested) key and value.
- ##
- my $self = shift;
- my ($obj, $tag, $md5, $plain_key, $value) = @_;
+This returns 1 upon success.
- # This verifies that only supported values will be stored.
- {
- my $r = Scalar::Util::reftype( $value );
- last if !defined $r;
+=cut
- last if $r eq 'HASH';
- last if $r eq 'ARRAY';
+sub write_value { die "write_value must be implemented in a child class" }
- $obj->_throw_error(
- "Storage of variables of type '$r' is not supported."
- );
- }
+=head2 setup( $obj )
- my $location = 0;
- my $result = 2;
-
- my $root = $obj->_root;
- my $fh = $obj->_fh;
-
- my $actual_length = $self->_length_needed( $obj, $value, $plain_key );
-
- my ($subloc, $offset, $size) = $self->_find_in_buckets( $tag, $md5 );
-
-# $self->_release_space( $obj, $size, $subloc );
- # Updating a known md5
-#XXX This needs updating to use _release_space
- if ( $subloc ) {
- $result = 1;
-
- if ($actual_length <= $size) {
- $location = $subloc;
- }
- else {
- $location = $self->_request_space( $obj, $actual_length );
- seek(
- $fh,
- $tag->{offset} + $offset
- + $self->{hash_size} + $root->{file_offset},
- SEEK_SET,
- );
- print( $fh pack($self->{long_pack}, $location ) );
- print( $fh pack($self->{long_pack}, $actual_length ) );
- }
- }
- # Adding a new md5
- elsif ( defined $offset ) {
- $location = $self->_request_space( $obj, $actual_length );
+This takes an object that provides _base_offset(). It will do everything needed
+in order to properly initialize all values for necessary functioning. If this is
+called upon an already initialized object, this will also reset the inode.
- seek( $fh, $tag->{offset} + $offset + $root->{file_offset}, SEEK_SET );
- print( $fh $md5 . pack($self->{long_pack}, $location ) );
- print( $fh pack($self->{long_pack}, $actual_length ) );
- }
- # If bucket didn't fit into list, split into a new index level
- # split_index() will do the _request_space() call
- else {
- $location = $self->split_index( $obj, $md5, $tag );
- }
+This returns 1.
- $self->write_value( $obj, $location, $plain_key, $value );
+=cut
- return $result;
-}
+sub setup { die "setup must be implemented in a child class" }
-sub write_value {
- my $self = shift;
- my ($obj, $location, $key, $value) = @_;
+=head2 begin_work( $obj )
- my $fh = $obj->_fh;
- my $root = $obj->_root;
+This takes an object that provides _base_offset(). It will set up all necessary
+bookkeeping in order to run all work within a transaction.
- my $dbm_deep_obj = _get_dbm_object( $value );
- if ( $dbm_deep_obj && $dbm_deep_obj->_root ne $obj->_root ) {
- $obj->_throw_error( "Cannot cross-reference. Use export() instead" );
- }
+If $obj is already within a transaction, an error will be thrown. If there are
+no more available transactions, an error will be thrown.
- seek($fh, $location + $root->{file_offset}, SEEK_SET);
+This returns undef.
- ##
- # Write signature based on content type, set content length and write
- # actual value.
- ##
- my $r = Scalar::Util::reftype( $value ) || '';
- if ( $dbm_deep_obj ) {
- $self->write_tag( $obj, undef, SIG_INTERNAL,pack($self->{long_pack}, $dbm_deep_obj->_base_offset) );
- }
- elsif ($r eq 'HASH') {
- if ( !$dbm_deep_obj && tied %{$value} ) {
- $obj->_throw_error( "Cannot store something that is tied" );
- }
- $self->write_tag( $obj, undef, SIG_HASH, chr(0)x$self->{index_size} );
- }
- elsif ($r eq 'ARRAY') {
- if ( !$dbm_deep_obj && tied @{$value} ) {
- $obj->_throw_error( "Cannot store something that is tied" );
- }
- $self->write_tag( $obj, undef, SIG_ARRAY, chr(0)x$self->{index_size} );
- }
- elsif (!defined($value)) {
- $self->write_tag( $obj, undef, SIG_NULL, '' );
- }
- else {
- $self->write_tag( $obj, undef, SIG_DATA, $value );
- }
+=cut
- ##
- # Plain key is stored AFTER value, as keys are typically fetched less often.
- ##
- print( $fh pack($self->{data_pack}, length($key)) . $key );
-
- # Internal references don't care about autobless
- return 1 if $dbm_deep_obj;
-
- ##
- # If value is blessed, preserve class name
- ##
- if ( $root->{autobless} ) {
- my $c = Scalar::Util::blessed($value);
- if ( defined $c && !$dbm_deep_obj ) {
- print( $fh chr(1) );
- print( $fh pack($self->{data_pack}, length($c)) . $c );
- }
- else {
- print( $fh chr(0) );
- }
- }
+sub begin_work { die "begin_work must be implemented in a child class" }
- ##
- # Tie the passed in reference so that changes to it are reflected in the
- # datafile. The use of $location as the base_offset will act as the
- # the linkage between parent and child.
- #
- # The overall assignment is a hack around the fact that just tying doesn't
- # store the values. This may not be the wrong thing to do.
- ##
- if ($r eq 'HASH') {
- my %x = %$value;
- tie %$value, 'DBM::Deep', {
- base_offset => $location,
- root => $root,
- };
- %$value = %x;
- }
- elsif ($r eq 'ARRAY') {
- my @x = @$value;
- tie @$value, 'DBM::Deep', {
- base_offset => $location,
- root => $root,
- };
- @$value = @x;
- }
+=head2 rollback( $obj )
- return 1;
-}
+This takes an object that provides _base_offset(). It will revert all
+actions taken within the running transaction.
-sub split_index {
- my $self = shift;
- my ($obj, $md5, $tag) = @_;
+If $obj is not within a transaction, an error will be thrown.
- my $fh = $obj->_fh;
- my $root = $obj->_root;
+This returns 1.
- my $loc = $self->_request_space(
- $obj, $self->tag_size( $self->{index_size} ),
- );
+=cut
- seek($fh, $tag->{ref_loc} + $root->{file_offset}, SEEK_SET);
- print( $fh pack($self->{long_pack}, $loc) );
+sub rollback { die "rollback must be implemented in a child class" }
- my $index_tag = $self->write_tag(
- $obj, $loc, SIG_INDEX,
- chr(0)x$self->{index_size},
- );
+=head2 commit( $obj )
- my $newtag_loc = $self->_request_space(
- $obj, $self->tag_size( $self->{bucket_list_size} ),
- );
+This takes an object that provides _base_offset(). It will apply all
+actions taken within the transaction to the HEAD.
- my $keys = $tag->{content}
- . $md5 . pack($self->{long_pack}, $newtag_loc)
- . pack($self->{long_pack}, 0);
+If $obj is not within a transaction, an error will be thrown.
- my @newloc = ();
- BUCKET:
- for (my $i = 0; $i <= $self->{max_buckets}; $i++) {
- my ($key, $old_subloc, $size) = $self->_get_key_subloc( $keys, $i );
+This returns 1.
- die "[INTERNAL ERROR]: No key in split_index()\n" unless $key;
- die "[INTERNAL ERROR]: No subloc in split_index()\n" unless $old_subloc;
+=cut
- my $num = ord(substr($key, $tag->{ch} + 1, 1));
+sub commit { die "commit must be implemented in a child class" }
- if ($newloc[$num]) {
- seek($fh, $newloc[$num] + $root->{file_offset}, SEEK_SET);
- my $subkeys;
- read( $fh, $subkeys, $self->{bucket_list_size});
+=head2 get_next_key( $obj, $prev_key )
- # This is looking for the first empty spot
- my ($subloc, $offset, $size) = $self->_find_in_buckets(
- { content => $subkeys }, '',
- );
+This takes an object that provides _base_offset() and an optional string
+representing the prior key returned via a prior invocation of this method.
- seek($fh, $newloc[$num] + $offset + $root->{file_offset}, SEEK_SET);
- print( $fh $key . pack($self->{long_pack}, $old_subloc) );
+This method delegates to C<< DBM::Deep::Iterator->get_next_key() >>.
- next;
- }
+=cut
- seek($fh, $index_tag->{offset} + ($num * $self->{long_size}) + $root->{file_offset}, SEEK_SET);
-
- my $loc = $self->_request_space(
- $obj, $self->tag_size( $self->{bucket_list_size} ),
- );
-
- print( $fh pack($self->{long_pack}, $loc) );
-
- my $blist_tag = $self->write_tag(
- $obj, $loc, SIG_BLIST,
- chr(0)x$self->{bucket_list_size},
- );
-
- seek($fh, $blist_tag->{offset} + $root->{file_offset}, SEEK_SET);
- print( $fh $key . pack($self->{long_pack}, $old_subloc) );
-
- $newloc[$num] = $blist_tag->{offset};
- }
-
- $self->_release_space(
- $obj, $self->tag_size( $self->{bucket_list_size} ),
- $tag->{offset} - SIG_SIZE - $self->{data_size},
- );
-
- return $newtag_loc;
-}
-
-sub read_from_loc {
+# XXX Add staleness here
+sub get_next_key {
my $self = shift;
- my ($obj, $subloc) = @_;
-
- my $fh = $obj->_fh;
-
- ##
- # Found match -- seek to offset and read signature
- ##
- my $signature;
- seek($fh, $subloc + $obj->_root->{file_offset}, SEEK_SET);
- read( $fh, $signature, SIG_SIZE);
-
- ##
- # If value is a hash or array, return new DBM::Deep object with correct offset
- ##
- if (($signature eq SIG_HASH) || ($signature eq SIG_ARRAY)) {
- my $new_obj = DBM::Deep->new({
- type => $signature,
- base_offset => $subloc,
- root => $obj->_root,
+ my ($obj, $prev_key) = @_;
+
+ # XXX Need to add logic about resetting the iterator if any key in the
+ # reference has changed
+ unless ( defined $prev_key ) {
+ $obj->{iterator} = $self->iterator_class->new({
+ base_offset => $obj->_base_offset,
+ engine => $self,
});
-
- if ($new_obj->_root->{autobless}) {
- ##
- # Skip over value and plain key to see if object needs
- # to be re-blessed
- ##
- seek($fh, $self->{data_size} + $self->{index_size}, SEEK_CUR);
-
- my $size;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
- if ($size) { seek($fh, $size, SEEK_CUR); }
-
- my $bless_bit;
- read( $fh, $bless_bit, 1);
- if (ord($bless_bit)) {
- ##
- # Yes, object needs to be re-blessed
- ##
- my $class_name;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
- if ($size) { read( $fh, $class_name, $size); }
- if ($class_name) { $new_obj = bless( $new_obj, $class_name ); }
- }
- }
-
- return $new_obj;
- }
- elsif ( $signature eq SIG_INTERNAL ) {
- my $size;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
-
- if ( $size ) {
- my $new_loc;
- read( $fh, $new_loc, $size );
- $new_loc = unpack( $self->{long_pack}, $new_loc );
-
- return $self->read_from_loc( $obj, $new_loc );
- }
- else {
- return;
- }
- }
- ##
- # Otherwise return actual value
- ##
- elsif ($signature eq SIG_DATA) {
- my $size;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
-
- my $value = '';
- if ($size) { read( $fh, $value, $size); }
- return $value;
}
- ##
- # Key exists, but content is null
- ##
- return;
+ return $obj->{iterator}->get_next_key( $obj );
}
-sub get_bucket_value {
- ##
- # Fetch single value given tag and MD5 digested key.
- ##
- my $self = shift;
- my ($obj, $tag, $md5) = @_;
+=head2 lock_exclusive()
- my ($subloc, $offset, $size) = $self->_find_in_buckets( $tag, $md5 );
- if ( $subloc ) {
- return $self->read_from_loc( $obj, $subloc );
- }
- return;
-}
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has taken precautions to be safe for a write.
-sub delete_bucket {
- ##
- # Delete single key/value pair given tag and MD5 digested key.
- ##
- my $self = shift;
- my ($obj, $tag, $md5) = @_;
+This returns nothing.
- my ($subloc, $offset, $size) = $self->_find_in_buckets( $tag, $md5 );
-#XXX This needs _release_space()
- if ( $subloc ) {
- my $fh = $obj->_fh;
- seek($fh, $tag->{offset} + $offset + $obj->_root->{file_offset}, SEEK_SET);
- print( $fh substr($tag->{content}, $offset + $self->{bucket_size} ) );
- print( $fh chr(0) x $self->{bucket_size} );
+=cut
- return 1;
- }
- return;
+sub lock_exclusive {
+ my $self = shift;
+ my ($obj) = @_;
+ return $self->storage->lock_exclusive( $obj );
}
-sub bucket_exists {
- ##
- # Check existence of single key given tag and MD5 digested key.
- ##
- my $self = shift;
- my ($obj, $tag, $md5) = @_;
+=head2 lock_shared()
- my ($subloc, $offset, $size) = $self->_find_in_buckets( $tag, $md5 );
- return $subloc && 1;
-}
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has taken precautions to be safe for a read.
+
+This returns nothing.
+
+=cut
-sub find_bucket_list {
- ##
- # Locate offset for bucket list, given digested key
- ##
+sub lock_shared {
my $self = shift;
- my ($obj, $md5, $args) = @_;
- $args = {} unless $args;
+ my ($obj) = @_;
+ return $self->storage->lock_shared( $obj );
+}
- ##
- # Locate offset for bucket list using digest index system
- ##
- my $tag = $self->load_tag($obj, $obj->_base_offset)
- or $obj->_throw_error( "INTERNAL ERROR - Cannot find tag" );
+=head2 unlock()
- my $ch = 0;
- while ($tag->{signature} ne SIG_BLIST) {
- my $num = ord substr($md5, $ch, 1);
+This takes an object that provides _base_offset(). It will guarantee that
+the storage has released the most recently-taken lock.
- my $ref_loc = $tag->{offset} + ($num * $self->{long_size});
- $tag = $self->index_lookup( $obj, $tag, $num );
+This returns nothing.
- if (!$tag) {
- return if !$args->{create};
+=cut
- my $loc = $self->_request_space(
- $obj, $self->tag_size( $self->{bucket_list_size} ),
- );
+sub unlock {
+ my $self = shift;
+ my ($obj) = @_;
- my $fh = $obj->_fh;
- seek($fh, $ref_loc + $obj->_root->{file_offset}, SEEK_SET);
- print( $fh pack($self->{long_pack}, $loc) );
+ my $rv = $self->storage->unlock( $obj );
- $tag = $self->write_tag(
- $obj, $loc, SIG_BLIST,
- chr(0)x$self->{bucket_list_size},
- );
+ $self->flush if $rv;
- $tag->{ref_loc} = $ref_loc;
- $tag->{ch} = $ch;
+ return $rv;
+}
- last;
- }
+=head1 INTERNAL METHODS
- $tag->{ch} = $ch++;
- $tag->{ref_loc} = $ref_loc;
- }
+The following methods are internal-use-only to DBM::Deep::Engine and its
+child classes.
- return $tag;
-}
+=cut
-sub index_lookup {
- ##
- # Given index tag, lookup single entry in index and return .
- ##
- my $self = shift;
- my ($obj, $tag, $index) = @_;
+=head2 flush()
- my $location = unpack(
- $self->{long_pack},
- substr(
- $tag->{content},
- $index * $self->{long_size},
- $self->{long_size},
- ),
- );
+This takes no arguments. It will do everything necessary to flush all things to
+disk. This is usually called during unlock() and setup().
- if (!$location) { return; }
+This returns nothing.
- return $self->load_tag( $obj, $location );
-}
+=cut
-sub traverse_index {
- ##
- # Scan index and recursively step into deeper levels, looking for next key.
- ##
+sub flush {
my $self = shift;
- my ($obj, $offset, $ch, $force_return_next) = @_;
-
- my $tag = $self->load_tag($obj, $offset );
-
- my $fh = $obj->_fh;
-
- if ($tag->{signature} ne SIG_BLIST) {
- my $content = $tag->{content};
- my $start = $obj->{return_next} ? 0 : ord(substr($obj->{prev_md5}, $ch, 1));
-
- for (my $idx = $start; $idx < (2**8); $idx++) {
- my $subloc = unpack(
- $self->{long_pack},
- substr(
- $content,
- $idx * $self->{long_size},
- $self->{long_size},
- ),
- );
-
- if ($subloc) {
- my $result = $self->traverse_index(
- $obj, $subloc, $ch + 1, $force_return_next,
- );
-
- if (defined($result)) { return $result; }
- }
- } # index loop
-
- $obj->{return_next} = 1;
- } # tag is an index
-
- else {
- my $keys = $tag->{content};
- if ($force_return_next) { $obj->{return_next} = 1; }
-
- ##
- # Iterate through buckets, looking for a key match
- ##
- for (my $i = 0; $i < $self->{max_buckets}; $i++) {
- my ($key, $subloc) = $self->_get_key_subloc( $keys, $i );
-
- # End of bucket list -- return to outer loop
- if (!$subloc) {
- $obj->{return_next} = 1;
- last;
- }
- # Located previous key -- return next one found
- elsif ($key eq $obj->{prev_md5}) {
- $obj->{return_next} = 1;
- next;
- }
- # Seek to bucket location and skip over signature
- elsif ($obj->{return_next}) {
- seek($fh, $subloc + $obj->_root->{file_offset}, SEEK_SET);
-
- # Skip over value to get to plain key
- my $sig;
- read( $fh, $sig, SIG_SIZE );
-
- my $size;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
- if ($size) { seek($fh, $size, SEEK_CUR); }
-
- # Read in plain key and return as scalar
- my $plain_key;
- read( $fh, $size, $self->{data_size});
- $size = unpack($self->{data_pack}, $size);
- if ($size) { read( $fh, $plain_key, $size); }
-
- return $plain_key;
- }
- }
-
- $obj->{return_next} = 1;
- } # tag is a bucket list
+
+ # Why do we need to have the storage flush? Shouldn't autoflush take care of
+ # things? -RobK, 2008-06-26
+ $self->storage->flush;
return;
}
-sub get_next_key {
- ##
- # Locate next key, given digested previous one
- ##
- my $self = shift;
- my ($obj) = @_;
+=head2 load_sector( $loc )
- $obj->{prev_md5} = $_[1] ? $_[1] : undef;
- $obj->{return_next} = 0;
+This takes an id/location/offset and loads the sector based on the engine's
+defined sector type.
- ##
- # If the previous key was not specifed, start at the top and
- # return the first one found.
- ##
- if (!$obj->{prev_md5}) {
- $obj->{prev_md5} = chr(0) x $self->{hash_size};
- $obj->{return_next} = 1;
- }
+=cut
- return $self->traverse_index( $obj, $obj->_base_offset, 0 );
-}
+sub load_sector { $_[0]->sector_type->load( @_ ) }
-# Utilities
+=head2 cache / clear_cache
-sub _get_key_subloc {
- my $self = shift;
- my ($keys, $idx) = @_;
-
- my ($key, $subloc, $size) = unpack(
- "a$self->{hash_size} $self->{long_pack} $self->{long_pack}",
- substr(
- $keys,
- ($idx * $self->{bucket_size}),
- $self->{bucket_size},
- ),
- );
-
- return ($key, $subloc, $size);
-}
+This is the cache of loaded Reference sectors.
-sub _find_in_buckets {
- my $self = shift;
- my ($tag, $md5) = @_;
+=cut
- BUCKET:
- for ( my $i = 0; $i < $self->{max_buckets}; $i++ ) {
- my ($key, $subloc, $size) = $self->_get_key_subloc(
- $tag->{content}, $i,
- );
+sub cache { $_[0]{cache} ||= {} }
+sub clear_cache { %{$_[0]->cache} = () }
- return ($subloc, $i * $self->{bucket_size}, $size) unless $subloc;
+=head2 supports( $option )
- next BUCKET if $key ne $md5;
+This returns a boolean depending on whether this instance of DBM::Deep supports
+that feature. C<$option> can be one of:
- return ($subloc, $i * $self->{bucket_size}, $size);
- }
+=over 4
- return;
-}
+=item * transactions
-#sub _print_at {
-# my $self = shift;
-# my ($obj, $spot, $data) = @_;
-#
-# my $fh = $obj->_fh;
-# seek( $fh, $spot, SEEK_SET );
-# print( $fh $data );
-#
-# return;
-#}
-
-sub _request_space {
- my $self = shift;
- my ($obj, $size) = @_;
+=back
- my $loc = $obj->_root->{end};
- $obj->_root->{end} += $size;
+=cut
- return $loc;
-}
+sub supports { die "supports must be implemented in a child class" }
-sub _release_space {
- my $self = shift;
- my ($obj, $size, $loc) = @_;
+=head2 ACCESSORS
- my $next_loc = 0;
+The following are readonly attributes.
- my $fh = $obj->_fh;
- seek( $fh, $loc + $obj->_root->{file_offset}, SEEK_SET );
- print( $fh SIG_FREE
- . pack($self->{long_pack}, $size )
- . pack($self->{long_pack}, $next_loc )
- );
+=over 4
- return;
-}
+=item * storage
-1;
-__END__
+=item * sector_type
-# This will be added in later, after more refactoring is done. This is an early
-# attempt at refactoring on the physical level instead of the virtual level.
-sub _read_at {
- my $self = shift;
- my ($obj, $spot, $amount, $unpack) = @_;
+=back
- my $fh = $obj->_fh;
- seek( $fh, $spot + $obj->_root->{file_offset}, SEEK_SET );
+=cut
- my $buffer;
- my $bytes_read = read( $fh, $buffer, $amount );
+sub storage { $_[0]{storage} }
- if ( $unpack ) {
- $buffer = unpack( $unpack, $buffer );
- }
+sub sector_type { die "sector_type must be implemented in a child class" }
- if ( wantarray ) {
- return ($buffer, $bytes_read);
- }
- else {
- return $buffer;
- }
-}
+1;
+__END__