--- /dev/null
+# These are the calls into ::Engine
+::Deep:
+ _init:
+ setup_fh($self)
+ optimize:
+ setup_fh($self)
+ STORE:
+ old:
+ apply_digest($key)
+ find_blist( $self->_base_offset, $md5, { create => 1 } )
+ add_bucket( $tag, $md5, $key, $value, undef, $orig_key )
+ new:
+ write_value( $key, $value );
+ FETCH:
+ old:
+ apply_digest($key)
+ find_blist( $self->_base_offset, $md5 )
+ get_bucket_value( $tag, $md5, $orig_key )
+ new:
+ read_value( $key )
+ DELETE:
+ old:
+ apply_digest($key)
+ find_blist( $self->_base_offset, $md5 )
+ get_bucket_value( $tag, $md5, $orig_key )
+ delete_bucket( $tag, $md5, $orig_key )
+ new:
+ delete_key( $key )
+ EXISTS:
+ old:
+ apply_digest($key)
+ find_blist( $self->_base_offset, $md5 )
+ bucket_exists( $tag, $md5 )
+ new:
+        key_exists( $key )
+ CLEAR:
+ old:
+ apply_digest($key)
+ find_blist( $self->_base_offset, $md5 )
+ delete_bucket( $tag, $md5, $key )
+ new:
+ delete_key( $key )
+::Array:
+::Hash:
+ FIRSTKEY:
+ old:
+ get_next_key($self)
+ new:
+ get_next_key()
+ NEXTKEY:
+ old:
+ apply_digest($prev_key)
+ get_next_key($self, $prev_md5)
+ new:
+ get_next_key($prev_key)
+::File:
$self->_engine->setup_fh( $self );
- $self->{fileobj}->set_db( $self );
+ $self->_fileobj->set_db( $self );
return $self;
}
my $self = shift->_get_self;
return DBM::Deep->new(
- type => $self->_type,
+ type => $self->_type,
base_offset => $self->_base_offset,
- fileobj => $self->_fileobj,
+ fileobj => $self->_fileobj,
+ parent => $self->{parent},
+ parent_key => $self->{parent_key},
);
}
sub begin_work {
my $self = shift->_get_self;
- $self->_fileobj->begin_transaction;
- return 1;
+ return $self->_fileobj->begin_transaction;
}
sub rollback {
my $self = shift->_get_self;
- $self->_fileobj->end_transaction;
- return 1;
+ return $self->_fileobj->end_transaction;
}
sub commit {
my $self = shift->_get_self;
- $self->_fileobj->commit_transaction;
- return 1;
+ return $self->_fileobj->commit_transaction;
}
##
##
my $self = shift->_get_self;
my ($key, $value, $orig_key) = @_;
+ $orig_key = $key unless defined $orig_key;
if ( !FileHandle::Fmode::is_W( $self->_fh ) ) {
$self->_throw_error( 'Cannot write to a readonly filehandle' );
}
#XXX The second condition needs to disappear
- if ( defined $orig_key && !( $self->_type eq TYPE_ARRAY && $orig_key eq 'length') ) {
+ if ( !( $self->_type eq TYPE_ARRAY && $orig_key eq 'length') ) {
my $rhs;
my $r = Scalar::Util::reftype( $value ) || '';
##
$self->lock( LOCK_EX );
- my $md5 = $self->_engine->{digest}->($key);
-
- my $tag = $self->_engine->find_blist( $self->_base_offset, $md5, { create => 1 } );
-
# User may be storing a hash, in which case we do not want it run
# through the filtering system
if ( !ref($value) && $self->_fileobj->{filter_store_value} ) {
##
# Add key/value to bucket list
##
- $self->_engine->add_bucket( $tag, $md5, $key, $value, undef, $orig_key );
+# my $md5 = $self->_engine->apply_digest($key);
+# my $tag = $self->_engine->find_blist( $self->_base_offset, $md5, { create => 1 } );
+# $self->_engine->add_bucket( $tag, $md5, $key, $value, undef, $orig_key );
+ $self->_engine->write_value( $self->_base_offset, $key, $value, $orig_key );
$self->unlock();
##
my $self = shift->_get_self;
my ($key, $orig_key) = @_;
+ $orig_key = $key unless @_ > 1;
- my $md5 = $self->_engine->{digest}->($key);
+ my $md5 = $self->_engine->apply_digest($key);
##
# Request shared lock for reading
##
my $self = shift->_get_self;
my ($key, $orig_key) = @_;
+ $orig_key = $key unless defined $orig_key;
if ( !FileHandle::Fmode::is_W( $self->_fh ) ) {
$self->_throw_error( 'Cannot write to a readonly filehandle' );
##
$self->lock( LOCK_EX );
- my $md5 = $self->_engine->{digest}->($key);
+ my $md5 = $self->_engine->apply_digest($key);
my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
if (!$tag) {
my $self = shift->_get_self;
my ($key) = @_;
- my $md5 = $self->_engine->{digest}->($key);
+ my $md5 = $self->_engine->apply_digest($key);
##
# Request shared lock for reading
my $key = $self->first_key;
while ( $key ) {
my $next_key = $self->next_key( $key );
- my $md5 = $self->_engine->{digest}->($key);
+ my $md5 = $self->_engine->apply_digest($key);
my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
$self->_engine->delete_bucket( $tag, $md5, $key );
$key = $next_key;
}
else {
my $size = $self->FETCHSIZE;
- for my $key ( map { pack ( $self->_engine->{long_pack}, $_ ) } 0 .. $size - 1 ) {
- my $md5 = $self->_engine->{digest}->($key);
+ for my $key ( 0 .. $size - 1 ) {
+ my $md5 = $self->_engine->apply_digest($key);
my $tag = $self->_engine->find_blist( $self->_base_offset, $md5 );
$self->_engine->delete_bucket( $tag, $md5, $key );
}
use strict;
use warnings;
-our $VERSION = '0.99_01';
+our $VERSION = '0.99_03';
# This is to allow DBM::Deep::Array to handle negative indices on
# its own. Otherwise, Perl would intercept the call to negative
$self->lock( $self->LOCK_SH );
-# my $orig_key = $key eq 'length' ? undef : $key;
- my $orig_key = $key;
+ my $orig_key;
if ( $key =~ /^-?\d+$/ ) {
if ( $key < 0 ) {
$key += $self->FETCHSIZE;
return;
}
}
-
- $key = pack($self->_engine->{long_pack}, $key);
+ $orig_key = $key;
+ }
+ else {
+ $orig_key = undef;
}
my $rv = $self->SUPER::FETCH( $key, $orig_key );
$self->lock( $self->LOCK_EX );
-# my $orig = $key eq 'length' ? undef : $key;
- my $orig_key = $key;
-
my $size;
- my $numeric_idx;
+ my $idx_is_numeric;
if ( $key =~ /^\-?\d+$/ ) {
- $numeric_idx = 1;
+ $idx_is_numeric = 1;
if ( $key < 0 ) {
$size = $self->FETCHSIZE;
- $key += $size;
- if ( $key < 0 ) {
- die( "Modification of non-creatable array value attempted, subscript $orig_key" );
+ if ( $key + $size < 0 ) {
+ die( "Modification of non-creatable array value attempted, subscript $key" );
}
+ $key += $size
}
-
- $key = pack($self->_engine->{long_pack}, $key);
}
- my $rv = $self->SUPER::STORE( $key, $value, $orig_key );
+ my $rv = $self->SUPER::STORE( $key, $value, ($key eq 'length' ? undef : $key) );
- if ( $numeric_idx ) {
+ if ( $idx_is_numeric ) {
$size = $self->FETCHSIZE unless defined $size;
- if ( $orig_key >= $size ) {
- $self->STORESIZE( $orig_key + 1 );
+ if ( $key >= $size ) {
+ $self->STORESIZE( $key + 1 );
}
}
return;
}
}
-
- $key = pack($self->_engine->{long_pack}, $key);
}
my $rv = $self->SUPER::EXISTS( $key );
my $self = shift->_get_self;
my ($key) = @_;
- my $unpacked_key = $key;
- my $orig = $key eq 'length' ? undef : $key;
-
$self->lock( $self->LOCK_EX );
my $size = $self->FETCHSIZE;
return;
}
}
-
- $key = pack($self->_engine->{long_pack}, $key);
}
- my $rv = $self->SUPER::DELETE( $key, $orig );
+ my $rv = $self->SUPER::DELETE( $key );
- if ($rv && $unpacked_key == $size - 1) {
- $self->STORESIZE( $unpacked_key );
+ if ($rv && $key == $size - 1) {
+ $self->STORESIZE( $key, ($key eq 'length' ? undef : $key) );
}
$self->unlock;
use strict;
use warnings;
-our $VERSION = q(0.99_01);
+our $VERSION = q(0.99_03);
use Fcntl qw( :DEFAULT :flock );
use Scalar::Util ();
# - calculate_sizes()
# - _get_key_subloc()
# - add_bucket() - where the buckets are printed
+#
+# * Every method in here assumes that the _fileobj has been appropriately
+# safeguarded. This can be anything from flock() to some sort of manual
+#   mutex. But, it's the caller's responsibility to make sure that this has
+# been done.
##
# Setup file and tag signatures. These should never change.
sub SIG_KEYS () { 'K' }
sub SIG_SIZE () { 1 }
+################################################################################
+#
+# This is new code. It is a complete rewrite of the engine based on a new API
+#
+################################################################################
+
+# Store $value under $key in the reference rooted at $offset (new engine API).
+# Digests the key, locates -- creating if necessary -- its bucket list, and
+# writes the key/value pair.  Returns add_bucket()'s result.
+# NOTE(review): caller is responsible for locking; see the safeguard note above.
+sub write_value {
+    my $self = shift;
+    my ($offset, $key, $value, $orig_key) = @_;
+
+    my $dig_key = $self->apply_digest( $key );
+    # { create => 1 } autovivifies the bucket list on first store.
+    my $tag = $self->find_blist( $offset, $dig_key, { create => 1 } );
+    return $self->add_bucket( $tag, $dig_key, $key, $value, undef, $orig_key );
+}
+
+# Fetch the value stored under $key in the reference rooted at $offset
+# (new engine API).  Digests the key, finds its bucket list (no create),
+# and returns get_bucket_value()'s result.
+# NOTE(review): $tag may be false when the blist does not exist -- presumably
+# get_bucket_value() tolerates that; confirm against its definition.
+sub read_value {
+    my $self = shift;
+    my ($offset, $key) = @_;
+
+    my $dig_key = $self->apply_digest( $key );
+    my $tag = $self->find_blist( $offset, $dig_key );
+    return $self->get_bucket_value( $tag, $dig_key, $key );
+}
+
+# Remove the entry stored under $key in the reference rooted at $offset
+# (new engine API).  Mirrors the old apply_digest/find_blist/delete_bucket
+# sequence documented in the call map at the top of this patch.
+sub delete_key {
+    my $self = shift;
+    my ($offset, $key) = @_;
+
+    my $dig_key = $self->apply_digest( $key );
+    my $tag = $self->find_blist( $offset, $dig_key );
+    return $self->delete_bucket( $tag, $dig_key, $key );
+}
+
+# True if an entry exists under $key in the reference rooted at $offset
+# (new engine API).
+# NOTE(review): the old call sites pass only ( $tag, $md5 ) to
+# bucket_exists; the extra $key argument here is presumably ignored --
+# confirm against bucket_exists()'s signature.
+sub key_exists {
+    my $self = shift;
+    my ($offset, $key) = @_;
+
+    my $dig_key = $self->apply_digest( $key );
+    my $tag = $self->find_blist( $offset, $dig_key );
+    return $self->bucket_exists( $tag, $dig_key, $key );
+}
+
+# Placeholder for the new-API key iterator (FIRSTKEY/NEXTKEY in the call
+# map above).  Deliberately prefixed "XXX" so nothing dispatches to it yet;
+# the body is still commented out pending the rewrite.
+sub XXXget_next_key {
+    my $self = shift;
+    my ($offset, $prev_key) = @_;
+
+# my $dig_key = $self->apply_digest( $key );
+}
+
+################################################################################
+#
+# Below here is the old code. It will be folded into the code above as it can.
+#
+################################################################################
+
sub new {
my $class = shift;
my ($args) = @_;
data_pack => 'N',
digest => \&Digest::MD5::md5,
- hash_size => 16,
+ hash_size => 16, # In bytes
##
- # Maximum number of buckets per blist before another level of indexing is
+ # Number of buckets per blist before another level of indexing is
# done. Increase this value for slightly greater speed, but larger database
# files. DO NOT decrease this value below 16, due to risk of recursive
# reindex overrun.
sub _fileobj { return $_[0]{fileobj} }
+# Hash the given key(s) with the engine's configured digest function.
+# This replaces the many direct $self->_engine->{digest}->($key) calls
+# being rewritten elsewhere in this patch (default digest is
+# Digest::MD5::md5 per the new() defaults).
+sub apply_digest {
+    my $self = shift;
+    return $self->{digest}->(@_);
+}
+
sub calculate_sizes {
my $self = shift;
return {
signature => $sig,
- #XXX Is this even used?
- size => $size,
+ size => $size, #XXX Is this even used?
offset => $offset + SIG_SIZE + $self->{data_size},
content => $fileobj->read_at( undef, $size ),
};
pack($self->{long_pack}, $location2 ),
pack( 'C C', $trans_id, 0 ),
);
- $self->write_value( $location2, $plain_key, $old_value, $orig_key );
+ $self->_write_value( $location2, $plain_key, $old_value, $orig_key );
}
}
}
}
}
- $self->write_value( $location, $plain_key, $value, $orig_key );
+ $self->_write_value( $location, $plain_key, $value, $orig_key );
return 1;
}
-sub write_value {
+sub _write_value {
my $self = shift;
my ($location, $key, $value, $orig_key) = @_;
# If value is a hash or array, return new DBM::Deep object with correct offset
##
if (($signature eq SIG_HASH) || ($signature eq SIG_ARRAY)) {
+ #XXX This needs to be a singleton
my $new_obj = DBM::Deep->new({
type => $signature,
base_offset => $subloc,
pack($self->{long_pack}, $location2 ),
pack( 'C C', $trans_id, 0 ),
);
- $self->write_value( $location2, $orig_key, $value, $orig_key );
+ $self->_write_value( $location2, $orig_key, $value, $orig_key );
}
}
use strict;
use warnings;
-our $VERSION = q(0.99_01);
+our $VERSION = q(0.99_03);
use Fcntl qw( :DEFAULT :flock :seek );
use strict;
use warnings;
-our $VERSION = q(0.99_01);
+our $VERSION = q(0.99_03);
use base 'DBM::Deep';
? $self->_fileobj->{filter_store_key}->($_[0])
: $_[0];
- my $prev_md5 = $self->_engine->{digest}->($prev_key);
+ my $prev_md5 = $self->_engine->apply_digest($prev_key);
##
# Request shared lock for reading