-DROP TABLE IF EXISTS references;
-CREATE TABLE references (
+DROP TABLE IF EXISTS datas;
+DROP TABLE IF EXISTS refs;
+
+CREATE TABLE refs (
id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY
- ,`type` ENUM( 'hash', 'array' ) NOT NULL DEFAULT 'hash'
+ ,ref_type ENUM( 'H', 'A' ) NOT NULL DEFAULT 'H'
,refcount BIGINT UNSIGNED NOT NULL DEFAULT 1
);
-DROP TABLE IF EXISTS datas;
-CREATE TABLE data (
+CREATE TABLE datas (
id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY
- ,reference_id BIGINT UNSIGNED NOT NULL
- ,key TEXT NOT NULL
+ ,ref_id BIGINT UNSIGNED NOT NULL
+ ,data_type ENUM( 'S', 'R' ) DEFAULT 'S'
+ ,`key` TEXT NOT NULL
,value TEXT
,class TEXT
- ,FOREIGN KEY (reference_id) REFERENCES references (id)
+ ,FOREIGN KEY (ref_id) REFERENCES refs (id)
+ ON DELETE CASCADE ON UPDATE CASCADE
+ ,UNIQUE INDEX ( ref_id, `key`(900) )
);
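+
+-- A sketch of the intended layout (example values assumed): a root hash
+-- stored as refs row id=1, holding one key 'foo' => 'bar', would look
+-- roughly like:
+--
+--   INSERT INTO refs ( ref_type ) VALUES ( 'H' );            -- id=1
+--   INSERT INTO datas ( ref_id, data_type, `key`, value )
+--     VALUES ( 1, 'S', 'foo', 'bar' );
+--
+-- A nested hash or array would presumably get data_type 'R', with value
+-- pointing at another refs.id.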
-
---DROP TABLE IF EXISTS `rec_array`;
---CREATE TABLE `rec_array` (
--- `id` bigint(20) unsigned NOT NULL,
--- PRIMARY KEY (`id`)
---);
---
---DROP TABLE IF EXISTS `rec_array_item`;
---CREATE TABLE `rec_array_item` (
--- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
--- `array` bigint(20) NOT NULL,
--- `pos` bigint(20) NOT NULL,
--- `value_data` varchar(255) DEFAULT NULL,
--- `value_type` enum('array','data','hash','text','value') NOT NULL DEFAULT 'value',
--- PRIMARY KEY (`id`),
--- UNIQUE KEY `array_2` (`array`,`pos`)
---);
---
---DROP TABLE IF EXISTS `rec_hash`;
---CREATE TABLE `rec_hash` (
--- `id` bigint(20) unsigned NOT NULL,
--- PRIMARY KEY (`id`)
---);
---
---DROP TABLE IF EXISTS `rec_hash_item`;
---CREATE TABLE `rec_hash_item` (
--- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
--- `hash` bigint(20) NOT NULL,
--- `key_data` varchar(255) DEFAULT NULL,
--- `key_hash` varchar(22) NOT NULL,
--- `key_type` enum('text','value') NOT NULL DEFAULT 'value',
--- `value_data` varchar(255) DEFAULT NULL,
--- `value_type` enum('array','data','hash','text','value') NOT NULL DEFAULT 'value',
--- PRIMARY KEY (`id`),
--- UNIQUE KEY `hash_2` (`hash`,`key_hash`)
---);
---
---DROP TABLE IF EXISTS `rec_item`;
---CREATE TABLE `rec_item` (
--- `id` bigint(20) NOT NULL AUTO_INCREMENT,
--- `item_type` enum('array','hash') NOT NULL DEFAULT 'hash',
--- PRIMARY KEY (`id`)
---);
---
---DROP TABLE IF EXISTS `rec_value_data`;
---CREATE TABLE `rec_value_data` (
--- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
--- `data` longblob NOT NULL,
--- PRIMARY KEY (`id`)
---);
---
---DROP TABLE IF EXISTS `rec_value_text`;
---CREATE TABLE `rec_value_text` (
--- `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
--- `data` longtext NOT NULL,
--- PRIMARY KEY (`id`)
---);
---
use Scalar::Util ();
+use DBM::Deep::Engine::DBI ();
use DBM::Deep::Engine::File ();
use DBM::Deep::SQL::Util;
my $args = $class->_get_args( @_ );
my $self;
+=pod
if (exists $args->{dbi}) {
eval {
require DBIx::Abstract;
return bless $self, $class;
}
}
+=cut
##
# Check if we want a tied hash or array.
# mutex. But it's the caller's responsibility to make sure that this has
# been done.
+sub SIG_HASH () { 'H' }
+sub SIG_ARRAY () { 'A' }
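+# These one-character signatures double as the ref_type values stored in
+# the refs table: 'H' for hash references, 'A' for array references.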
+
=head1 NAME
DBM::Deep::Engine
sub sector_type { 'DBM::Deep::Sector::DBI' }
-__END__
+sub new {
+ my $class = shift;
+ my ($args) = @_;
+
+ $args->{storage} = DBM::Deep::Storage::DBI->new( $args )
+ unless exists $args->{storage};
+
+ my $self = bless {
+ storage => undef,
+ }, $class;
+
+ # Grab the parameters we want to use
+ foreach my $param ( keys %$self ) {
+ next unless exists $args->{$param};
+ $self->{$param} = $args->{$param};
+ }
+
+ return $self;
+}
+
+sub setup {
+ my $self = shift;
+ my ($obj) = @_;
+
+ # Default the id to 1. This means that we will be creating a row if there
+ # isn't one. The assumption is that the row with id=1 can never be deleted.
+ # I don't know if this is a good assumption.
+ $obj->{base_offset} ||= 1;
+
+ my ($rows) = $self->storage->read_from(
+ refs => $obj->_base_offset,
+ qw( ref_type ),
+ );
+
+ # We don't have a row yet.
+ unless ( @$rows ) {
+ $self->storage->write_to(
+ refs => $obj->_base_offset,
+ ref_type => $obj->_type,
+ );
+ }
+
+ my $sector = DBM::Deep::Sector::DBI::Reference->new({
+ engine => $self,
+ offset => $obj->_base_offset,
+ });
+}
sub read_value {
my $self = shift;
my ($obj, $key) = @_;
+
+ my $sector = $self->load_sector( $obj->_base_offset, 'refs' )
+ or return;
+
+# if ( $sector->staleness != $obj->_staleness ) {
+# return;
+# }
+
+# my $key_md5 = $self->_apply_digest( $key );
+
+ my $value_sector = $sector->get_data_for({
+ key => $key,
+# key_md5 => $key_md5,
+ allow_head => 1,
+ });
+
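+ # Reading a key that has no sector yet autovivifies it with an undef
+ # scalar, so the key exists for any writes that follow.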
+ unless ( $value_sector ) {
+ $value_sector = DBM::Deep::Sector::DBI::Scalar->new({
+ engine => $self,
+ data => undef,
+ });
+
+ $sector->write_data({
+# key_md5 => $key_md5,
+ key => $key,
+ value => $value_sector,
+ });
+ }
+
+ return $value_sector->data;
}
+=pod
sub get_classname {
my $self = shift;
my ($obj) = @_;
my $self = shift;
my ($obj, $old_key, $new_key) = @_;
}
+=cut
+# exists() returns '' (false), not undef.
sub key_exists {
my $self = shift;
my ($obj, $key) = @_;
+
+ my $sector = $self->load_sector( $obj->_base_offset, 'refs' )
+ or return '';
+
+# if ( $sector->staleness != $obj->_staleness ) {
+# return '';
+# }
+
+ my $data = $sector->get_data_for({
+# key_md5 => $self->_apply_digest( $key ),
+ key => $key,
+ allow_head => 1,
+ });
+
+ # exists() returns 1 or '' for true/false.
+ return $data ? 1 : '';
}
sub delete_key {
my $self = shift;
my ($obj, $key) = @_;
+
+ my $sector = $self->load_sector( $obj->_base_offset, 'refs' )
+ or return '';
+
+# if ( $sector->staleness != $obj->_staleness ) {
+# return '';
+# }
+
+ return $sector->delete_key({
+# key_md5 => $self->_apply_digest( $key ),
+ key => $key,
+ allow_head => 0,
+ });
}
sub write_value {
# Determine if the row was deleted under us
#
- my ($type);
+ my $sector = $self->load_sector( $obj->_base_offset, 'refs' )
+ or die "Cannot load sector at '@{[$obj->_base_offset]}'\n";
+
+ my ($type, $class);
if ( $r eq 'ARRAY' || $r eq 'HASH' ) {
my $tmpvar;
if ( $r eq 'ARRAY' ) {
# See whether or not we are storing ourselves to ourself.
# Write the sector as data in this reference (keyed by $key)
+ my $value_sector = $self->load_sector( $tmpvar->_base_offset, 'refs' );
+ $sector->write_data({
+ key => $key,
+# key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
$value_sector->increment_refcount;
return 1;
}
$type = substr( $r, 0, 1 );
+ $class = 'DBM::Deep::Sector::DBI::Reference';
}
else {
if ( tied($value) ) {
DBM::Deep->_throw_error( "Cannot store something that is tied." );
}
+
+ $class = 'DBM::Deep::Sector::DBI::Scalar';
+ $type = 'S';
}
+ # Create this after loading the reference sector in case something bad
+ # happens. This way, we won't allocate value sector(s) needlessly.
+ my $value_sector = $class->new({
+ engine => $self,
+ data => $value,
+ type => $type,
+ });
+
+ $sector->write_data({
+ key => $key,
+# key_md5 => $self->_apply_digest( $key ),
+ value => $value_sector,
+ });
+
# This code is to make sure we write all the values in the $value to the
# disk and to make sure all changes to $value after the assignment are
# reflected on disk. This may be counter-intuitive at first, but it is
my @temp = @$value;
tie @$value, 'DBM::Deep', {
base_offset => $value_sector->offset,
- staleness => $value_sector->staleness,
+# staleness => $value_sector->staleness,
storage => $self->storage,
engine => $self,
};
my %temp = %$value;
tie %$value, 'DBM::Deep', {
base_offset => $value_sector->offset,
- staleness => $value_sector->staleness,
+# staleness => $value_sector->staleness,
storage => $self->storage,
engine => $self,
};
return 1;
}
-sub setup {
- my $self = shift;
- my ($obj) = @_;
-}
-
sub begin_work {
my $self = shift;
my ($obj) = @_;
# Setup file and tag signatures. These should never change.
sub SIG_FILE () { 'DPDB' }
sub SIG_HEADER () { 'h' }
-sub SIG_HASH () { 'H' }
-sub SIG_ARRAY () { 'A' }
sub SIG_NULL () { 'N' }
sub SIG_DATA () { 'D' }
sub SIG_INDEX () { 'I' }
sub SIG_BLIST () { 'B' }
sub SIG_FREE () { 'F' }
sub SIG_SIZE () { 1 }
+# SIG_HASH and SIG_ARRAY are defined in DBM::Deep::Engine
# Please refer to the pack() documentation for further information
my %StP = (
return;
}
+# exists() returns '' (false), not undef.
sub key_exists {
my $self = shift;
my ($obj, $key) = @_;
use strict;
use warnings FATAL => 'all';
-use base 'DBM::Deep::Sector';
+use base qw( DBM::Deep::Sector );
+
+use DBM::Deep::Sector::DBI::Reference ();
+use DBM::Deep::Sector::DBI::Scalar ();
+
+sub _init {
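+ # No-op by default; the Reference and Scalar subclasses override this.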
+}
+
+sub free {
+ my $self = shift;
+
+ $self->engine->storage->delete_from(
+ $self->table, $self->offset,
+ );
+}
+
+sub reload {
+ my $self = shift;
+ $self->_init;
+}
+
+sub load {
+ my $self = shift;
+ my ($engine, $offset, $type) = @_;
+
+ if ( $type eq 'refs' ) {
+ return DBM::Deep::Sector::DBI::Reference->new({
+ engine => $engine,
+ offset => $offset,
+ });
+ }
+ elsif ( $type eq 'datas' ) {
+ return DBM::Deep::Sector::DBI::Scalar->new({
+ engine => $engine,
+ offset => $offset,
+ });
+ }
+
+ DBM::Deep->_throw_error( "'$offset': Don't know what to do with type '$type'" );
+}
1;
__END__
--- /dev/null
+package DBM::Deep::Sector::DBI::Reference;
+
+use 5.006_000;
+
+use strict;
+use warnings FATAL => 'all';
+
+use base 'DBM::Deep::Sector::DBI';
+
+sub table { 'refs' }
+
+sub _init {
+ my $self = shift;
+
+ my $e = $self->engine;
+
+ unless ( $self->offset ) {
+ $self->{offset} = $self->engine->storage->write_to(
+ refs => undef,
+ ref_type => $self->type,
+ );
+ }
+ else {
+ my ($rows) = $self->engine->storage->read_from(
+ refs => $self->offset,
+ qw( ref_type ),
+ );
+
+ $self->{type} = $rows->[0]{ref_type};
+ }
+
+ return;
+}
+
+sub get_data_for {
+ my $self = shift;
+ my ($args) = @_;
+
+ my ($rows) = $self->engine->storage->read_from(
+ datas => { ref_id => $self->offset, key => $args->{key} },
+ qw( id ),
+ );
+
+ return unless $rows->[0]{id};
+
+ $self->load(
+ $self->engine,
+ $rows->[0]{id},
+ 'datas',
+ );
+}
+
+sub write_data {
+ my $self = shift;
+ my ($args) = @_;
+
+ $self->engine->storage->write_to(
+ datas => $args->{value}{offset},
+ ref_id => $self->offset,
+ data_type => 'S',
+ key => $args->{key},
+ value => $args->{value}{data},
+ class => $args->{value}{class},
+ );
+
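+ # write_to() replaces the datas row wholesale, so refresh the sector's
+ # cached fields from the database.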
+ $args->{value}->reload;
+}
+
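+# Like Perl's delete(), this returns the value that was removed (if any).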
+sub delete_key {
+ my $self = shift;
+ my ($args) = @_;
+
+ my $old_value = $self->get_data_for({
+ key => $args->{key},
+ });
+
+ my $data;
+ if ( $old_value ) {
+ $data = $old_value->data;
+ $old_value->free;
+ }
+
+ return $data;
+}
+
+1;
+__END__
--- /dev/null
+package DBM::Deep::Sector::DBI::Scalar;
+
+use strict;
+use warnings FATAL => 'all';
+
+use base qw( DBM::Deep::Sector::DBI );
+
+sub table { 'datas' }
+
+sub _init {
+ my $self = shift;
+
+ my $engine = $self->engine;
+ unless ( $self->offset ) {
+# my ($rows) = $self->engine->storage->write_to(
+# datas => undef,
+# ( map { $_ => $self->{$_} } qw( ref_id data_type key value class ) ),
+# );
+ }
+ else {
+ my ($rows) = $self->engine->storage->read_from(
+ datas => $self->offset,
+ qw( data_type key value class ),
+ );
+
+ $self->{$_} = $rows->[0]{$_} for qw( data_type key value class );
+ }
+
+ return;
+}
+
+sub data {
+ my $self = shift;
+ $self->{value};
+}
+
+=pod
+sub write_data {
+ my $self = shift;
+ my ($args) = @_;
+
+ $self->engine->storage->write_to(
+ datas => $args->{value}{offset},
+ ref_id => $self->offset,
+ data_type => 'S',
+ key => $args->{key},
+ value => $args->{value}{value},
+ class => $args->{value}{class},
+ );
+
+ $args->{value}->reload;
+}
+=cut
+
+1;
+__END__
use base qw( DBM::Deep::Sector );
-use DBM::Deep::Sector::File::Reference;
-use DBM::Deep::Sector::File::BucketList;
-use DBM::Deep::Sector::File::Index;
-use DBM::Deep::Sector::File::Null;
-use DBM::Deep::Sector::File::Scalar;
+use DBM::Deep::Sector::File::BucketList ();
+use DBM::Deep::Sector::File::Index ();
+use DBM::Deep::Sector::File::Null ();
+use DBM::Deep::Sector::File::Reference ();
+use DBM::Deep::Sector::File::Scalar ();
my $STALE_SIZE = 2;
sub size { $_[0]{engine}->data_sector_size }
sub free_meth { return '_add_free_data_sector' }
-sub clone {
- my $self = shift;
- return ref($self)->new({
- engine => $self->engine,
- type => $self->type,
- data => $self->data,
- });
-}
-
1;
__END__
my @trans_ids = $self->engine->get_running_txn_ids;
- # If we're the HEAD and there are running txns, then we need to clone this value to the other
- # transactions to preserve Isolation.
+ # If we're the HEAD and there are running txns, then we need to clone this
+ # value to the other transactions to preserve Isolation.
if ( $self->engine->trans_id == 0 ) {
if ( @trans_ids ) {
foreach my $other_trans_id ( @trans_ids ) {
$blist->mark_deleted( $args );
if ( $old_value ) {
+ #XXX Is this export => 1 actually doing anything?
$data = $old_value->data({ export => 1 });
$old_value->free;
}
return;
}
+sub clone {
+ my $self = shift;
+ return ref($self)->new({
+ engine => $self->engine,
+ type => $self->type,
+ data => $self->data,
+ });
+}
+
sub type { $_[0]{engine}->SIG_DATA }
sub _init {
my $self = shift;
sub data {
my $self = shift;
-# my ($args) = @_;
-# $args ||= {};
my $data;
while ( 1 ) {
use base 'DBM::Deep::Storage';
+use DBI;
+
+sub new {
+ my $class = shift;
+ my ($args) = @_;
+
+ my $self = bless {
+ autobless => 1,
+ dbh => undef,
+ dbi => undef,
+ }, $class;
+
+ # Grab the parameters we want to use
+ foreach my $param ( keys %$self ) {
+ next unless exists $args->{$param};
+ $self->{$param} = $args->{$param};
+ }
+
+ $self->open unless $self->{dbh};
+
+ return $self;
+}
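+
+# Construction sketch (the dbi hashref keys are the ones open() consumes
+# below; the DSN is only an example):
+#
+#   my $storage = DBM::Deep::Storage::DBI->new({
+#       dbi => {
+#           dsn      => 'dbi:mysql:database=test',
+#           username => 'user',
+#           password => 'pass',
+#           connect_args => {},   # optional extra DBI attributes
+#       },
+#   });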
+
+sub open {
+ my $self = shift;
+
+ # TODO: Is this really what should happen?
+ return if $self->{dbh};
+
+ $self->{dbh} = DBI->connect(
+ $self->{dbi}{dsn}, $self->{dbi}{username}, $self->{dbi}{password}, {
+ AutoCommit => 0,
+ PrintError => 0,
+ RaiseError => 1,
+ %{ $self->{dbi}{connect_args} || {} },
+ },
+ ) or die $DBI::errstr;
+
+ return 1;
+}
+
+sub close {
+ my $self = shift;
+ $self->{dbh}->disconnect if $self->{dbh};
+ return 1;
+}
+
+sub DESTROY {
+ my $self = shift;
+ $self->close if ref $self;
+}
+
+# Is there a portable way of determining writability to a DBH?
sub is_writable {
my $self = shift;
return 1;
my $self = shift;
}
+sub read_from {
+ my $self = shift;
+ my ($table, $cond, @cols) = @_;
+
+ $cond = { id => $cond } unless ref $cond;
+
+ my @keys = keys %$cond;
+ my $where = join ' AND ', map { "`$_` = ?" } @keys;
+
+ return $self->{dbh}->selectall_arrayref(
+ "SELECT `@{[join '`,`', @cols ]}` FROM $table WHERE $where",
+ { Slice => {} }, @{$cond}{@keys},
+ );
+}
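+
+# For example, read_from( refs => 1, qw( ref_type ) ) runs:
+#   SELECT `ref_type` FROM refs WHERE `id` = ?
+# and returns an arrayref of hashrefs, one per matching row.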
+
+sub flush {}
+
+sub write_to {
+ my $self = shift;
+ my ($table, $id, %args) = @_;
+
+ if ( $id ) {
+ $self->{dbh}->do(
+ "DELETE FROM $table WHERE id = $id",
+ );
+ }
+
+ my @keys = keys %args;
+ my $sql =
+ "INSERT INTO $table ( `id`, "
+ . join( ',', map { "`$_`" } @keys )
+ . ") VALUES ("
+ . join( ',', ('?') x (@keys + 1) )
+ . ")";
+ # Uncomment to trace the generated SQL and bind values:
+ # warn $sql . $/;
+ # { no warnings; warn "@args{@keys}\n"; }
+ $self->{dbh}->do( $sql, undef, $id, @args{@keys} );
+
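+ # XXX mysql_insertid is MySQL-specific; DBI's $dbh->last_insert_id()
+ # would be the portable way to fetch the generated id.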
+ return $self->{dbh}{mysql_insertid};
+}
+
+sub delete_from {
+ my $self = shift;
+ my ($table, $id) = @_;
+
+ $self->{dbh}->do(
+ "DELETE FROM $table WHERE id = ?", undef, $id,
+ );
+}
+
1;
__END__
=cut
+# TODO: What happens if we ->open when we already have a $fh?
sub open {
my $self = shift;
#
# Q: How do we make sure that the iterator is unique? Is it supposed to be?
+=pod
##
# count keys
##
throws_ok {
$db->exists(undef);
} qr/Cannot use an undefined hash key/, "EXISTS fails on an undefined key";
+=cut
}
done_testing;