requires 'JSON::Any' => 1.00;
requires 'Scope::Guard' => 0.03;
requires 'Path::Class' => 0;
+requires 'List::Util' => 1.19;
# Perl 5.8.0 doesn't have utf8::is_utf8()
requires 'Encode' => 0 if ($] <= 5.008000);
delete $self->{_dirty_columns};
return unless $self->in_storage; # Don't reload if we aren't real!
- my $reload = $self->result_source->resultset->find(
- map { $self->$_ } $self->primary_columns
- );
+ my $reload = $self->result_source->schema->storage->reload_row($self);
+
unless ($reload) { # If we got deleted in the mean-time
$self->in_storage(0);
return $self;
=over 4
-=item Arguments: $storage_type
+=item Arguments: $storage_type|{$storage_type, \%args}
-=item Return Value: $storage_type
+=item Return Value: $storage_type|{$storage_type, \%args}
=back
dealing with MSSQL via L<DBD::Sybase>, in which case you'd set it to
C<::DBI::Sybase::MSSQL>.
+If your storage type requires instantiation arguments, those are defined as a
+second argument in the form of a hashref, and the entire value needs to be
+wrapped into an arrayref or a hashref. We support both types of refs here in
+order to play nice with your Config::[class] of choice.
+
+See L<DBIx::Class::Storage::DBI::Replicated> for an example of this.
+
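+For example (a minimal sketch; the C<::Random> balancer type is provided by
+L<DBIx::Class::Storage::DBI::Replicated>):
+
+  ## arrayref form: [ $storage_type, \%args ]
+  $schema->storage_type(
+    [ '::DBI::Replicated', { balancer_type => '::Random' } ]
+  );
+
+  ## hashref form: { $storage_type => \%args }
+  $schema->storage_type(
+    { '::DBI::Replicated' => { balancer_type => '::Random' } }
+  );
+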
=head2 connection
=over 4
sub connection {
my ($self, @info) = @_;
return $self if !@info && $self->storage;
- my $storage_class = $self->storage_type;
+
+  my ($storage_class, $args) = ref $self->storage_type
+    ? ($self->_normalize_storage_type($self->storage_type), {})
+    : ($self->storage_type, {});
+
$storage_class = 'DBIx::Class::Storage'.$storage_class
if $storage_class =~ m/^::/;
eval "require ${storage_class};";
$self->throw_exception(
"No arguments to load_classes and couldn't load ${storage_class} ($@)"
) if $@;
- my $storage = $storage_class->new($self);
+  my $storage = $storage_class->new($self, $args);
$storage->connect_info(\@info);
$self->storage($storage);
return $self;
}
+sub _normalize_storage_type {
+ my ($self, $storage_type) = @_;
+ if(ref $storage_type eq 'ARRAY') {
+ return @$storage_type;
+ } elsif(ref $storage_type eq 'HASH') {
+ return %$storage_type;
+ } else {
+ $self->throw_exception('Unsupported REFTYPE given: '. ref $storage_type);
+ }
+}
+
=head2 connect
=over 4
sub select_single { die "Virtual method!" }
+=head2 reload_row ($row)
+
+Given a L<DBIx::Class::Row> object, loads and returns the matching version from
+storage. Does not affect the existing row object.
+
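+For example (usage sketch):
+
+  my $fresh_row = $schema->storage->reload_row($row);
+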
+=cut
+
+sub reload_row { die "Virtual method!" }
+
=head2 columns_info_for
Returns metadata for the given source's columns. This
if ( $self->debug ) {
@bind = $self->_fix_bind_params(@bind);
+
$self->debugobj->query_start( $sql, @bind );
}
}
return @row;
}
+sub reload_row {
+ my ($self, $row) = @_;
+
+ my $reload = $row->result_source->resultset->find(
+ map { $row->$_ } $row->primary_columns
+ );
+
+ return $reload;
+}
+
=head2 sth
=over 4
}
}
+=head2 is_replicating
+
+A boolean that reports if a particular L<DBIx::Class::Storage::DBI> is set to
+replicate from a master database. Default is undef, which is the result
+returned by databases that don't support replication.
+
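+For example:
+
+  if( $storage->is_replicating ) {
+    ## this storage is a slave following a master
+  }
+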
+=cut
+
+sub is_replicating {
+    return;
+}
+
+=head2 lag_behind_master
+
+Returns a number that represents a certain amount of lag behind a master db
+when a given storage is replicating. The number is database dependent, but
+starts at zero and increases with the amount of lag. Default is undef.
+
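+For example (MySQL reports C<Seconds_Behind_Master> here):
+
+  my $lag = $storage->lag_behind_master;
+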
+=cut
+
+sub lag_behind_master {
+ return;
+}
+
sub DESTROY {
my $self = shift;
return if !$self->_dbh;
package DBIx::Class::Storage::DBI::Replicated;
-use strict;
-use warnings;
-
+use Moose;
use DBIx::Class::Storage::DBI;
-use DBD::Multi;
-
-use base qw/Class::Accessor::Fast/;
+use DBIx::Class::Storage::DBI::Replicated::Pool;
+use DBIx::Class::Storage::DBI::Replicated::Balancer;
+use Scalar::Util qw(blessed);
-__PACKAGE__->mk_accessors( qw/read_source write_source/ );
+extends 'DBIx::Class::Storage::DBI', 'Moose::Object';
=head1 NAME
=head1 SYNOPSIS
The following example shows how to change an existing $schema to a replicated
-storage type and update it's connection information to contain a master DSN and
-an array of slaves.
+storage type, add some replicated (readonly) databases, and perform reporting
+tasks.
## Change storage_type in your schema class
- $schema->storage_type( '::DBI::Replicated' );
+  $schema->storage_type( ['::DBI::Replicated', {balancer_type=>'::Random'}] );
- ## Set your connection.
- $schema->connect(
- $dsn, $user, $password, {
- AutoCommit => 1,
- ## Other standard DBI connection or DBD custom attributes added as
- ## usual. Additionally, we have two custom attributes for defining
- ## slave information and controlling how the underlying DBD::Multi
- slaves_connect_info => [
- ## Define each slave like a 'normal' DBI connection, but you add
- ## in a DBD::Multi custom attribute to define how the slave is
- ## prioritized. Please see DBD::Multi for more.
- [$slave1dsn, $user, $password, {%slave1opts, priority=>10}],
- [$slave2dsn, $user, $password, {%slave2opts, priority=>10}],
- [$slave3dsn, $user, $password, {%slave3opts, priority=>20}],
- ## add in a preexisting database handle
- [$dbh, '','', {priority=>30}],
- ## DBD::Multi will call this coderef for connects
- [sub { DBI->connect(< DSN info >) }, '', '', {priority=>40}],
- ## If the last item is hashref, we use that for DBD::Multi's
- ## configuration information. Again, see DBD::Multi for more.
- {timeout=>25, failed_max=>2},
- ],
- },
+ ## Add some slaves. Basically this is an array of arrayrefs, where each
+ ## arrayref is database connect information
+
+ $schema->storage->connect_replicants(
+ [$dsn1, $user, $pass, \%opts],
+ [$dsn2, $user, $pass, \%opts],
+ [$dsn3, $user, $pass, \%opts],
);
- ## Now, just use the schema as normal
- $schema->resultset('Table')->find(< unique >); ## Reads will use slaves
- $schema->resultset('Table')->create(\%info); ## Writes will use master
-
=head1 DESCRIPTION
Warning: This class is marked ALPHA. We are using this in development and have
(INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to master
database, all read-type queries (SELECTs) go to the slave database.
-For every slave database you can define a priority value, which controls data
-source usage pattern. It uses L<DBD::Multi>, so first the lower priority data
-sources used (if they have the same priority, the are used randomized), than
-if all low priority data sources fail, higher ones tried in order.
+Basically, any method request that L<DBIx::Class::Storage::DBI> would normally
+handle gets delegated to one of the two attributes: L</read_handler> or
+L</write_handler>. Additionally, some methods need to be distributed
+to all existing storages. This way our storage class is a drop-in replacement
+for L<DBIx::Class::Storage::DBI>.
+
+Read traffic is spread across the replicants (slaves) according to a user
+selected algorithm. The default algorithm is random weighted.
+
+=head1 NOTES
+
+The consistency between master and replicants is database specific. The Pool
+gives you a method to validate its replicants, removing and replacing them
+when they fail/pass predefined criteria. It is recommended that your application
+define two schemas, one using the replicated storage and another that just
+connects to the master.
-=head1 CONFIGURATION
+=head1 ATTRIBUTES
-Please see L<DBD::Multi> for most configuration information.
+This class defines the following attributes.
+
+=head2 pool_type
+
+Contains the classname which will instantiate the L</pool> object. Defaults
+to: L<DBIx::Class::Storage::DBI::Replicated::Pool>.
=cut
-sub new {
- my $proto = shift;
- my $class = ref( $proto ) || $proto;
- my $self = {};
+has 'pool_type' => (
+ is=>'ro',
+ isa=>'ClassName',
+ lazy_build=>1,
+ handles=>{
+ 'create_pool' => 'new',
+ },
+);
- bless( $self, $class );
+=head2 pool_args
- $self->write_source( DBIx::Class::Storage::DBI->new );
- $self->read_source( DBIx::Class::Storage::DBI->new );
+Contains a hashref of initialization arguments to pass to the Pool object.
+See L<DBIx::Class::Storage::DBI::Replicated::Pool> for available arguments.
- return $self;
-}
+=cut
-sub all_sources {
- my $self = shift;
+has 'pool_args' => (
+ is=>'ro',
+ isa=>'HashRef',
+ lazy=>1,
+ required=>1,
+ default=>sub { {} },
+);
- my @sources = ($self->read_source, $self->write_source);
- return wantarray ? @sources : \@sources;
-}
+=head2 balancer_type
-sub _connect_info {
- my $self = shift;
- my $master = $self->write_source->_connect_info;
- $master->[-1]->{slave_connect_info} = $self->read_source->_connect_info;
- return $master;
-}
+The replication pool requires a balancer class to provide the methods for
+choosing how to spread the query load across each replicant in the pool.
+
+=cut
-sub connect_info {
- my ($self, $source_info) = @_;
+has 'balancer_type' => (
+ is=>'ro',
+ isa=>'ClassName',
+ lazy_build=>1,
+ handles=>{
+ 'create_balancer' => 'new',
+ },
+);
- ## if there is no $source_info, treat this sub like an accessor
- return $self->_connect_info
- if !$source_info;
-
- ## Alright, let's conect the master
- $self->write_source->connect_info($source_info);
-
- ## Now, build and then connect the Slaves
- my @slaves_connect_info = @{$source_info->[-1]->{slaves_connect_info}};
- my $dbd_multi_config = ref $slaves_connect_info[-1] eq 'HASH'
- ? pop @slaves_connect_info : {};
-
- ## We need to do this since SQL::Abstract::Limit can't guess what DBD::Multi is
- $dbd_multi_config->{limit_dialect} = $self->write_source->sql_maker->limit_dialect
- unless defined $dbd_multi_config->{limit_dialect};
-
- @slaves_connect_info = map {
- ## if the first element in the arrayhash is a ref, make that the value
- my $db = ref $_->[0] ? $_->[0] : $_;
- my $priority = $_->[-1]->{priority} || 10; ## default priority is 10
- $priority => $db;
- } @slaves_connect_info;
+=head2 balancer_args
+
+Contains a hashref of initialization arguments to pass to the Balancer object.
+See L<DBIx::Class::Storage::DBI::Replicated::Balancer> for available arguments.
+
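+For example, balancer configuration is typically supplied through the storage
+type arguments (this mirrors the test suite setup):
+
+  $schema->storage_type([
+    '::DBI::Replicated', {
+      balancer_type => '::Random',
+      balancer_args => { auto_validate_every => 100 },
+    },
+  ]);
+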
+=cut
+
+has 'balancer_args' => (
+ is=>'ro',
+ isa=>'HashRef',
+ lazy=>1,
+ required=>1,
+ default=>sub { {} },
+);
+
+=head2 pool
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Pool> or derived class. This is a
+container class for one or more replicated databases.
+
+=cut
+
+has 'pool' => (
+ is=>'ro',
+ isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+ lazy_build=>1,
+ handles=>[qw/
+ connect_replicants
+ replicants
+ has_replicants
+ /],
+);
+
+=head2 balancer
+
+Is a L<DBIx::Class::Storage::DBI::Replicated::Balancer> or derived class. This
+is a class that takes a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>)
+and chooses which replicant should service each read request.
+
+=cut
+
+has 'balancer' => (
+ is=>'ro',
+ isa=>'DBIx::Class::Storage::DBI::Replicated::Balancer',
+ lazy_build=>1,
+ handles=>[qw/auto_validate_every/],
+);
+
+=head2 master
+
+The master defines the canonical state for a pool of connected databases. All
+the replicants are expected to match this database's state. Thus, in a classic
+Master / Slaves distributed system, all the slaves are expected to replicate
+the Master's state as quickly as possible. This is the only database in the
+pool of databases that is allowed to handle write traffic.
+
+=cut
+
+has 'master' => (
+ is=> 'ro',
+ isa=>'DBIx::Class::Storage::DBI',
+ lazy_build=>1,
+);
+
+=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Class::Storage::DBI INTERFACE
+
+The following attributes are delegated all the methods required for the
+L<DBIx::Class::Storage::DBI> interface.
+
+=head2 read_handler
+
+Defines an object that implements the read side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'read_handler' => (
+ is=>'rw',
+ isa=>'Object',
+ lazy_build=>1,
+ handles=>[qw/
+ select
+ select_single
+ columns_info_for
+ /],
+);
+
+=head2 write_handler
+
+Defines an object that implements the write side of L<DBIx::Class::Storage::DBI>.
+
+=cut
+
+has 'write_handler' => (
+ is=>'ro',
+ isa=>'Object',
+  lazy_build=>1,
+ handles=>[qw/
+ on_connect_do
+ on_disconnect_do
+ connect_info
+ throw_exception
+ sql_maker
+ sqlt_type
+ create_ddl_dir
+ deployment_statements
+ datetime_parser
+ datetime_parser_type
+ last_insert_id
+ insert
+ insert_bulk
+ update
+ delete
+ dbh
+ txn_do
+ txn_commit
+ txn_rollback
+ sth
+ deploy
+ schema
+ reload_row
+ /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 new
+
+L<DBIx::Class::Schema> when instantiating its storage passes itself as the
+first argument. We need to invoke L</new> on the underlying parent class, make
+sure we properly give it a L<Moose> meta class, and then correctly instantiate
+our attributes. Basically we pass on whatever the schema has in its class
+data for 'storage_type_args' to our replicated storage type.
+
+=cut
+
+sub new {
+ my $class = shift @_;
+ my $schema = shift @_;
+ my $storage_type_args = shift @_;
+ my $obj = $class->SUPER::new($schema, $storage_type_args, @_);
- $self->read_source->connect_info([
- 'dbi:Multi:', undef, undef, {
- dsns => [@slaves_connect_info],
- %$dbd_multi_config,
- },
- ]);
+ ## Hate to do it this way, but can't seem to get advice on the attribute working right
+ ## maybe we can do a type and coercion for it.
+ if( $storage_type_args->{balancer_type} && $storage_type_args->{balancer_type}=~m/^::/) {
+ $storage_type_args->{balancer_type} = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$storage_type_args->{balancer_type};
+ eval "require $storage_type_args->{balancer_type}";
+ }
- ## Return the formated connection information
- return $self->_connect_info;
+ return $class->meta->new_object(
+ __INSTANCE__ => $obj,
+ %$storage_type_args,
+ @_,
+ );
}
-sub select {
- shift->read_source->select( @_ );
-}
-sub select_single {
- shift->read_source->select_single( @_ );
-}
-sub throw_exception {
- shift->read_source->throw_exception( @_ );
-}
-sub sql_maker {
- shift->read_source->sql_maker( @_ );
-}
-sub columns_info_for {
- shift->read_source->columns_info_for( @_ );
-}
-sub sqlt_type {
- shift->read_source->sqlt_type( @_ );
-}
-sub create_ddl_dir {
- shift->read_source->create_ddl_dir( @_ );
-}
-sub deployment_statements {
- shift->read_source->deployment_statements( @_ );
-}
-sub datetime_parser {
- shift->read_source->datetime_parser( @_ );
-}
-sub datetime_parser_type {
- shift->read_source->datetime_parser_type( @_ );
-}
-sub build_datetime_parser {
- shift->read_source->build_datetime_parser( @_ );
+=head2 _build_master
+
+Lazy builder for the L</master> attribute.
+
+=cut
+
+sub _build_master {
+ DBIx::Class::Storage::DBI->new;
}
-sub limit_dialect { $_->limit_dialect( @_ ) for( shift->all_sources ) }
-sub quote_char { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub name_sep { $_->quote_char( @_ ) for( shift->all_sources ) }
-sub disconnect { $_->disconnect( @_ ) for( shift->all_sources ) }
-sub set_schema { $_->set_schema( @_ ) for( shift->all_sources ) }
+=head2 _build_pool_type
-sub DESTROY {
- my $self = shift;
+Lazy builder for the L</pool_type> attribute.
- undef $self->{write_source};
- undef $self->{read_sources};
+=cut
+
+sub _build_pool_type {
+ return 'DBIx::Class::Storage::DBI::Replicated::Pool';
}
-sub last_insert_id {
- shift->write_source->last_insert_id( @_ );
+=head2 _build_pool
+
+Lazy builder for the L</pool> attribute.
+
+=cut
+
+sub _build_pool {
+ my $self = shift @_;
+ $self->create_pool(%{$self->pool_args});
}
-sub insert {
- shift->write_source->insert( @_ );
+
+=head2 _build_balancer_type
+
+Lazy builder for the L</balancer_type> attribute.
+
+=cut
+
+sub _build_balancer_type {
+ return 'DBIx::Class::Storage::DBI::Replicated::Balancer::First';
}
-sub update {
- shift->write_source->update( @_ );
+
+=head2 _build_balancer
+
+Lazy builder for the L</balancer> attribute. This takes a Pool object so that
+the balancer knows which pool it's balancing.
+
+=cut
+
+sub _build_balancer {
+ my $self = shift @_;
+ $self->create_balancer(
+ pool=>$self->pool,
+ master=>$self->master,
+ %{$self->balancer_args},
+ );
}
-sub update_all {
- shift->write_source->update_all( @_ );
+
+=head2 _build_write_handler
+
+Lazy builder for the L</write_handler> attribute. The default is to set this to
+the L</master>.
+
+=cut
+
+sub _build_write_handler {
+ return shift->master;
}
-sub delete {
- shift->write_source->delete( @_ );
+
+=head2 _build_read_handler
+
+Lazy builder for the L</read_handler> attribute. The default is to set this to
+the L</balancer>.
+
+=cut
+
+sub _build_read_handler {
+ return shift->balancer;
}
-sub delete_all {
- shift->write_source->delete_all( @_ );
+
+=head2 around: connect_replicants
+
+All calls to connect_replicants need to have an existing $schema tacked onto
+the top of the args, since L<DBIx::Class::Storage::DBI> needs it.
+
+=cut
+
+around 'connect_replicants' => sub {
+ my ($method, $self, @args) = @_;
+ $self->$method($self->schema, @args);
+};
+
+=head2 all_storages
+
+Returns an array of all the connected storage backends. The first element
+in the returned array is the master, and the remainder are each of the
+replicants.
+
+=cut
+
+sub all_storages {
+ my $self = shift @_;
+
+ return grep {defined $_ && blessed $_} (
+ $self->master,
+ $self->replicants,
+ );
}
-sub create {
- shift->write_source->create( @_ );
+
+=head2 execute_reliably ($coderef, ?@args)
+
+Given a coderef, saves the current state of the L</read_handler>, forces it to
+use reliable storage (i.e. sets it to the master), executes the coderef and then
+restores the original state.
+
+Example:
+
+ my $reliably = sub {
+ my $name = shift @_;
+ $schema->resultset('User')->create({name=>$name});
+ my $user_rs = $schema->resultset('User')->find({name=>$name});
+ return $user_rs;
+ };
+
+ my $user_rs = $schema->storage->execute_reliably($reliably, 'John');
+
+Use this when you must be certain of your database state, such as when you just
+inserted something and need to get a resultset including it, etc.
+
+=cut
+
+sub execute_reliably {
+ my ($self, $coderef, @args) = @_;
+
+ unless( ref $coderef eq 'CODE') {
+    $self->throw_exception('First argument must be a coderef');
+ }
+
+ ##Get copy of master storage
+ my $master = $self->master;
+
+  ##Get whatever the current read handler is
+ my $current = $self->read_handler;
+
+ ##Set the read handler to master
+ $self->read_handler($master);
+
+ ## do whatever the caller needs
+ my @result;
+ my $want_array = wantarray;
+
+ eval {
+ if($want_array) {
+ @result = $coderef->(@args);
+ }
+ elsif(defined $want_array) {
+ ($result[0]) = ($coderef->(@args));
+ } else {
+ $coderef->(@args);
+ }
+ };
+
+ ##Reset to the original state
+ $self->read_handler($current);
+
+ ##Exception testing has to come last, otherwise you might leave the
+ ##read_handler set to master.
+
+ if($@) {
+ $self->throw_exception("coderef returned an error: $@");
+ } else {
+ return $want_array ? @result : $result[0];
+ }
}
-sub find_or_create {
- shift->write_source->find_or_create( @_ );
+
+=head2 set_reliable_storage
+
+Sets the current $schema to be 'reliable', that is, all queries, both read and
+write, are sent to the master.
+
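+For example:
+
+  $schema->storage->set_reliable_storage;
+  ## ... all reads are now sent to the master ...
+  $schema->storage->set_balanced_storage;
+  ## ... reads are again spread across the replicants ...
+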
+=cut
+
+sub set_reliable_storage {
+ my $self = shift @_;
+ my $schema = $self->schema;
+ my $write_handler = $self->schema->storage->write_handler;
+
+ $schema->storage->read_handler($write_handler);
}
-sub update_or_create {
- shift->write_source->update_or_create( @_ );
+
+=head2 set_balanced_storage
+
+Sets the current $schema to use the L</balancer> for all reads, while all
+writes are sent to the master only.
+
+=cut
+
+sub set_balanced_storage {
+ my $self = shift @_;
+ my $schema = $self->schema;
+ my $write_handler = $self->schema->storage->balancer;
+
+ $schema->storage->read_handler($write_handler);
}
+
+=head2 around: txn_do ($coderef)
+
+Overload to the txn_do method, which is delegated to whatever the
+L</write_handler> is set to. We overload this in order to wrap it inside a
+L</execute_reliably> call.
+
+=cut
+
+around 'txn_do' => sub {
+ my($txn_do, $self, $coderef, @args) = @_;
+ $self->execute_reliably(sub {$self->$txn_do($coderef, @args)});
+};
+
+=head2 reload_row ($row)
+
+Overload to the reload_row method so that the reloading is always directed to
+the master storage.
+
+=cut
+
+around 'reload_row' => sub {
+ my ($reload_row, $self, $row) = @_;
+ return $self->execute_reliably(sub {
+ return $self->$reload_row(shift);
+ }, $row);
+};
+
+=head2 connected
+
+Check that the master and at least one of the replicants is connected.
+
+=cut
+
sub connected {
- shift->write_source->connected( @_ );
+ my $self = shift @_;
+
+ return
+ $self->master->connected &&
+ $self->pool->connected_replicants;
}
+
+=head2 ensure_connected
+
+Make sure all the storages are connected.
+
+=cut
+
sub ensure_connected {
- shift->write_source->ensure_connected( @_ );
-}
-sub dbh {
- shift->write_source->dbh( @_ );
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->ensure_connected(@_);
+ }
}
-sub txn_do {
- shift->write_source->txn_do( @_ );
-}
-sub txn_commit {
- shift->write_source->txn_commit( @_ );
+
+=head2 limit_dialect
+
+Set the limit_dialect for all existing storages
+
+=cut
+
+sub limit_dialect {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->limit_dialect(@_);
+ }
}
-sub txn_rollback {
- shift->write_source->txn_rollback( @_ );
+
+=head2 quote_char
+
+Set the quote_char for all existing storages
+
+=cut
+
+sub quote_char {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->quote_char(@_);
+ }
}
-sub sth {
- shift->write_source->sth( @_ );
+
+=head2 name_sep
+
+Set the name_sep for all existing storages
+
+=cut
+
+sub name_sep {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->name_sep(@_);
+ }
}
-sub deploy {
- shift->write_source->deploy( @_ );
+
+=head2 set_schema
+
+Set the schema object for all existing storages
+
+=cut
+
+sub set_schema {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->set_schema(@_);
+ }
}
-sub _prep_for_execute {
- shift->write_source->_prep_for_execute(@_);
+
+=head2 debug
+
+Set a debug flag across all storages.
+
+=cut
+
+sub debug {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->debug(@_);
+ }
}
+=head2 debugobj
+
+Set a debug object across all storages.
+
+=cut
+
sub debugobj {
- shift->write_source->debugobj(@_);
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->debugobj(@_);
+ }
}
-sub debug {
- shift->write_source->debug(@_);
+
+=head2 debugfh
+
+Set a debugfh object across all storages.
+
+=cut
+
+sub debugfh {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->debugfh(@_);
+ }
}
-sub debugfh { shift->_not_supported( 'debugfh' ) };
-sub debugcb { shift->_not_supported( 'debugcb' ) };
+=head2 debugcb
-sub _not_supported {
- my( $self, $method ) = @_;
Set a debug callback across all storages.
- die "This Storage does not support $method method.";
+=cut
+
+sub debugcb {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->debugcb(@_);
+ }
}
-=head1 SEE ALSO
+=head2 disconnect
+
+Disconnect everything.
-L<DBI::Class::Storage::DBI>, L<DBD::Multi>, L<DBI>
+=cut
+
+sub disconnect {
+ my $self = shift @_;
+ foreach my $source ($self->all_storages) {
+ $source->disconnect(@_);
+ }
+}
=head1 AUTHOR
-Norbert Csongrádi <bert@cpan.org>
+ John Napiorkowski <john.napiorkowski@takkle.com>
-Peter Siklósi <einon@einon.hu>
+Based on code originated by:
-John Napiorkowski <john.napiorkowski@takkle.com>
+ Norbert Csongrádi <bert@cpan.org>
+ Peter Siklósi <einon@einon.hu>
=head1 LICENSE
--- /dev/null
+package DBIx::Class::Storage::DBI::Replicated::Balancer;
+
+use Moose::Role;
+requires 'next_storage';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer - A Software Load Balancer
+
+=head1 SYNOPSIS
+
+This role is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 auto_validate_every ($seconds)
+
+If auto_validate_every has some sort of value, run the pool's
+C<validate_replicants> every $seconds seconds. Be careful with this, because if
+you set it to 0 you will end up validating every query.
+
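+For example:
+
+  ## revalidate the replicants at most once every 100 seconds
+  $schema->storage->balancer->auto_validate_every(100);
+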
+=cut
+
+has 'auto_validate_every' => (
+ is=>'rw',
+ isa=>'Int',
+ predicate=>'has_auto_validate_every',
+);
+
+=head2 master
+
+The L<DBIx::Class::Storage::DBI> object that is the master database all the
+replicants are trying to follow. The balancer needs to know it since it's the
+ultimate fallback.
+
+=cut
+
+has 'master' => (
+ is=>'ro',
+ isa=>'DBIx::Class::Storage::DBI',
+ required=>1,
+);
+
+=head2 pool
+
+The L<DBIx::Class::Storage::DBI::Replicated::Pool> object that we are trying to
+balance.
+
+=cut
+
+has 'pool' => (
+ is=>'ro',
+ isa=>'DBIx::Class::Storage::DBI::Replicated::Pool',
+ required=>1,
+);
+
+=head2 current_replicant
+
+Replicant storages (slaves) handle all read only traffic. The assumption is
+that your database will become read-bound well before it becomes write-bound
+and that being able to spread your read only traffic around to multiple
+databases is going to help you to scale traffic.
+
+This attribute returns the next slave to handle a read request. Your L</pool>
+attribute has methods to help you shuffle through all the available replicants
+via its balancer object.
+
+=cut
+
+has 'current_replicant' => (
+ is=> 'rw',
+ isa=>'DBIx::Class::Storage::DBI',
+ lazy_build=>1,
+ handles=>[qw/
+ select
+ select_single
+ columns_info_for
+ /],
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 _build_current_replicant
+
+Lazy builder for the L</current_replicant> attribute.
+
+=cut
+
+sub _build_current_replicant {
+ my $self = shift @_;
+ $self->next_storage;
+}
+
+=head2 next_storage
+
+This method should be defined in the class which consumes this role.
+
+Given a pool object, return the next replicant that will serve queries. The
+default behavior is to grab the first replicant it finds but you can write
+your own subclasses of L<DBIx::Class::Storage::DBI::Replicated::Balancer> to
+support other balance systems.
+
+This returns from the pool of active replicants. If there are no active
+replicants, then you should have it return the master as an ultimate fallback.
+
+=head2 around: next_storage
+
+Advice on next_storage to add the autovalidation. We have this broken out so
+that it's easier to break out the auto validation into a role.
+
+This also returns the master in the case that none of the replicants are active
+or you just forgot to create them :)
+
+=cut
+
+around 'next_storage' => sub {
+ my ($next_storage, $self, @args) = @_;
+ my $now = time;
+
+ ## Do we need to validate the replicants?
+ if(
+ $self->has_auto_validate_every &&
+ ($self->auto_validate_every + $self->pool->last_validated) <= $now
+ ) {
+ $self->pool->validate_replicants;
+ }
+
+ ## Get a replicant, or the master if none
+ my $next = $self->$next_storage(@args);
+ return $next ? $next:$self->master;
+};
+
+=head2 before: select
+
+Advice on the select method. Each time we use a replicant
+we need to change it via the storage pool algorithm. That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+before 'select' => sub {
+ my $self = shift @_;
+ my $next_replicant = $self->next_storage;
+ $self->current_replicant($next_replicant);
+};
+
+=head2 before: select_single
+
+Advice on the select_single method. Each time we use a replicant
+we need to change it via the storage pool algorithm. That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+before 'select_single' => sub {
+ my $self = shift @_;
+ my $next_replicant = $self->next_storage;
+ $self->current_replicant($next_replicant);
+};
+
+=head2 before: columns_info_for
+
+Advice on the columns_info_for method. Each time we use a replicant
+we need to change it via the storage pool algorithm. That way we are spreading
+the load evenly (hopefully) across existing capacity.
+
+=cut
+
+before 'columns_info_for' => sub {
+ my $self = shift @_;
+ my $next_replicant = $self->next_storage;
+ $self->current_replicant($next_replicant);
+};
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski@takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
--- /dev/null
+package DBIx::Class::Storage::DBI::Replicated::Balancer::First;
+
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::First - Just get the first Balancer
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
+shouldn't need to create instances of this class.
+
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer just gets whatever is the first replicant in the pool.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Just get the first storage. Probably only good when you have one replicant.
+
+=cut
+
+sub next_storage {
+ return (shift->pool->active_replicants)[0];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski@takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::Replicated::Balancer::Random;
+
+use List::Util qw(shuffle);
+use Moose;
+with 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Balancer::Random - A 'random' Balancer
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
+shouldn't need to create instances of this class.
+
+=head1 DESCRIPTION
+
+Given a pool (L<DBIx::Class::Storage::DBI::Replicated::Pool>) of replicated
+databases (L<DBIx::Class::Storage::DBI::Replicated::Replicant>), defines a
+method by which query load can be spread out across each replicant in the pool.
+
+This Balancer uses the L<List::Util> function C<shuffle> to randomly pick an
+active replicant from the associated pool. This may or may not be random
+enough for you; patches welcome.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 next_storage
+
+Returns an active replicant at random. Please note that due to the nature of
+randomness it's possible for a particular active replicant to be requested
+several times in a row.
+
+=cut
+
+sub next_storage {
+ return (shuffle(shift->pool->active_replicants))[0];
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski@takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file
--- /dev/null
+package DBIx::Class::Storage::DBI::Replicated::Pool;
+
+use Moose;
+use MooseX::AttributeHelpers;
+use DBIx::Class::Storage::DBI::Replicated::Replicant;
+use List::Util qw(sum);
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Pool - Manage a pool of replicants
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>. You
+shouldn't need to create instances of this class.
+
+=head1 DESCRIPTION
+
+In a replicated storage type, there is at least one replicant to handle the
+read only traffic. The Pool class manages this replicant, or list of
+replicants, and gives some methods for querying information about their status.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 maximum_lag ($num)
+
+This is a number which defines the maximum allowed lag returned by the
+L<DBIx::Class::Storage::DBI/lag_behind_master> method. The default is 0. In
+general, this should return a larger number when the replicant is lagging
+behind its master; however, the implementation of this is database specific, so
+don't count on this number having a fixed meaning. For example, MySQL will
+return a number of seconds that the replicating database is lagging.
+
+=cut
+
+has 'maximum_lag' => (
+ is=>'rw',
+ isa=>'Num',
+ required=>1,
+ lazy=>1,
+ default=>0,
+);
+
+=head2 last_validated
+
+This is an integer representing the time the replicants were last validated.
+It's nothing fancy, just an integer provided via the perl C<time> builtin.
+
+=cut
+
+has 'last_validated' => (
+ is=>'rw',
+ isa=>'Int',
+ reader=>'last_validated',
+ writer=>'_last_validated',
+ lazy=>1,
+ default=>sub {
+ 0;
+ },
+);
+
+=head2 replicant_type ($classname)
+
+Base class used to instantiate replicants that are in the pool. Unless you
+need to subclass L<DBIx::Class::Storage::DBI::Replicated::Replicant> you should
+just leave this alone.
+
+=cut
+
+has 'replicant_type' => (
+ is=>'ro',
+ isa=>'ClassName',
+ required=>1,
+ default=>'DBIx::Class::Storage::DBI',
+ handles=>{
+ 'create_replicant' => 'new',
+ },
+);
+
+=head2 replicants
+
+A hashref of replicants, with the key being the dsn and the value being the
+actual replicant storage. For example if the $dsn element is something like:
+
+ "dbi:SQLite:dbname=dbfile"
+
+You could access the specific replicant via:
+
+ $schema->storage->replicants->{'dbname=dbfile'}
+
+This attribute also supports the following helper methods:
+
+=over 4
+
+=item set_replicant($key=>$storage)
+
+Pushes a replicant onto the HashRef under $key
+
+=item get_replicant($key)
+
+Retrieves the named replicant
+
+=item has_replicants
+
+Returns true if the Pool defines replicants.
+
+=item num_replicants
+
+The number of replicants in the pool
+
+=item delete_replicant ($key)
+
+Removes the replicant under $key from the pool.
+
+=back
+
+=cut
+
+has 'replicants' => (
+ is=>'rw',
+ metaclass => 'Collection::Hash',
+ isa=>'HashRef[DBIx::Class::Storage::DBI]',
+ default=>sub {{}},
+ provides => {
+ 'set' => 'set_replicant',
+ 'get' => 'get_replicant',
+ 'empty' => 'has_replicants',
+ 'count' => 'num_replicants',
+ 'delete' => 'delete_replicant',
+ },
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 connect_replicants ($schema, Array[$connect_info])
+
+Given an array of $dsn suitable for connecting to a database, create an
+L<DBIx::Class::Storage::DBI::Replicated::Replicant> object and store it in the
+L</replicants> attribute.
+
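+Normally you will call this through the replicated storage, which adds the
+$schema argument for you:
+
+  $schema->storage->connect_replicants(
+    [$dsn1, $user, $pass, \%opts],
+    [$dsn2, $user, $pass, \%opts],
+  );
+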
+=cut
+
+sub connect_replicants {
+ my $self = shift @_;
+ my $schema = shift @_;
+
+ my @newly_created = ();
+ foreach my $connect_info (@_) {
+
+ my $replicant = $self->create_replicant($schema);
+ $replicant->connect_info($connect_info);
+ $replicant->ensure_connected;
+ DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant);
+
+ my ($key) = ($connect_info->[0]=~m/^dbi\:.+\:(.+)$/);
+ $self->set_replicant( $key => $replicant);
+ push @newly_created, $replicant;
+ }
+
+ return @newly_created;
+}
+
+=head2 connected_replicants
+
+Returns true if there are connected replicants. Actually it returns the
+number of connected replicants, so you can do stuff like:
+
+  if( my $num_connected = $storage->pool->connected_replicants ) {
+ print "I have $num_connected connected replicants";
+ } else {
+ print "Sorry, no replicants.";
+ }
+
+This method will actually test that each replicant in the L</replicants> hashref
+is actually connected, so try not to hit this 10 times a second.
+
+=cut
+
+sub connected_replicants {
+ my $self = shift @_;
+ return sum( map {
+ $_->connected ? 1:0
+ } $self->all_replicants );
+}
+
+=head2 active_replicants
+
+This is an array of replicants that are considered to be active in the pool.
+This does not check to see if they are connected, but if they are not, DBIC
+should automatically reconnect them for us when we hit them with a query.
+
+=cut
+
+sub active_replicants {
+ my $self = shift @_;
+ return ( grep {$_} map {
+ $_->active ? $_:0
+ } $self->all_replicants );
+}
+
+=head2 all_replicants
+
+Just a simple array of all the replicant storages. No particular order to the
+array is given, nor should any meaning be derived.
+
+=cut
+
+sub all_replicants {
+ my $self = shift @_;
+ return values %{$self->replicants};
+}
+
+=head2 validate_replicants
+
+This does a check to see if 1) each replicant is connected (or reconnectable),
+2) that it is replicating (via ->is_replicating), and 3) that it is not
+exceeding the lag amount defined by L</maximum_lag>. Replicants that fail any
+of these tests are set to inactive, and thus removed from the replication pool.
+
+This tests L</all_replicants>, since a replicant that has been previously
+marked as inactive can be reactivated should it start to pass the validation
+tests again.
+
+See L<DBIx::Class::Storage::DBI> for more about checking if a replicating
+connection is not following a master or is lagging.
+
+Calling this method will generate queries on the replicant databases so it is
+not recommended that you run it very often.
+
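+For example:
+
+  ## allow replicants to lag up to 5 (units are database dependent)
+  $schema->storage->pool->maximum_lag(5);
+  $schema->storage->pool->validate_replicants;
+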
+=cut
+
+sub validate_replicants {
+ my $self = shift @_;
+ foreach my $replicant($self->all_replicants) {
+ if(
+ $replicant->is_replicating &&
+ $replicant->lag_behind_master <= $self->maximum_lag &&
+ $replicant->ensure_connected
+ ) {
+ ## TODO:: Hook debug for this
+ $replicant->active(1)
+ } else {
+ ## TODO:: Hook debug for this
+ $replicant->active(0);
+ }
+ }
+
+ ## Mark that we completed this validation.
+ $self->_last_validated(time);
+}
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski@takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
--- /dev/null
+package DBIx::Class::Storage::DBI::Replicated::Replicant;
+
+use Moose::Role;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Replicant - A replicated DBI Storage Role
+
+=head1 SYNOPSIS
+
+This class is used internally by L<DBIx::Class::Storage::DBI::Replicated>.
+
+=head1 DESCRIPTION
+
+Replicants are DBI Storages that follow a master DBI Storage. Typically this
+is accomplished via an external replication system. Please see the documents
+for L<DBIx::Class::Storage::DBI::Replicated> for more details.
+
+This class exists to define methods of a DBI Storage that only make sense when
+it's a classic 'slave' in a pool of slave databases which replicate from a
+given master database.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 active
+
+This is a boolean which allows you to programmatically activate or deactivate a
+replicant from the pool. This way to you do stuff like disallow a replicant
+when it get's too far behind the master, if it stops replicating, etc.
+
+This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
+properly replicating from a master and has not fallen too many seconds behind a
+reliability threshold. For that, use L</is_replicating> and L</lag_behind_master>.
+Since the implementation of those functions is database specific (and not all
+DBIC supported DB's support replication) you should refer to your database
+specific storage driver for more information.
+
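+For example (C<$dsn_key> stands for the key described in
+L<DBIx::Class::Storage::DBI::Replicated::Pool/replicants>):
+
+  $schema->storage->replicants->{$dsn_key}->active(0); ## out of rotation
+  $schema->storage->replicants->{$dsn_key}->active(1); ## back in rotation
+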
+=cut
+
+has 'active' => (
+ is=>'rw',
+ isa=>'Bool',
+ lazy=>1,
+ required=>1,
+ default=>1,
+);
+
+=head1 METHODS
+
+This class defines the following methods.
+
+=head2 around: _query_start
+
+Advice on the _query_start method to add more debugging information.
+
+=cut
+
+around '_query_start' => sub {
+ my ($method, $self, $sql, @bind) = @_;
+ my $dsn = $self->connect_info->[0];
+ $self->$method("DSN: $dsn SQL: $sql", @bind);
+};
+
+=head1 AUTHOR
+
+John Napiorkowski <john.napiorkowski@takkle.com>
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file
$self->dbh->do("ROLLBACK TO SAVEPOINT $name")
}
+
+sub is_replicating {
+ my $status = shift->dbh->selectrow_hashref('show slave status');
+ return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
+}
+
+sub lag_behind_master {
+ return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+}
1;
my $othertable = $source->related_source($rel);
my $rel_table = $othertable->name;
+ # Force the order of @cond to match the order of ->add_columns
+ my $idx;
+ my %other_columns_idx = map {'foreign.'.$_ => ++$idx } $othertable->columns;
+  my @cond = sort { $other_columns_idx{$a} <=> $other_columns_idx{$b} } keys(%{$rel_info->{cond}});
+
# Get the key information, mapping off the foreign/self markers
- my @cond = keys(%{$rel_info->{cond}});
my @refkeys = map {/^\w+\.(\w+)$/} @cond;
my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond;
'DBIx::Class::Schema::Versioned' => { ignore => [ qw(connection) ] },
-# must kill authors.
-
- 'DBIx::Class::Storage::DBI::Replicated' => { skip => 1 },
+# don't bother since it's heavily deprecated
+ 'DBIx::Class::ResultSetManager' => { skip => 1 },
};
foreach my $module (@modules) {
# clean up our mess
END {
- $dbh->do("DROP TABLE artist") if $dbh;
-}
+ #$dbh->do("DROP TABLE artist") if $dbh;
+}
\ No newline at end of file
$schema->storage->debug(1);
my $tree_like =
- $schema->resultset('TreeLike')->find(4,
+ $schema->resultset('TreeLike')->find(5,
{ join => { parent => { parent => 'parent' } },
prefetch => { parent => { parent => 'parent' } } });
cmp_ok($queries, '==', 1, 'Only one query run');
-$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 1});
+$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 2});
$tree_like = $tree_like->search_related('children')->search_related('children')->search_related('children')->first;
is($tree_like->name, 'quux', 'Tree search_related ok');
$tree_like = $schema->resultset('TreeLike')->search_related('children',
- { 'children.id' => 2, 'children_2.id' => 3 },
+ { 'children.id' => 3, 'children_2.id' => 4 },
{ prefetch => { children => 'children' } }
)->first;
is(eval { $tree_like->children->first->children->first->name }, 'quux',
'Tree search_related with prefetch ok');
$tree_like = eval { $schema->resultset('TreeLike')->search(
- { 'children.id' => 2, 'children_2.id' => 5 },
+ { 'children.id' => 3, 'children_2.id' => 6 },
{ join => [qw/children children/] }
- )->search_related('children', { 'children_4.id' => 6 }, { prefetch => 'children' }
+ )->search_related('children', { 'children_4.id' => 7 }, { prefetch => 'children' }
)->first->children->first; };
is(eval { $tree_like->name }, 'fong', 'Tree with multiple has_many joins ok');
fourkeys_to_twokeys => [
{
'display' => 'fourkeys_to_twokeys->twokeys',
- 'name' => 'fourkeys_to_twokeys_fk_t_cd_t_artist', 'index_name' => 'fourkeys_to_twokeys_idx_t_cd_t_artist',
+ 'name' => 'fourkeys_to_twokeys_fk_t_artist_t_cd', 'index_name' => 'fourkeys_to_twokeys_idx_t_artist_t_cd',
'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'twokeys',
'selfcols' => ['t_artist', 't_cd'], 'foreigncols' => ['artist', 'cd'],
on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1,
},
{
- 'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'fourkeys_to_twokeys_idx_f_foo_f_goodbye_f_hello_f_bar',
- 'name' => 'fourkeys_to_twokeys_fk_f_foo_f_goodbye_f_hello_f_bar',
+ 'display' => 'fourkeys_to_twokeys->fourkeys', 'index_name' => 'fourkeys_to_twokeys_idx_f_foo_f_bar_f_hello_f_goodbye',
+ 'name' => 'fourkeys_to_twokeys_fk_f_foo_f_bar_f_hello_f_goodbye',
'selftable' => 'fourkeys_to_twokeys', 'foreigntable' => 'fourkeys',
'selfcols' => [qw(f_foo f_bar f_hello f_goodbye)],
'foreigncols' => [qw(foo bar hello goodbye)],
'display' => 'forceforeign->artist',
'name' => 'forceforeign_fk_artist', 'index_name' => 'forceforeign_idx_artist',
'selftable' => 'forceforeign', 'foreigntable' => 'artist',
- 'selfcols' => ['artist'], 'foreigncols' => ['artist_id'],
+ 'selfcols' => ['artist'], 'foreigncols' => ['artistid'],
on_delete => '', on_update => '', deferrable => 1,
},
],
use warnings;
use lib qw(t/lib);
use Test::More;
+use Test::Exception;
+use DBICTest;
BEGIN {
- eval "use DBD::Multi";
+ eval "use Moose; use Test::Moose";
plan $@
- ? ( skip_all => 'needs DBD::Multi for testing' )
- : ( tests => 20 );
-}
+ ? ( skip_all => 'needs Moose for testing' )
+ : ( tests => 71 );
+}
+
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+=head1 HOW TO USE
+
+  This is a test of the replicated storage system. This will work in one of
+  two ways: either it will try to fake replication with a couple of SQLite DBs
+  and creative use of copy, or, if you define a couple of %ENV vars correctly,
+  it will try to test those. If you do that, it will assume the setup is
+  properly replicating. Your results may vary, but I have demonstrated this to
+  work with mysql native replication.
+
+=cut
+
## ----------------------------------------------------------------------------
## Build a class to hold all our required testing data and methods.
## ----------------------------------------------------------------------------
-TESTSCHEMACLASS: {
-
- package DBIx::Class::DBI::Replicated::TestReplication;
-
- use DBI;
- use DBICTest;
- use File::Copy;
-
- ## Create a constructor
-
+TESTSCHEMACLASSES: {
+
+ ## --------------------------------------------------------------------- ##
+ ## Create an object to contain your replicated stuff.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication;
+
+ use DBICTest;
+ use base qw/Class::Accessor::Fast/;
+
+ __PACKAGE__->mk_accessors( qw/schema/ );
+
+ ## Initialize the object
+
sub new {
- my $class = shift @_;
- my %params = @_;
-
- my $self = bless {
- db_paths => $params{db_paths},
- dsns => $class->init_dsns(%params),
- schema=>$class->init_schema,
- }, $class;
-
- $self->connect;
- return $self;
- }
-
- ## get the DSNs. We build this up from the list of file paths
-
- sub init_dsns {
- my $class = shift @_;
- my %params = @_;
- my $db_paths = $params{db_paths};
-
- my @dsn = map {
- "dbi:SQLite:${_}";
- } @$db_paths;
-
- return \@dsn;
- }
-
- ## get the Schema and set the replication storage type
-
- sub init_schema {
- my $class = shift @_;
- my $schema = DBICTest->init_schema();
- $schema->storage_type( '::DBI::Replicated' );
-
- return $schema;
- }
-
- ## connect the Schema
-
- sub connect {
- my $self = shift @_;
- my ($master, @slaves) = @{$self->{dsns}};
- my $master_connect_info = [$master, '','', {AutoCommit=>1, PrintError=>0}];
-
- my @slavesob;
- foreach my $slave (@slaves)
- {
- my $dbh = shift @{$self->{slaves}}
- || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0});
-
- push @{$master_connect_info->[-1]->{slaves_connect_info}},
- [$dbh, '','',{priority=>10}];
-
- push @slavesob,
- $dbh;
- }
-
- ## Keep track of the created slave databases
- $self->{slaves} = \@slavesob;
-
- $self
- ->{schema}
- ->connect(@$master_connect_info);
- }
-
- ## replication
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
- sub replicate {
- my $self = shift @_;
- my ($master, @slaves) = @{$self->{db_paths}};
-
- foreach my $slave (@slaves) {
- copy($master, $slave);
- }
- }
-
- ## Cleanup afer ourselves.
-
- sub cleanup {
- my $self = shift @_;
- my ($master, @slaves) = @{$self->{db_paths}};
-
- foreach my $slave (@slaves) {
- unlink $slave;
- }
+ $self->schema( $self->init_schema );
+ return $self;
}
+
+ ## Get the Schema and set the replication storage type
+
+ sub init_schema {
+ my $class = shift @_;
+
+ my $schema = DBICTest->init_schema(
+ storage_type=>{
+ '::DBI::Replicated' => {
+ balancer_type=>'::Random',
+ balancer_args=>{
+ auto_validate_every=>100,
+ },
+ }
+ },
+ deploy_args=>{
+ add_drop_table => 1,
+ },
+ );
+
+ return $schema;
+ }
+
+ sub generate_replicant_connect_info {}
+ sub replicate {}
+ sub cleanup {}
+
+
+ ## --------------------------------------------------------------------- ##
+ ## Subclass for when you are using SQLite for testing, this provides a fake
+ ## replication support.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::SQLite;
+
+ use DBICTest;
+ use File::Copy;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+ __PACKAGE__->mk_accessors( qw/master_path slave_paths/ );
+
+  ## Set the master path from DBICTest
+
+ sub new {
+ my $class = shift @_;
+ my $self = $class->SUPER::new(@_);
- ## Force a reconnection
+ $self->master_path( DBICTest->_sqlite_dbfilename );
+ $self->slave_paths([
+ "t/var/DBIxClass_slave1.db",
+ "t/var/DBIxClass_slave2.db",
+ ]);
+
+ return $self;
+ }
- sub reconnect {
- my $self = shift @_;
- my $schema = $self->connect;
- $self->{schema} = $schema;
- return $schema;
- }
+ ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+ ## $storage->connect_info to be used for connecting replicants.
+
+ sub generate_replicant_connect_info {
+ my $self = shift @_;
+ my @dsn = map {
+ "dbi:SQLite:${_}";
+ } @{$self->slave_paths};
+
+ return map { [$_,'','',{AutoCommit=>1}] } @dsn;
+ }
+
+ ## Do a 'good enough' replication by copying the master dbfile over each of
+ ## the slave dbfiles. If the master is SQLite we do this, otherwise we
+ ## just do a one second pause to let the slaves catch up.
+
+ sub replicate {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ copy($self->master_path, $slave);
+ }
+ }
+
+  ## Cleanup after ourselves. Unlink all the slave paths.
+
+ sub cleanup {
+ my $self = shift @_;
+ foreach my $slave (@{$self->slave_paths}) {
+ unlink $slave;
+ }
+ }
+
+ ## --------------------------------------------------------------------- ##
+ ## Subclass for when you are setting the databases via custom export vars
+ ## This is for when you have a replicating database setup that you are
+ ## going to test against. You'll need to define the correct $ENV and have
+ ## two slave databases to test against, as well as a replication system
+ ## that will replicate in less than 1 second.
+ ## --------------------------------------------------------------------- ##
+
+ package DBIx::Class::DBI::Replicated::TestReplication::Custom;
+ use base 'DBIx::Class::DBI::Replicated::TestReplication';
+
+ ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for
+ ## $storage->connect_info to be used for connecting replicants.
+
+ sub generate_replicant_connect_info {
+ return (
+ [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}],
+ [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}],
+ );
+ }
+
+ ## pause a bit to let the replication catch up
+
+ sub replicate {
+ sleep 1;
+ }
}
## ----------------------------------------------------------------------------
## Create an object and run some tests
## ----------------------------------------------------------------------------
-my %params = (
- db_paths => [
- "t/var/DBIxClass.db",
- "t/var/DBIxClass_slave1.db",
- "t/var/DBIxClass_slave2.db",
- ],
-);
-
-ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new(%params)
- => 'Created a replication object';
-
-isa_ok $replicate->{schema}
- => 'DBIx::Class::Schema';
+## This first bunch of tests is basic; just make sure all the bits are behaving
+
+my $replicated_class = DBICTest->has_custom_dsn ?
+ 'DBIx::Class::DBI::Replicated::TestReplication::Custom' :
+ 'DBIx::Class::DBI::Replicated::TestReplication::SQLite';
+
+ok my $replicated = $replicated_class->new
+ => 'Created a replication object';
+
+isa_ok $replicated->schema
+ => 'DBIx::Class::Schema';
+
+isa_ok $replicated->schema->storage
+ => 'DBIx::Class::Storage::DBI::Replicated';
+
+ok $replicated->schema->storage->meta
+ => 'has a meta object';
+
+isa_ok $replicated->schema->storage->master
+ => 'DBIx::Class::Storage::DBI';
+
+isa_ok $replicated->schema->storage->pool
+ => 'DBIx::Class::Storage::DBI::Replicated::Pool';
+
+does_ok $replicated->schema->storage->balancer
+ => 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+
+ok my @replicant_connects = $replicated->generate_replicant_connect_info
+ => 'got replication connect information';
+
+ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects)
+ => 'Created some storages suitable for replicants';
+
+isa_ok $replicated->schema->storage->balancer->current_replicant
+ => 'DBIx::Class::Storage::DBI';
+
+ok $replicated->schema->storage->pool->has_replicants
+ => 'does have replicants';
+
+is $replicated->schema->storage->pool->num_replicants => 2
+ => 'has two replicants';
+
+does_ok $replicated_storages[0]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated_storages[1]
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+my @replicant_names = keys %{$replicated->schema->storage->replicants};
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[0]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+
+does_ok $replicated->schema->storage->replicants->{$replicant_names[1]}
+ => 'DBIx::Class::Storage::DBI::Replicated::Replicant';
## Add some info to the database
-$replicate
- ->{schema}
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 4, "Ozric Tentacles"],
- ]);
-
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 4, "Ozric Tentacles"],
+ ]);
+
## Make sure all the slaves have the table definitions
-$replicate->replicate;
+$replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
## Make sure we can read the data.
-ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4)
- => 'Created Result';
+ok my $artist1 = $replicated->schema->resultset('Artist')->find(4)
+ => 'Created Result';
isa_ok $artist1
- => 'DBICTest::Artist';
-
+ => 'DBICTest::Artist';
+
is $artist1->name, 'Ozric Tentacles'
- => 'Found expected name for first result';
+ => 'Found expected name for first result';
## Add some new rows that only the master will have. This is because
## we overload any type of write operation so that it must hit the master
## database.
-$replicate
- ->{schema}
- ->populate('Artist', [
- [ qw/artistid name/ ],
- [ 5, "Doom's Children"],
- [ 6, "Dead On Arrival"],
- [ 7, "Watergate"],
- ]);
-
-## Reconnect the database
-$replicate->reconnect;
-
-## Alright, the database 'cluster' is not in a consistent state. When we do
-## a read now we expect bad news
-
-is $replicate->{schema}->resultset('Artist')->find(5), undef
- => 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet';
+$replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ 5, "Doom's Children"],
+ [ 6, "Dead On Arrival"],
+ [ 7, "Watergate"],
+ ]);
+
+SKIP: {
+  ## We can't do this test if we have custom replicants, since we assume
+  ## that if there are custom ones you are trying to test a real replicating
+  ## system. See docs above for more.
+
+ skip 'Cannot test inconsistent replication since you have a real replication system', 1
+ if DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+ ## Alright, the database 'cluster' is not in a consistent state. When we do
+ ## a read now we expect bad news
+ is $replicated->schema->resultset('Artist')->find(5), undef
+ => 'read after disconnect fails because it uses a replicant which we have neglected to "replicate" yet';
+}
## Make sure all the slaves have the table definitions
-$replicate->replicate;
+$replicated->replicate;
## Should find some data now
-ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5)
- => 'Sync succeed';
-
+ok my $artist2 = $replicated->schema->resultset('Artist')->find(5)
+ => 'Sync succeed';
+
isa_ok $artist2
- => 'DBICTest::Artist';
-
+ => 'DBICTest::Artist';
+
is $artist2->name, "Doom's Children"
- => 'Found expected name for first result';
-
-## What happens when we delete one of the slaves?
+ => 'Found expected name for first result';
-ok my $slave1 = @{$replicate->{slaves}}[0]
- => 'Got Slave1';
+## What happens when we disconnect all the replicants?
-ok $slave1->disconnect
- => 'disconnected slave1';
+is $replicated->schema->storage->pool->connected_replicants => 2
+ => "both replicants are connected";
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect;
+$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect;
-$replicate->reconnect;
+is $replicated->schema->storage->pool->connected_replicants => 0
+ => "both replicants are now disconnected";
-ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6)
- => 'Still finding stuff.';
-
+## All these should pass, since the database should automatically reconnect
+
+ok my $artist3 = $replicated->schema->resultset('Artist')->find(6)
+ => 'Still finding stuff.';
+
isa_ok $artist3
- => 'DBICTest::Artist';
-
+ => 'DBICTest::Artist';
+
is $artist3->name, "Dead On Arrival"
- => 'Found expected name for first result';
+    => 'Found expected name for third result';
+
+is $replicated->schema->storage->pool->connected_replicants => 1
+ => "One replicant reconnected to handle the job";
+
+## What happens when we try to select something that doesn't exist?
+
+ok ! $replicated->schema->resultset('Artist')->find(666)
+ => 'Correctly failed to find something.';
+
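+## Purely illustrative: the pool counter asserted above can also be dumped
+## when debugging reconnect behaviour (diag comes from Test::More):
+diag 'connected replicants: '
+    . $replicated->schema->storage->pool->connected_replicants
+        if $ENV{TEST_VERBOSE};
+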
+## test the reliable option
+
+TESTRELIABLE: {
+
+ $replicated->schema->storage->set_reliable_storage;
-## Let's delete all the slaves
+ ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Read from master 1';
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'Read from master 2';
+
+ $replicated->schema->storage->set_balanced_storage;
+
+ ok $replicated->schema->resultset('Artist')->find(3)
+ => 'Read from replicant';
+}
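+
+## A minimal sketch, assuming Scope::Guard is available: the reliable
+## switch can also be scoped so that balanced reads are restored even if
+## the enclosed code dies.
+{
+    require Scope::Guard;
+    my $storage = $replicated->schema->storage;
+    $storage->set_reliable_storage;
+    my $restore = Scope::Guard->new(sub { $storage->set_balanced_storage });
+    ## any read issued in this scope hits the master
+}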
-ok my $slave2 = @{$replicate->{slaves}}[1]
- => 'Got Slave2';
+## Make sure when reliable goes out of scope, we are using replicants again
+
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'back to replicant 1.';
+
+ok $replicated->schema->resultset('Artist')->find(2)
+ => 'back to replicant 2.';
+
+## set all the replicants to inactive, and make sure the balancer falls back to
+## the master.
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+ => 'Fallback to master';
+
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+
+ok $replicated->schema->resultset('Artist')->find(2)
+    => 'Returned to replicants';
+
+## Getting slave status tests
+
+SKIP: {
+    ## We skip these tests unless you have custom replicants, since the
+    ## default SQLite-based replication tests don't support these functions.
+
+ skip 'Cannot Test Replicant Status on Non Replicating Database', 9
+ unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
+
+    $replicated->replicate; ## Give the slaves a chance to catch up.
+
+ ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating
+ => 'Replicants are replicating';
+
+ is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0
+ => 'Replicant is zero seconds behind master';
+
+ ## Test the validate replicants
+
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Still have 2 replicants after validation';
+
+    ## Force the replicants to fail the validate test by requiring their lag
+    ## to be negative (i.e. ahead of the master!)
+
+ $replicated->schema->storage->pool->maximum_lag(-10);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 0
+        => 'No way a replicant can be ahead of the master';
+
+ ## Let's be fair to the replicants again. Let them lag up to 5
+
+ $replicated->schema->storage->pool->maximum_lag(5);
+ $replicated->schema->storage->pool->validate_replicants;
+
+ is $replicated->schema->storage->pool->active_replicants, 2
+ => 'Both replicants in good standing again';
+
+ ## Check auto validate
+
+ is $replicated->schema->storage->balancer->auto_validate_every, 100
+ => "Got the expected value for auto validate";
+
+    ## This will make sure we auto validate every time
+ $replicated->schema->storage->balancer->auto_validate_every(0);
+
+ ## set all the replicants to inactive, and make sure the balancer falls back to
+ ## the master.
+
+ $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0);
+ $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0);
+
+ ## Ok, now when we go to run a query, autovalidate SHOULD reconnect
+
+ is $replicated->schema->storage->pool->active_replicants => 0
+ => "both replicants turned off";
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+ is $replicated->schema->storage->pool->active_replicants => 2
+ => "both replicants reactivated";
+}
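+
+## Illustrative guard built from the status methods exercised above (no
+## assertions; is_replicating is simply false on storages that cannot
+## report replication state):
+for my $r (values %{ $replicated->schema->storage->replicants }) {
+    next unless $r->is_replicating;
+    diag 'replicant is lagging' if ($r->lag_behind_master || 0) > 5;
+}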
-ok $slave2->disconnect
- => 'Disconnected slave2';
+## Test the reliably callback
-$replicate->reconnect;
+ok my $reliably = sub {
+
+ ok $replicated->schema->resultset('Artist')->find(5)
+ => 'replicant reactivated';
+
+} => 'created coderef properly';
-## We expect an error now, since all the slaves are dead
+$replicated->schema->storage->execute_reliably($reliably);
-eval {
- $replicate->{schema}->resultset('Artist')->find(4)->name;
-};
+## Try something with an error
-ok $@ => 'Got error when trying to find artistid 4';
+ok my $unreliably = sub {
+
+ ok $replicated->schema->resultset('ArtistXX')->find(5)
+        => 'this should never run';
+
+} => 'created coderef properly';
-## This should also be an error
+throws_ok {$replicated->schema->storage->execute_reliably($unreliably)}
+ qr/Can't find source for ArtistXX/
+ => 'Bad coderef throws proper error';
+
+## Make sure replication came back
-eval {
- my $artist4 = $replicate->{schema}->resultset('Artist')->find(7);
-};
+ok $replicated->schema->resultset('Artist')->find(3)
+ => 'replicant reactivated';
+
+## make sure transactions are set to execute_reliably
-ok $@ => 'Got read errors after everything failed';
+ok my $transaction = sub {
+
+ my $id = shift @_;
+
+ $replicated
+ ->schema
+ ->populate('Artist', [
+ [ qw/artistid name/ ],
+ [ $id, "Children of the Grave"],
+ ]);
+
+ ok my $result = $replicated->schema->resultset('Artist')->find($id);
+ ok my $more = $replicated->schema->resultset('Artist')->find(1);
+
+ return ($result, $more);
+
+};
-## make sure ->connect_info returns something sane
+## Test the transaction with multi return
+{
+ ok my @return = $replicated->schema->txn_do($transaction, 666)
+ => 'did transaction';
+
+ is $return[0]->id, 666
+ => 'first returned value is correct';
+
+ is $return[1]->id, 1
+ => 'second returned value is correct';
+}
-ok $replicate->{schema}->storage->connect_info
- => 'got something out of ->connect_info';
+## Test that asking for single return works
+{
+ ok my $return = $replicated->schema->txn_do($transaction, 777)
+ => 'did transaction';
+
+ is $return->id, 777
+ => 'first returned value is correct';
+}
-## Force a connection to the write source for testing.
+## Test transaction returning a single value
-$replicate->{schema}->storage($replicate->{schema}->storage->write_source);
+{
+ ok my $result = $replicated->schema->txn_do(sub {
+ ok my $more = $replicated->schema->resultset('Artist')->find(1);
+ return $more;
+ }) => 'successfully processed transaction';
+
+ is $result->id, 1
+ => 'Got expected single result from transaction';
+}
-## What happens when we do a find for something that doesn't exist?
+## Make sure replication came back
-ok ! $replicate->{schema}->resultset('Artist')->find(666)
- => 'Correctly did not find a bad artist id';
+ok $replicated->schema->resultset('Artist')->find(1)
+ => 'replicant reactivated';
+
+## Test Discard changes
+{
+ ok my $artist = $replicated->schema->resultset('Artist')->find(2)
+ => 'got an artist to test discard changes';
+
+ ok $artist->discard_changes
+        => 'properly discarded changes';
+}
+
## Delete the old database files
-$replicate->cleanup;
+$replicated->cleanup;
my $schema = DBICTest->init_schema(
no_deploy=>1,
no_populate=>1,
+ storage_type=>'::DBI::Replicated',
+ storage_type_args=>{
+ balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random'
+ },
);
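+## For reference, a sketch of the equivalent direct configuration on the
+## schema class (same names as the init_schema arguments above):
+# __PACKAGE__->storage_type([
+#     '::DBI::Replicated',
+#     { balancer_type => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random' },
+# ]);
+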
This method removes the test SQLite database in t/var/DBIxClass.db
=cut
+sub has_custom_dsn {
+ return $ENV{"DBICTEST_DSN"} ? 1:0;
+}
+
+sub _sqlite_dbfilename {
+ return "t/var/DBIxClass.db";
+}
+
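+## A hypothetical subclass could redirect this, e.g. to give a replicant
+## its own database file (the name below is illustrative only):
+# package DBICTest::ReplicantDB;
+# use base 'DBICTest';
+# sub _sqlite_dbfilename { 't/var/DBIxClass_replicant.db' }
+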
sub _database {
my $self = shift;
- my $db_file = "t/var/DBIxClass.db";
+ my $db_file = $self->_sqlite_dbfilename;
unlink($db_file) if -e $db_file;
unlink($db_file . "-journal") if -e $db_file . "-journal";
} else {
$schema = DBICTest::Schema->compose_namespace('DBICTest');
}
+ if( $args{storage_type}) {
+ $schema->storage_type($args{storage_type});
+ }
if ( !$args{no_connect} ) {
$schema = $schema->connect($self->_database);
- $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']);
+ $schema->storage->on_connect_do(['PRAGMA synchronous = OFF'])
+ unless $self->has_custom_dsn;
}
if ( !$args{no_deploy} ) {
- __PACKAGE__->deploy_schema( $schema );
- __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} );
+ __PACKAGE__->deploy_schema( $schema, $args{deploy_args} );
+ __PACKAGE__->populate_schema( $schema )
+ if( !$args{no_populate} );
}
return $schema;
}
sub deploy_schema {
my $self = shift;
my $schema = shift;
+ my $args = shift || {};
- if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
- return $schema->deploy();
+ if ($ENV{"DBICTEST_SQLT_DEPLOY"}) {
+ $schema->deploy($args);
} else {
open IN, "t/lib/sqlite.sql";
my $sql;
close IN;
($schema->storage->dbh->do($_) || print "Error on SQL: $_\n") for split(/;\n/, $sql);
}
+ return;
}
=head2 populate_schema
[ 1, 2 ],
[ 1, 3 ],
]);
-
+
$schema->populate('TreeLike', [
[ qw/id parent name/ ],
- [ 1, 0, 'foo' ],
- [ 2, 1, 'bar' ],
- [ 5, 1, 'blop' ],
- [ 3, 2, 'baz' ],
- [ 4, 3, 'quux' ],
- [ 6, 2, 'fong' ],
+ [ 1, undef, 'root' ],
+ [ 2, 1, 'foo' ],
+ [ 3, 2, 'bar' ],
+ [ 6, 2, 'blop' ],
+ [ 4, 3, 'baz' ],
+ [ 5, 4, 'quux' ],
+ [ 7, 3, 'fong' ],
]);
$schema->populate('Track', [
[ 1, "Tools" ],
[ 2, "Body Parts" ],
]);
-
- $schema->populate('CollectionObject', [
- [ qw/collection object/ ],
- [ 1, 1 ],
- [ 1, 2 ],
- [ 1, 3 ],
- [ 2, 4 ],
- [ 2, 5 ],
- ]);
-
+
$schema->populate('TypedObject', [
[ qw/objectid type value/ ],
[ 1, "pointy", "Awl" ],
[ 4, "pointy", "Tooth" ],
[ 5, "round", "Head" ],
]);
+ $schema->populate('CollectionObject', [
+ [ qw/collection object/ ],
+ [ 1, 1 ],
+ [ 1, 2 ],
+ [ 1, 3 ],
+ [ 2, 4 ],
+ [ 2, 5 ],
+ ]);
$schema->populate('Owners', [
[ qw/ownerid name/ ],
# since it uses the PK
__PACKAGE__->might_have(
'artist_1', 'DBICTest::Schema::Artist', {
- 'foreign.artist_id' => 'self.artist',
+ 'foreign.artistid' => 'self.artist',
}, {
is_foreign_key_constraint => 1,
},
__PACKAGE__->table('treelike');
__PACKAGE__->add_columns(
'id' => { data_type => 'integer', is_auto_increment => 1 },
- 'parent' => { data_type => 'integer' },
+  'parent' => { data_type => 'integer', is_nullable => 1 },
'name' => { data_type => 'varchar',
size => 100,
},
{ 'foreign.id' => 'self.parent' });
__PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent' => 'self.id' });
+## Since this is a self-referential table we need a post-deploy hook to get
+## some data in while constraints are off (the hook below currently just
+## adds an index on name).
+
+sub sqlt_deploy_hook {
+    my ($self, $sqlt_table) = @_;
+
+    $sqlt_table->add_index(name => 'idx_name', fields => ['name']);
+}
1;
);
__PACKAGE__->set_primary_key(qw/artist cd/);
-__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' );
+__PACKAGE__->belongs_to(
+ artist => 'DBICTest::Schema::Artist',
+ {'foreign.artistid'=>'self.artist'},
+);
+
__PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD', undef, { is_deferrable => 0, add_fk_index => 0 } );
__PACKAGE__->has_many(
--
CREATE TABLE treelike (
id INTEGER PRIMARY KEY NOT NULL,
- parent integer NOT NULL,
+ parent integer NULL,
name varchar(100) NOT NULL
);