From: John Napiorkowski Date: Wed, 14 May 2008 13:40:07 +0000 (+0000) Subject: Merge 'trunk' into 'replication_dedux' X-Git-Tag: v0.08240~402^2~40 X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=aae8ca08d65518a4c076746a858f9a5f76f1dff1;hp=5aac79f733809095bdc342947712586fbd85aa9a;p=dbsrgits%2FDBIx-Class.git Merge 'trunk' into 'replication_dedux' r12028@dev (orig r4378): captainL | 2008-05-11 13:43:58 -0500 added test for find or create related functionality in nested inserts r12029@dev (orig r4379): captainL | 2008-05-11 14:03:27 -0500 sanified new multi_create test r12030@dev (orig r4381): ribasushi | 2008-05-12 06:09:20 -0500 Add failing tests for missing safeguards for multilevel prefetch (don't know the internals well enough to fix the issue itself) --- diff --git a/Makefile.PL b/Makefile.PL index a98fe9f..4d3cb66 100644 --- a/Makefile.PL +++ b/Makefile.PL @@ -24,6 +24,7 @@ requires 'JSON::Any' => 1.00; requires 'Scope::Guard' => 0.03; requires 'Digest::SHA1' => 2.00; requires 'Path::Class' => 0; +requires 'List::Util' => 1.19; # Perl 5.8.0 doesn't have utf8::is_utf8() requires 'Encode' => 0 if ($] <= 5.008000); diff --git a/lib/DBIx/Class/Schema.pm b/lib/DBIx/Class/Schema.pm index df6131d..a51ab96 100644 --- a/lib/DBIx/Class/Schema.pm +++ b/lib/DBIx/Class/Schema.pm @@ -637,9 +637,9 @@ sub setup_connection_class { =over 4 -=item Arguments: $storage_type +=item Arguments: $storage_type|{$storage_type, \%args} -=item Return Value: $storage_type +=item Return Value: $storage_type|{$storage_type, \%args} =back @@ -653,6 +653,13 @@ in cases where the appropriate subclass is not autodetected, such as when dealing with MSSQL via L, in which case you'd set it to C<::DBI::Sybase::MSSQL>. +If your storage type requires instantiation arguments, those are defined as a +second argument in the form of a hashref and the entire value needs to be +wrapped into an arrayref or a hashref. 
We support both types of refs here in +order to play nice with your Config::[class] or your choice. + +See L for an example of this. + =head2 connection =over 4 @@ -675,19 +682,33 @@ or L in general. sub connection { my ($self, @info) = @_; return $self if !@info && $self->storage; - my $storage_class = $self->storage_type; + + my ($storage_class, $args) = ref $self->storage_type ? + ($self->_normalize_storage_type($self->storage_type),{}) : ($self->storage_type, {}); + $storage_class = 'DBIx::Class::Storage'.$storage_class if $storage_class =~ m/^::/; eval "require ${storage_class};"; $self->throw_exception( "No arguments to load_classes and couldn't load ${storage_class} ($@)" ) if $@; - my $storage = $storage_class->new($self); + my $storage = $storage_class->new($self=>$args); $storage->connect_info(\@info); $self->storage($storage); return $self; } +sub _normalize_storage_type { + my ($self, $storage_type) = @_; + if(ref $storage_type eq 'ARRAY') { + return @$storage_type; + } elsif(ref $storage_type eq 'HASH') { + return %$storage_type; + } else { + $self->throw_exception('Unsupported REFTYPE given: '. ref $storage_type); + } +} + =head2 connect =over 4 diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 7d76be6..212be1e 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -1063,6 +1063,7 @@ sub _query_start { if ( $self->debug ) { @bind = $self->_fix_bind_params(@bind); + $self->debugobj->query_start( $sql, @bind ); } } @@ -1707,6 +1708,31 @@ sub build_datetime_parser { } } +=head2 is_replicating + +A boolean that reports if a particular L is set to +replicate from a master database. Default is undef, which is the result +returned by databases that don't support replication. + +=cut + +sub is_replicating { + return; + +} + +=head2 lag_behind_master + +Returns a number that represents a certain amount of lag behind a master db +when a given storage is replicating. 
The number is database dependent, but +starts at zero and increases with the amount of lag. Default in undef + +=cut + +sub lag_behind_master { + return; +} + sub DESTROY { my $self = shift; return if !$self->_dbh; diff --git a/lib/DBIx/Class/Storage/DBI/Replicated.pm b/lib/DBIx/Class/Storage/DBI/Replicated.pm index d736c41..27136b1 100644 --- a/lib/DBIx/Class/Storage/DBI/Replicated.pm +++ b/lib/DBIx/Class/Storage/DBI/Replicated.pm @@ -1,14 +1,12 @@ package DBIx::Class::Storage::DBI::Replicated; -use strict; -use warnings; - +use Moose; use DBIx::Class::Storage::DBI; -use DBD::Multi; - -use base qw/Class::Accessor::Fast/; +use DBIx::Class::Storage::DBI::Replicated::Pool; +use DBIx::Class::Storage::DBI::Replicated::Balancer; +use Scalar::Util qw(blessed); -__PACKAGE__->mk_accessors( qw/read_source write_source/ ); +extends 'DBIx::Class::Storage::DBI', 'Moose::Object'; =head1 NAME @@ -17,41 +15,21 @@ DBIx::Class::Storage::DBI::Replicated - ALPHA Replicated database support =head1 SYNOPSIS The Following example shows how to change an existing $schema to a replicated -storage type and update it's connection information to contain a master DSN and -an array of slaves. +storage type, add some replicated (readonly) databases, and perform reporting +tasks. ## Change storage_type in your schema class - $schema->storage_type( '::DBI::Replicated' ); + $schema->storage_type( ['::DBI::Replicated', {balancer=>'::Random'}] ); + + ## Add some slaves. Basically this is an array of arrayrefs, where each + ## arrayref is database connect information - ## Set your connection. - $schema->connect( - $dsn, $user, $password, { - AutoCommit => 1, - ## Other standard DBI connection or DBD custom attributes added as - ## usual. 
Additionally, we have two custom attributes for defining - ## slave information and controlling how the underlying DBD::Multi - slaves_connect_info => [ - ## Define each slave like a 'normal' DBI connection, but you add - ## in a DBD::Multi custom attribute to define how the slave is - ## prioritized. Please see DBD::Multi for more. - [$slave1dsn, $user, $password, {%slave1opts, priority=>10}], - [$slave2dsn, $user, $password, {%slave2opts, priority=>10}], - [$slave3dsn, $user, $password, {%slave3opts, priority=>20}], - ## add in a preexisting database handle - [$dbh, '','', {priority=>30}], - ## DBD::Multi will call this coderef for connects - [sub { DBI->connect(< DSN info >) }, '', '', {priority=>40}], - ## If the last item is hashref, we use that for DBD::Multi's - ## configuration information. Again, see DBD::Multi for more. - {timeout=>25, failed_max=>2}, - ], - }, + $schema->storage->connect_replicants( + [$dsn1, $user, $pass, \%opts], + [$dsn1, $user, $pass, \%opts], + [$dsn1, $user, $pass, \%opts], ); - ## Now, just use the schema as normal - $schema->resultset('Table')->find(< unique >); ## Reads will use slaves - $schema->resultset('Table')->create(\%info); ## Writes will use master - =head1 DESCRIPTION Warning: This class is marked ALPHA. We are using this in development and have @@ -64,203 +42,505 @@ one master and numerous slave database connections. All write-type queries (INSERT, UPDATE, DELETE and even LAST_INSERT_ID) are routed to master database, all read-type queries (SELECTs) go to the slave database. -For every slave database you can define a priority value, which controls data -source usage pattern. It uses L, so first the lower priority data -sources used (if they have the same priority, the are used randomized), than -if all low priority data sources fail, higher ones tried in order. +Basically, any method request that L would normally +handle gets delegated to one of the two attributes: L or to +L. 
Additionally, some methods need to be distributed +to all existing storages. This way our storage class is a drop in replacement +for L. + +Read traffic is spread across the replicants (slaves) occuring to a user +selected algorithm. The default algorithm is random weighted. + +TODO more details about the algorithm. -=head1 CONFIGURATION +=head1 ATTRIBUTES -Please see L for most configuration information. +This class defines the following attributes. + +=head2 pool_type + +Contains the classname which will instantiate the L object. Defaults +to: L. =cut -sub new { - my $proto = shift; - my $class = ref( $proto ) || $proto; - my $self = {}; +has 'pool_type' => ( + is=>'ro', + isa=>'ClassName', + lazy_build=>1, + handles=>{ + 'create_pool' => 'new', + }, +); - bless( $self, $class ); +=head2 pool_args - $self->write_source( DBIx::Class::Storage::DBI->new ); - $self->read_source( DBIx::Class::Storage::DBI->new ); +Contains a hashref of initialized information to pass to the Balancer object. +See L for available arguments. - return $self; -} +=cut -sub all_sources { - my $self = shift; +has 'pool_args' => ( + is=>'ro', + isa=>'HashRef', + lazy=>1, + required=>1, + default=>sub { {} }, +); - my @sources = ($self->read_source, $self->write_source); - return wantarray ? @sources : \@sources; -} +=head2 balancer_type -sub _connect_info { - my $self = shift; - my $master = $self->write_source->_connect_info; - $master->[-1]->{slave_connect_info} = $self->read_source->_connect_info; - return $master; -} +The replication pool requires a balance class to provider the methods for +choose how to spread the query load across each replicant in the pool. 
-sub connect_info { - my ($self, $source_info) = @_; +=cut - ## if there is no $source_info, treat this sub like an accessor - return $self->_connect_info - if !$source_info; - - ## Alright, let's conect the master - $self->write_source->connect_info($source_info); - - ## Now, build and then connect the Slaves - my @slaves_connect_info = @{$source_info->[-1]->{slaves_connect_info}}; - my $dbd_multi_config = ref $slaves_connect_info[-1] eq 'HASH' - ? pop @slaves_connect_info : {}; - - ## We need to do this since SQL::Abstract::Limit can't guess what DBD::Multi is - $dbd_multi_config->{limit_dialect} = $self->write_source->sql_maker->limit_dialect - unless defined $dbd_multi_config->{limit_dialect}; - - @slaves_connect_info = map { - ## if the first element in the arrayhash is a ref, make that the value - my $db = ref $_->[0] ? $_->[0] : $_; - my $priority = $_->[-1]->{priority} || 10; ## default priority is 10 - $priority => $db; - } @slaves_connect_info; +has 'balancer_type' => ( + is=>'ro', + isa=>'ClassName', + lazy_build=>1, + handles=>{ + 'create_balancer' => 'new', + }, +); + +=head2 balancer_args + +Contains a hashref of initialized information to pass to the Balancer object. +See L for available arguments. + +=cut + +has 'balancer_args' => ( + is=>'ro', + isa=>'HashRef', + lazy=>1, + required=>1, + default=>sub { {} }, +); + +=head2 pool + +Is a or derived class. This is a +container class for one or more replicated databases. + +=cut + +has 'pool' => ( + is=>'ro', + isa=>'DBIx::Class::Storage::DBI::Replicated::Pool', + lazy_build=>1, + handles=>[qw/ + connect_replicants + replicants + has_replicants + /], +); + +=head2 balancer + +Is a or derived class. This +is a class that takes a pool () + +=cut + +has 'balancer' => ( + is=>'ro', + isa=>'DBIx::Class::Storage::DBI::Replicated::Balancer', + lazy_build=>1, + handles=>[qw/auto_validate_every/], +); + +=head2 master + +The master defines the canonical state for a pool of connected databases. 
All +the replicants are expected to match this databases state. Thus, in a classic +Master / Slaves distributed system, all the slaves are expected to replicate +the Master's state as quick as possible. This is the only database in the +pool of databases that is allowed to handle write traffic. + +=cut + +has 'master' => ( + is=> 'ro', + isa=>'DBIx::Class::Storage::DBI', + lazy_build=>1, +); + +=head1 ATTRIBUTES IMPLEMENTING THE DBIx::Storage::DBI INTERFACE + +The following methods are delegated all the methods required for the +L interface. + +=head2 read_handler + +Defines an object that implements the read side of L. + +=cut + +has 'read_handler' => ( + is=>'rw', + isa=>'Object', + lazy_build=>1, + handles=>[qw/ + select + select_single + columns_info_for + /], +); + +=head2 write_handler + +Defines an object that implements the write side of L. + +=cut + +has 'write_handler' => ( + is=>'ro', + isa=>'Object', + lazy_build=>1, + lazy_build=>1, + handles=>[qw/ + on_connect_do + on_disconnect_do + connect_info + throw_exception + sql_maker + sqlt_type + create_ddl_dir + deployment_statements + datetime_parser + datetime_parser_type + last_insert_id + insert + insert_bulk + update + delete + dbh + txn_do + txn_commit + txn_rollback + sth + deploy + schema + /], +); + +=head1 METHODS + +This class defines the following methods. + +=head2 new + +L when instantiating it's storage passed itself as the +first argument. We need to invoke L on the underlying parent class, make +sure we properly give it a L meta class, and then correctly instantiate +our attributes. Basically we pass on whatever the schema has in it's class +data for 'storage_type_args' to our replicated storage type. 
+ +=cut + +sub new { + my $class = shift @_; + my $schema = shift @_; + my $storage_type_args = shift @_; + my $obj = $class->SUPER::new($schema, $storage_type_args, @_); - $self->read_source->connect_info([ - 'dbi:Multi:', undef, undef, { - dsns => [@slaves_connect_info], - %$dbd_multi_config, - }, - ]); + ## Hate to do it this way, but can't seem to get advice on the attribute working right + ## maybe we can do a type and coercion for it. + if( $storage_type_args->{balancer_type} && $storage_type_args->{balancer_type}=~m/^::/) { + $storage_type_args->{balancer_type} = 'DBIx::Class::Storage::DBI::Replicated::Balancer'.$storage_type_args->{balancer_type}; + eval "require $storage_type_args->{balancer_type}"; + } - ## Return the formated connection information - return $self->_connect_info; + return $class->meta->new_object( + __INSTANCE__ => $obj, + %$storage_type_args, + @_, + ); } -sub select { - shift->read_source->select( @_ ); -} -sub select_single { - shift->read_source->select_single( @_ ); -} -sub throw_exception { - shift->read_source->throw_exception( @_ ); -} -sub sql_maker { - shift->read_source->sql_maker( @_ ); -} -sub columns_info_for { - shift->read_source->columns_info_for( @_ ); -} -sub sqlt_type { - shift->read_source->sqlt_type( @_ ); -} -sub create_ddl_dir { - shift->read_source->create_ddl_dir( @_ ); -} -sub deployment_statements { - shift->read_source->deployment_statements( @_ ); -} -sub datetime_parser { - shift->read_source->datetime_parser( @_ ); -} -sub datetime_parser_type { - shift->read_source->datetime_parser_type( @_ ); -} -sub build_datetime_parser { - shift->read_source->build_datetime_parser( @_ ); -} +=head2 _build_master -sub limit_dialect { $_->limit_dialect( @_ ) for( shift->all_sources ) } -sub quote_char { $_->quote_char( @_ ) for( shift->all_sources ) } -sub name_sep { $_->quote_char( @_ ) for( shift->all_sources ) } -sub disconnect { $_->disconnect( @_ ) for( shift->all_sources ) } -sub set_schema { $_->set_schema( @_ ) 
for( shift->all_sources ) } +Lazy builder for the L attribute. -sub DESTROY { - my $self = shift; +=cut - undef $self->{write_source}; - undef $self->{read_sources}; +sub _build_master { + DBIx::Class::Storage::DBI->new; } -sub last_insert_id { - shift->write_source->last_insert_id( @_ ); +=head2 _build_pool_type + +Lazy builder for the L attribute. + +=cut + +sub _build_pool_type { + return 'DBIx::Class::Storage::DBI::Replicated::Pool'; } -sub insert { - shift->write_source->insert( @_ ); + +=head2 _build_pool + +Lazy builder for the L attribute. + +=cut + +sub _build_pool { + my $self = shift @_; + $self->create_pool(%{$self->pool_args}); } -sub update { - shift->write_source->update( @_ ); + +=head2 _build_balancer_type + +Lazy builder for the L attribute. + +=cut + +sub _build_balancer_type { + return 'DBIx::Class::Storage::DBI::Replicated::Balancer::First'; } -sub update_all { - shift->write_source->update_all( @_ ); + +=head2 _build_balancer + +Lazy builder for the L attribute. This takes a Pool object so that +the balancer knows which pool it's balancing. + +=cut + +sub _build_balancer { + my $self = shift @_; + $self->create_balancer( + pool=>$self->pool, + master=>$self->master, + %{$self->balancer_args},); } -sub delete { - shift->write_source->delete( @_ ); + +=head2 _build_write_handler + +Lazy builder for the L attribute. The default is to set this to +the L. + +=cut + +sub _build_write_handler { + return shift->master; } -sub delete_all { - shift->write_source->delete_all( @_ ); + +=head2 _build_read_handler + +Lazy builder for the L attribute. The default is to set this to +the L. + +=cut + +sub _build_read_handler { + return shift->balancer; } -sub create { - shift->write_source->create( @_ ); + +=head2 around: connect_replicants + +All calls to connect_replicants needs to have an existing $schema tacked onto +top of the args, since L needs it. 
+ +=cut + +around 'connect_replicants' => sub { + my ($method, $self, @args) = @_; + $self->$method($self->schema, @args); +}; + +=head2 all_storages + +Returns an array of of all the connected storage backends. The first element +in the returned array is the master, and the remainings are each of the +replicants. + +=cut + +sub all_storages { + my $self = shift @_; + + return grep {defined $_ && blessed $_} ( + $self->master, + $self->replicants, + ); } -sub find_or_create { - shift->write_source->find_or_create( @_ ); + +=head2 set_reliable_storage + +Sets the current $schema to be 'reliable', that is all queries, both read and +write are sent to the master + +=cut + +sub set_reliable_storage { + my $self = shift @_; + my $schema = $self->schema; + my $write_handler = $self->schema->storage->write_handler; + + $schema->storage->read_handler($write_handler); } -sub update_or_create { - shift->write_source->update_or_create( @_ ); + +=head2 set_balanced_storage + +Sets the current $schema to be use the for all reads, while all +writea are sent to the master only + +=cut + +sub set_balanced_storage { + my $self = shift @_; + my $schema = $self->schema; + my $write_handler = $self->schema->storage->balancer; + + $schema->storage->read_handler($write_handler); } + +=head2 connected + +Check that the master and at least one of the replicants is connected. + +=cut + sub connected { - shift->write_source->connected( @_ ); + my $self = shift @_; + + return + $self->master->connected && + $self->pool->connected_replicants; } + +=head2 ensure_connected + +Make sure all the storages are connected. 
+ +=cut + sub ensure_connected { - shift->write_source->ensure_connected( @_ ); -} -sub dbh { - shift->write_source->dbh( @_ ); -} -sub txn_do { - shift->write_source->txn_do( @_ ); + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->ensure_connected(@_); + } } -sub txn_commit { - shift->write_source->txn_commit( @_ ); + +=head2 limit_dialect + +Set the limit_dialect for all existing storages + +=cut + +sub limit_dialect { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->limit_dialect(@_); + } } -sub txn_rollback { - shift->write_source->txn_rollback( @_ ); + +=head2 quote_char + +Set the quote_char for all existing storages + +=cut + +sub quote_char { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->quote_char(@_); + } } -sub sth { - shift->write_source->sth( @_ ); + +=head2 name_sep + +Set the name_sep for all existing storages + +=cut + +sub name_sep { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->name_sep(@_); + } } -sub deploy { - shift->write_source->deploy( @_ ); + +=head2 set_schema + +Set the schema object for all existing storages + +=cut + +sub set_schema { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->set_schema(@_); + } } -sub _prep_for_execute { - shift->write_source->_prep_for_execute(@_); + +=head2 debug + +set a debug flag across all storages + +=cut + +sub debug { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->debug(@_); + } } +=head2 debugobj + +set a debug object across all storages + +=cut + sub debugobj { - shift->write_source->debugobj(@_); + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->debugobj(@_); + } } -sub debug { - shift->write_source->debug(@_); + +=head2 debugfh + +set a debugfh object across all storages + +=cut + +sub debugfh { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->debugfh(@_); + 
} } -sub debugfh { shift->_not_supported( 'debugfh' ) }; -sub debugcb { shift->_not_supported( 'debugcb' ) }; +=head2 debugcb + +set a debug callback across all storages -sub _not_supported { - my( $self, $method ) = @_; +=cut - die "This Storage does not support $method method."; +sub debugcb { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->debugcb(@_); + } } -=head1 SEE ALSO +=head2 disconnect + +disconnect everything -L, L, L +=cut + +sub disconnect { + my $self = shift @_; + foreach my $source ($self->all_storages) { + $source->disconnect(@_); + } +} =head1 AUTHOR diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm new file mode 100644 index 0000000..ed7007d --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer.pm @@ -0,0 +1,195 @@ +package DBIx::Class::Storage::DBI::Replicated::Balancer; + +use Moose::Role; +requires 'next_storage'; + +=head1 NAME + +DBIx::Class::Storage::DBI::Replicated::Balancer; A Software Load Balancer + +=head1 SYNOPSIS + +This role is used internally by L. + +=head1 DESCRIPTION + +Given a pool (L) of replicated +database's (L), defines a +method by which query load can be spread out across each replicant in the pool. + +=head1 ATTRIBUTES + +This class defines the following attributes. + +=head2 auto_validate_every ($seconds) + +If auto_validate has some sort of value, run the L every +$seconds. Be careful with this, because if you set it to 0 you will end up +validating every query. + +=cut + +has 'auto_validate_every' => ( + is=>'rw', + isa=>'Int', + predicate=>'has_auto_validate_every', +); + +=head2 master + +The L object that is the master database all the +replicants are trying to follow. The balancer needs to know it since it's the +ultimate fallback. + +=cut + +has 'master' => ( + is=>'ro', + isa=>'DBIx::Class::Storage::DBI', + required=>1, +); + +=head2 pool + +The L object that we are trying to +balance. 
+ +=cut + +has 'pool' => ( + is=>'ro', + isa=>'DBIx::Class::Storage::DBI::Replicated::Pool', + required=>1, +); + +=head2 current_replicant + +Replicant storages (slaves) handle all read only traffic. The assumption is +that your database will become readbound well before it becomes write bound +and that being able to spread your read only traffic around to multiple +databases is going to help you to scale traffic. + +This attribute returns the next slave to handle a read request. Your L +attribute has methods to help you shuffle through all the available replicants +via it's balancer object. + +=cut + +has 'current_replicant' => ( + is=> 'rw', + isa=>'DBIx::Class::Storage::DBI', + lazy_build=>1, + handles=>[qw/ + select + select_single + columns_info_for + /], +); + +=head1 METHODS + +This class defines the following methods. + +=head2 _build_current_replicant + +Lazy builder for the L attribute. + +=cut + +sub _build_current_replicant { + my $self = shift @_; + $self->next_storage; +} + +=head2 next_storage + +This method should be defined in the class which consumes this role. + +Given a pool object, return the next replicant that will serve queries. The +default behavior is to grap the first replicant it finds but you can write +your own subclasses of L to +support other balance systems. + +This returns from the pool of active replicants. If there are no active +replicants, then you should have it return the master as an ultimate fallback. + +=head2 around: next_storage + +Advice on next storage to add the autovalidation. We have this broken out so +that it's easier to break out the auto validation into a role. + +This also returns the master in the case that none of the replicants are active +or just just forgot to create them :) + +=cut + +around 'next_storage' => sub { + my ($next_storage, $self, @args) = @_; + my $now = time; + + ## Do we need to validate the replicants? 
+ if( + $self->has_auto_validate_every && + ($self->auto_validate_every + $self->pool->last_validated) <= $now + ) { + $self->pool->validate_replicants; + } + + ## Get a replicant, or the master if none + my $next = $self->$next_storage(@args); + return $next ? $next:$self->master; +}; + +=head2 before: select + +Advice on the select attribute. Each time we use a replicant +we need to change it via the storage pool algorithm. That way we are spreading +the load evenly (hopefully) across existing capacity. + +=cut + +before 'select' => sub { + my $self = shift @_; + my $next_replicant = $self->next_storage; + $self->current_replicant($next_replicant); +}; + +=head2 before: select_single + +Advice on the select_single attribute. Each time we use a replicant +we need to change it via the storage pool algorithm. That way we are spreading +the load evenly (hopefully) across existing capacity. + +=cut + +before 'select_single' => sub { + my $self = shift @_; + my $next_replicant = $self->next_storage; + $self->current_replicant($next_replicant); +}; + +=head2 before: columns_info_for + +Advice on the current_replicant_storage attribute. Each time we use a replicant +we need to change it via the storage pool algorithm. That way we are spreading +the load evenly (hopefully) across existing capacity. + +=cut + +before 'columns_info_for' => sub { + my $self = shift @_; + my $next_replicant = $self->next_storage; + $self->current_replicant($next_replicant); +}; + +=head1 AUTHOR + +John Napiorkowski + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. 
+ +=cut + +1; diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm new file mode 100644 index 0000000..495b41d --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/First.pm @@ -0,0 +1,52 @@ +package DBIx::Class::Storage::DBI::Replicated::Balancer::First; + +use List::Util qw(shuffle); +use Moose; +with 'DBIx::Class::Storage::DBI::Replicated::Balancer'; + +=head1 NAME + +DBIx::Class::Storage::DBI::Replicated::Balancer::First; Just get the First Balancer + +=head1 SYNOPSIS + +This class is used internally by L. You +shouldn't need to create instances of this class. + +=head1 DESCRIPTION + +Given a pool (L) of replicated +database's (L), defines a +method by which query load can be spread out across each replicant in the pool. + +This Balancer just get's whatever is the first replicant in the pool + +=head1 ATTRIBUTES + +This class defines the following attributes. + +=head1 METHODS + +This class defines the following methods. + +=head2 next_storage + +Just get the first storage. Probably only good when you have one replicant. + +=cut + +sub next_storage { + return (shift->pool->active_replicants)[0]; +} + +=head1 AUTHOR + +John Napiorkowski + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut + +1; \ No newline at end of file diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm new file mode 100644 index 0000000..e37b291 --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Replicated/Balancer/Random.pm @@ -0,0 +1,56 @@ +package DBIx::Class::Storage::DBI::Replicated::Balancer::Random; + +use List::Util qw(shuffle); +use Moose; +with 'DBIx::Class::Storage::DBI::Replicated::Balancer'; + +=head1 NAME + +DBIx::Class::Storage::DBI::Replicated::Balancer::Random; A 'random' Balancer + +=head1 SYNOPSIS + +This class is used internally by L. 
You +shouldn't need to create instances of this class. + +=head1 DESCRIPTION + +Given a pool (L) of replicated +database's (L), defines a +method by which query load can be spread out across each replicant in the pool. + +This Balancer uses L keyword 'shuffle' to randomly pick an active +replicant from the associated pool. This may or may not be random enough for +you, patches welcome. + +=head1 ATTRIBUTES + +This class defines the following attributes. + +=head1 METHODS + +This class defines the following methods. + +=head2 next_storage + +Returns an active replicant at random. Please note that due to the nature of +the word 'random' this means it's possible for a particular active replicant to +be requested several times in a row. + +=cut + +sub next_storage { + return (shuffle(shift->pool->active_replicants))[0]; +} + +=head1 AUTHOR + +John Napiorkowski + +=head1 LICENSE + +You may distribute this code under the same terms as Perl itself. + +=cut + +1; \ No newline at end of file diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm new file mode 100644 index 0000000..56a40c9 --- /dev/null +++ b/lib/DBIx/Class/Storage/DBI/Replicated/Pool.pm @@ -0,0 +1,266 @@ +package DBIx::Class::Storage::DBI::Replicated::Pool; + +use Moose; +use MooseX::AttributeHelpers; +use DBIx::Class::Storage::DBI::Replicated::Replicant; +use List::Util qw(sum); + +=head1 NAME + +DBIx::Class::Storage::DBI::Replicated::Pool; Manage a pool of replicants + +=head1 SYNOPSIS + +This class is used internally by L. You +shouldn't need to create instances of this class. + +=head1 DESCRIPTION + +In a replicated storage type, there is at least one replicant to handle the +read only traffic. The Pool class manages this replicant, or list of +replicants, and gives some methods for querying information about their status. + +=head1 ATTRIBUTES + +This class defines the following attributes. 
+ +=head2 maximum_lag ($num) + +This is a number which defines the maximum allowed lag returned by the +L method. The default is 0. In +general, this should return a larger number when the replicant is lagging +behind it's master, however the implementation of this is database specific, so +don't count on this number having a fixed meaning. For example, MySQL will +return a number of seconds that the replicating database is lagging. + +=cut + +has 'maximum_lag' => ( + is=>'rw', + isa=>'Num', + required=>1, + lazy=>1, + default=>0, +); + +=head2 last_validated + +This is an integer representing a time since the last time the replicants were +validated. It's nothing fancy, just an integer provided via the perl time +builtin. + +=cut + +has 'last_validated' => ( + is=>'rw', + isa=>'Int', + reader=>'last_validated', + writer=>'_last_validated', + lazy=>1, + default=>sub { + time; + }, +); + +=head2 replicant_type ($classname) + +Base class used to instantiate replicants that are in the pool. Unless you +need to subclass L you should +just leave this alone. + +=cut + +has 'replicant_type' => ( + is=>'ro', + isa=>'ClassName', + required=>1, + default=>'DBIx::Class::Storage::DBI', + handles=>{ + 'create_replicant' => 'new', + }, +); + +=head2 replicants + +A hashref of replicant, with the key being the dsn and the value returning the +actual replicant storage. For example if the $dsn element is something like: + + "dbi:SQLite:dbname=dbfile" + +You could access the specific replicant via: + + $schema->storage->replicants->{'dbname=dbfile'} + +This attributes also supports the following helper methods + +=over 4 + +=item set_replicant($key=>$storage) + +Pushes a replicant onto the HashRef under $key + +=item get_replicant($key) + +Retrieves the named replicant + +=item has_replicants + +Returns true if the Pool defines replicants. 
+ +=item num_replicants + +The number of replicants in the pool + +=item delete_replicant ($key) + +removes the replicant under $key from the pool + +=back + +=cut + +has 'replicants' => ( + is=>'rw', + metaclass => 'Collection::Hash', + isa=>'HashRef[DBIx::Class::Storage::DBI]', + default=>sub {{}}, + provides => { + 'set' => 'set_replicant', + 'get' => 'get_replicant', + 'empty' => 'has_replicants', + 'count' => 'num_replicants', + 'delete' => 'delete_replicant', + }, +); + +=head1 METHODS + +This class defines the following methods. + +=head2 connect_replicants ($schema, Array[$connect_info]) + +Given an array of $dsn suitable for connected to a database, create an +L object and store it in the +L attribute. + +=cut + +sub connect_replicants { + my $self = shift @_; + my $schema = shift @_; + + my @newly_created = (); + foreach my $connect_info (@_) { + + my $replicant = $self->create_replicant($schema); + $replicant->connect_info($connect_info); + $replicant->ensure_connected; + DBIx::Class::Storage::DBI::Replicated::Replicant->meta->apply($replicant); + + my ($key) = ($connect_info->[0]=~m/^dbi\:.+\:(.+)$/); + $self->set_replicant( $key => $replicant); + push @newly_created, $replicant; + } + + return @newly_created; +} + +=head2 connected_replicants + +Returns true if there are connected replicants. Actually is overloaded to +return the number of replicants. So you can do stuff like: + + if( my $num_connected = $storage->has_connected_replicants ) { + print "I have $num_connected connected replicants"; + } else { + print "Sorry, no replicants."; + } + +This method will actually test that each replicant in the L hashref +is actually connected, try not to hit this 10 times a second. + +=cut + +sub connected_replicants { + my $self = shift @_; + return sum( map { + $_->connected ? 1:0 + } $self->all_replicants ); +} + +=head2 active_replicants + +This is an array of replicants that are considered to be active in the pool. 
+This does not check to see if they are connected, but if they are not, DBIC
+should automatically reconnect them for us when we hit them with a query.
+
+=cut
+
+sub active_replicants {
+ my $self = shift @_;
+ return ( grep {$_} map {
+ $_->active ? $_:0
+ } $self->all_replicants );
+}
+
+=head2 all_replicants
+
+Just a simple array of all the replicant storages. No particular order to the
+array is given, nor should any meaning be derived.
+
+=cut
+
+sub all_replicants {
+ my $self = shift @_;
+ return values %{$self->replicants};
+}
+
+=head2 validate_replicants
+
+This does a check to see if 1) each replicant is connected (or reconnectable),
+2) that it is ->is_replicating, and 3) that it is not exceeding the lag amount
+defined by L. Replicants that fail any of these tests are set to
+inactive, and thus removed from the replication pool.
+
+This tests L, since a replicant that has been previously marked
+as inactive can be reactivated should it start to pass the validation tests again.
+
+See L for more about checking if a replicating
+connection is not following a master or is lagging.
+
+Calling this method will generate queries on the replicant databases so it is
+not recommended that you run them very often.
+
+=cut
+
+sub validate_replicants {
+ my $self = shift @_;
+ foreach my $replicant($self->all_replicants) {
+ if(
+ $replicant->is_replicating &&
+ $replicant->lag_behind_master <= $self->maximum_lag &&
+ $replicant->ensure_connected
+ ) {
+ ## TODO:: Hook debug for this
+ $replicant->active(1)
+ } else {
+ ## TODO:: Hook debug for this
+ $replicant->active(0);
+ }
+ }
+ 
+ ## Mark that we completed this validation.
+ $self->_last_validated(time);
+}
+
+=head1 AUTHOR
+
+John Napiorkowski
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself. 
+
+=cut
+
+1;
diff --git a/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm b/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
new file mode 100644
index 0000000..e1e7f8d
--- /dev/null
+++ b/lib/DBIx/Class/Storage/DBI/Replicated/Replicant.pm
@@ -0,0 +1,77 @@
+package DBIx::Class::Storage::DBI::Replicated::Replicant;
+
+use Moose::Role;
+
+=head1 NAME
+
+DBIx::Class::Storage::DBI::Replicated::Replicant - A replicated DBI Storage Role
+
+=head1 SYNOPSIS
+
+This class is used internally by L.
+
+=head1 DESCRIPTION
+
+Replicants are DBI Storages that follow a master DBI Storage. Typically this
+is accomplished via an external replication system. Please see the documents
+for L for more details.
+
+This class exists to define methods of a DBI Storage that only make sense when
+it's a classic 'slave' in a pool of slave databases which replicate from a
+given master database.
+
+=head1 ATTRIBUTES
+
+This class defines the following attributes.
+
+=head2 active
+
+This is a boolean which allows you to programmatically activate or deactivate a
+replicant from the pool. This way you can do things like disallow a replicant
+when it gets too far behind the master, if it stops replicating, etc.
+
+This attribute DOES NOT reflect a replicant's internal status, i.e. if it is
+properly replicating from a master and has not fallen too many seconds behind a
+reliability threshold. For that, use L and L.
+Since the implementation of those functions is database specific (and not all DBIC
+supported DB's support replication) you should refer to your database specific
+storage driver for more information.
+
+=cut
+
+has 'active' => (
+ is=>'rw',
+ isa=>'Bool',
+ lazy=>1,
+ required=>1,
+ default=>1,
+);
+
+=head1 METHODS
+
+This class defines the following methods. 
+
+=head2 after: _query_start
+
+Advice on the _query_start method to add more debugging
+
+=cut
+
+around '_query_start' => sub {
+ my ($method, $self, $sql, @bind) = @_;
+ my $dsn = $self->connect_info->[0];
+ $self->$method("DSN: $dsn SQL: $sql", @bind);
+};
+
+
+=head1 AUTHOR
+
+John Napiorkowski
+
+=head1 LICENSE
+
+You may distribute this code under the same terms as Perl itself.
+
+=cut
+
+1;
\ No newline at end of file
diff --git a/lib/DBIx/Class/Storage/DBI/mysql.pm b/lib/DBIx/Class/Storage/DBI/mysql.pm
index ec36176..dadcbf0 100644
--- a/lib/DBIx/Class/Storage/DBI/mysql.pm
+++ b/lib/DBIx/Class/Storage/DBI/mysql.pm
@@ -33,6 +33,15 @@ sub _svp_rollback {
 $self->dbh->do("ROLLBACK TO SAVEPOINT $name")
 }
 
+
+sub is_replicating {
+ my $status = shift->dbh->selectrow_hashref('show slave status');
+ return ($status->{Slave_IO_Running} eq 'Yes') && ($status->{Slave_SQL_Running} eq 'Yes');
+}
+
+sub lag_behind_master {
+ return shift->dbh->selectrow_hashref('show slave status')->{Seconds_Behind_Master};
+}
 1;
diff --git a/lib/SQL/Translator/Parser/DBIx/Class.pm b/lib/SQL/Translator/Parser/DBIx/Class.pm
index 468256e..e960ff6 100644
--- a/lib/SQL/Translator/Parser/DBIx/Class.pm
+++ b/lib/SQL/Translator/Parser/DBIx/Class.pm
@@ -122,8 +122,15 @@ sub parse {
 my $rel_table = $othertable->name;
 
 # Get the key information, mapping off the foreign/self markers
- my @cond = keys(%{$rel_info->{cond}});
+ my @cond = keys(%{$rel_info->{cond}}); 
 my @refkeys = map {/^\w+\.(\w+)$/} @cond;
+
+ # Force the order of the referenced fields to be the same as
+ # ->add_columns method. 
+ my $idx; + my %other_columns_idx = map {$_ => ++$idx } $othertable->columns; + @refkeys = sort { $other_columns_idx{$a} cmp $other_columns_idx{$b} } @refkeys; + my @keys = map {$rel_info->{cond}->{$_} =~ /^\w+\.(\w+)$/} @cond; if($rel_table) diff --git a/t/71mysql.t b/t/71mysql.t index 4aa48bd..f5ee39e 100644 --- a/t/71mysql.t +++ b/t/71mysql.t @@ -115,5 +115,5 @@ NULLINSEARCH: { # clean up our mess END { - $dbh->do("DROP TABLE artist") if $dbh; -} + #$dbh->do("DROP TABLE artist") if $dbh; +} \ No newline at end of file diff --git a/t/77prefetch.t b/t/77prefetch.t index a0ab5f2..a7834ba 100644 --- a/t/77prefetch.t +++ b/t/77prefetch.t @@ -227,7 +227,7 @@ $schema->storage->debugcb(sub { $queries++ }); $schema->storage->debug(1); my $tree_like = - $schema->resultset('TreeLike')->find(4, + $schema->resultset('TreeLike')->find(5, { join => { parent => { parent => 'parent' } }, prefetch => { parent => { parent => 'parent' } } }); @@ -244,21 +244,21 @@ $schema->storage->debugobj->callback(undef); cmp_ok($queries, '==', 1, 'Only one query run'); -$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 1}); +$tree_like = $schema->resultset('TreeLike')->search({'me.id' => 2}); $tree_like = $tree_like->search_related('children')->search_related('children')->search_related('children')->first; is($tree_like->name, 'quux', 'Tree search_related ok'); $tree_like = $schema->resultset('TreeLike')->search_related('children', - { 'children.id' => 2, 'children_2.id' => 3 }, + { 'children.id' => 3, 'children_2.id' => 4 }, { prefetch => { children => 'children' } } )->first; is(eval { $tree_like->children->first->children->first->name }, 'quux', 'Tree search_related with prefetch ok'); $tree_like = eval { $schema->resultset('TreeLike')->search( - { 'children.id' => 2, 'children_2.id' => 5 }, + { 'children.id' => 3, 'children_2.id' => 6 }, { join => [qw/children children/] } - )->search_related('children', { 'children_4.id' => 6 }, { prefetch => 'children' } + 
)->search_related('children', { 'children_4.id' => 7 }, { prefetch => 'children' } )->first->children->first; }; is(eval { $tree_like->name }, 'fong', 'Tree with multiple has_many joins ok'); diff --git a/t/86sqlt.t b/t/86sqlt.t index 366f907..7db1d28 100644 --- a/t/86sqlt.t +++ b/t/86sqlt.t @@ -153,9 +153,9 @@ my %fk_constraints = ( treelike => [ { 'display' => 'treelike->treelike for parent', - 'name' => 'treelike_fk_parent', 'index_name' => 'parent', + 'name' => 'treelike_fk_parent_fk', 'index_name' => 'parent_fk', 'selftable' => 'treelike', 'foreigntable' => 'treelike', - 'selfcols' => ['parent'], 'foreigncols' => ['id'], + 'selfcols' => ['parent_fk'], 'foreigncols' => ['id'], on_delete => 'CASCADE', on_update => 'CASCADE', deferrable => 1, }, ], @@ -198,7 +198,7 @@ my %fk_constraints = ( 'display' => 'forceforeign->artist', 'name' => 'forceforeign_fk_artist', 'index_name' => 'artist', 'selftable' => 'forceforeign', 'foreigntable' => 'artist', - 'selfcols' => ['artist'], 'foreigncols' => ['artist_id'], + 'selfcols' => ['artist'], 'foreigncols' => ['artistid'], on_delete => '', on_update => '', deferrable => 1, }, ], diff --git a/t/93storage_replication.t b/t/93storage_replication.t index 62a4d15..7617af3 100644 --- a/t/93storage_replication.t +++ b/t/93storage_replication.t @@ -2,265 +2,439 @@ use strict; use warnings; use lib qw(t/lib); use Test::More; +use DBICTest; BEGIN { - eval "use DBD::Multi"; + eval "use Moose; use Test::Moose"; plan $@ - ? ( skip_all => 'needs DBD::Multi for testing' ) - : ( tests => 20 ); -} + ? ( skip_all => 'needs Moose for testing' ) + : ( tests => 50 ); +} + +use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool'; +use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer'; +use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant'; +use_ok 'DBIx::Class::Storage::DBI::Replicated'; + +=head1 HOW TO USE + + This is a test of the replicated storage system. 
This will work in one of + two ways, either it was try to fake replication with a couple of SQLite DBs + and creative use of copy, or if you define a couple of %ENV vars correctly + will try to test those. If you do that, it will assume the setup is properly + replicating. Your results may vary, but I have demonstrated this to work with + mysql native replication. + +=cut + ## ---------------------------------------------------------------------------- ## Build a class to hold all our required testing data and methods. ## ---------------------------------------------------------------------------- -TESTSCHEMACLASS: { - - package DBIx::Class::DBI::Replicated::TestReplication; - - use DBI; - use DBICTest; - use File::Copy; - - ## Create a constructor - +TESTSCHEMACLASSES: { + + ## --------------------------------------------------------------------- ## + ## Create an object to contain your replicated stuff. + ## --------------------------------------------------------------------- ## + + package DBIx::Class::DBI::Replicated::TestReplication; + + use DBICTest; + use base qw/Class::Accessor::Fast/; + + __PACKAGE__->mk_accessors( qw/schema/ ); + + ## Initialize the object + sub new { - my $class = shift @_; - my %params = @_; - - my $self = bless { - db_paths => $params{db_paths}, - dsns => $class->init_dsns(%params), - schema=>$class->init_schema, - }, $class; - - $self->connect; - return $self; - } - - ## get the DSNs. 
We build this up from the list of file paths + my $class = shift @_; + my $self = $class->SUPER::new(@_); - sub init_dsns { - my $class = shift @_; - my %params = @_; - my $db_paths = $params{db_paths}; - - my @dsn = map { - "dbi:SQLite:${_}"; - } @$db_paths; - - return \@dsn; - } - - ## get the Schema and set the replication storage type - - sub init_schema { - my $class = shift @_; - my $schema = DBICTest->init_schema(); - $schema->storage_type( '::DBI::Replicated' ); - - return $schema; - } - - ## connect the Schema - - sub connect { - my $self = shift @_; - my ($master, @slaves) = @{$self->{dsns}}; - my $master_connect_info = [$master, '','', {AutoCommit=>1, PrintError=>0}]; - - my @slavesob; - foreach my $slave (@slaves) - { - my $dbh = shift @{$self->{slaves}} - || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0}); - - push @{$master_connect_info->[-1]->{slaves_connect_info}}, - [$dbh, '','',{priority=>10}]; - - push @slavesob, - $dbh; - } - - ## Keep track of the created slave databases - $self->{slaves} = \@slavesob; - - $self - ->{schema} - ->connect(@$master_connect_info); - } - - ## replication - - sub replicate { - my $self = shift @_; - my ($master, @slaves) = @{$self->{db_paths}}; - - foreach my $slave (@slaves) { - copy($master, $slave); - } - } - - ## Cleanup afer ourselves. 
- - sub cleanup { - my $self = shift @_; - my ($master, @slaves) = @{$self->{db_paths}}; - - foreach my $slave (@slaves) { - unlink $slave; - } + $self->schema( $self->init_schema ); + return $self; } + + ## Get the Schema and set the replication storage type + + sub init_schema { + my $class = shift @_; + + my $schema = DBICTest->init_schema( + storage_type=>{ + '::DBI::Replicated' => { + balancer_type=>'::Random', + balancer_args=>{ + auto_validate_every=>100, + }, + } + }, + deploy_args=>{ + add_drop_table => 1, + }, + ); + + return $schema; + } + + sub generate_replicant_connect_info {} + sub replicate {} + sub cleanup {} + + + ## --------------------------------------------------------------------- ## + ## Subclass for when you are using SQLite for testing, this provides a fake + ## replication support. + ## --------------------------------------------------------------------- ## + + package DBIx::Class::DBI::Replicated::TestReplication::SQLite; + + use DBICTest; + use File::Copy; + use base 'DBIx::Class::DBI::Replicated::TestReplication'; + + __PACKAGE__->mk_accessors( qw/master_path slave_paths/ ); + + ## Set the mastep path from DBICTest + + sub new { + my $class = shift @_; + my $self = $class->SUPER::new(@_); - ## Force a reconnection + $self->master_path( DBICTest->_sqlite_dbfilename ); + $self->slave_paths([ + "t/var/DBIxClass_slave1.db", + "t/var/DBIxClass_slave2.db", + ]); + + return $self; + } - sub reconnect { - my $self = shift @_; - my $schema = $self->connect; - $self->{schema} = $schema; - return $schema; - } + ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for + ## $storage->connect_info to be used for connecting replicants. + + sub generate_replicant_connect_info { + my $self = shift @_; + my @dsn = map { + "dbi:SQLite:${_}"; + } @{$self->slave_paths}; + + return map { [$_,'','',{AutoCommit=>1}] } @dsn; + } + + ## Do a 'good enough' replication by copying the master dbfile over each of + ## the slave dbfiles. 
If the master is SQLite we do this, otherwise we + ## just do a one second pause to let the slaves catch up. + + sub replicate { + my $self = shift @_; + foreach my $slave (@{$self->slave_paths}) { + copy($self->master_path, $slave); + } + } + + ## Cleanup after ourselves. Unlink all gthe slave paths. + + sub cleanup { + my $self = shift @_; + foreach my $slave (@{$self->slave_paths}) { + unlink $slave; + } + } + + ## --------------------------------------------------------------------- ## + ## Subclass for when you are setting the databases via custom export vars + ## This is for when you have a replicating database setup that you are + ## going to test against. You'll need to define the correct $ENV and have + ## two slave databases to test against, as well as a replication system + ## that will replicate in less than 1 second. + ## --------------------------------------------------------------------- ## + + package DBIx::Class::DBI::Replicated::TestReplication::Custom; + use base 'DBIx::Class::DBI::Replicated::TestReplication'; + + ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for + ## $storage->connect_info to be used for connecting replicants. 
+ + sub generate_replicant_connect_info { + return ( + [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}], + [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}], + ); + } + + ## pause a bit to let the replication catch up + + sub replicate { + sleep 1; + } } ## ---------------------------------------------------------------------------- ## Create an object and run some tests ## ---------------------------------------------------------------------------- -my %params = ( - db_paths => [ - "t/var/DBIxClass.db", - "t/var/DBIxClass_slave1.db", - "t/var/DBIxClass_slave2.db", - ], -); - -ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new(%params) - => 'Created a replication object'; - -isa_ok $replicate->{schema} - => 'DBIx::Class::Schema'; +## Thi first bunch of tests are basic, just make sure all the bits are behaving + +my $replicated_class = DBICTest->has_custom_dsn ? 
+ 'DBIx::Class::DBI::Replicated::TestReplication::Custom' : + 'DBIx::Class::DBI::Replicated::TestReplication::SQLite'; + +ok my $replicated = $replicated_class->new + => 'Created a replication object'; + +isa_ok $replicated->schema + => 'DBIx::Class::Schema'; + +isa_ok $replicated->schema->storage + => 'DBIx::Class::Storage::DBI::Replicated'; + +ok $replicated->schema->storage->meta + => 'has a meta object'; + +isa_ok $replicated->schema->storage->master + => 'DBIx::Class::Storage::DBI'; + +isa_ok $replicated->schema->storage->pool + => 'DBIx::Class::Storage::DBI::Replicated::Pool'; + +does_ok $replicated->schema->storage->balancer + => 'DBIx::Class::Storage::DBI::Replicated::Balancer'; + +ok my @replicant_connects = $replicated->generate_replicant_connect_info + => 'got replication connect information'; + +ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects) + => 'Created some storages suitable for replicants'; + +isa_ok $replicated->schema->storage->balancer->current_replicant + => 'DBIx::Class::Storage::DBI'; + +ok $replicated->schema->storage->pool->has_replicants + => 'does have replicants'; + +is $replicated->schema->storage->pool->num_replicants => 2 + => 'has two replicants'; + +does_ok $replicated_storages[0] + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +does_ok $replicated_storages[1] + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +my @replicant_names = keys %{$replicated->schema->storage->replicants}; + +does_ok $replicated->schema->storage->replicants->{$replicant_names[0]} + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +does_ok $replicated->schema->storage->replicants->{$replicant_names[1]} + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; ## Add some info to the database -$replicate - ->{schema} - ->populate('Artist', [ - [ qw/artistid name/ ], - [ 4, "Ozric Tentacles"], - ]); - +$replicated + ->schema + ->populate('Artist', [ + [ qw/artistid name/ ], + [ 4, 
"Ozric Tentacles"], + ]); + ## Make sure all the slaves have the table definitions -$replicate->replicate; +$replicated->replicate; ## Make sure we can read the data. -ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4) - => 'Created Result'; +ok my $artist1 = $replicated->schema->resultset('Artist')->find(4) + => 'Created Result'; isa_ok $artist1 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist1->name, 'Ozric Tentacles' - => 'Found expected name for first result'; + => 'Found expected name for first result'; ## Add some new rows that only the master will have This is because ## we overload any type of write operation so that is must hit the master ## database. -$replicate - ->{schema} - ->populate('Artist', [ - [ qw/artistid name/ ], - [ 5, "Doom's Children"], - [ 6, "Dead On Arrival"], - [ 7, "Watergate"], - ]); - -## Reconnect the database -$replicate->reconnect; - -## Alright, the database 'cluster' is not in a consistent state. When we do -## a read now we expect bad news - -is $replicate->{schema}->resultset('Artist')->find(5), undef - => 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet'; +$replicated + ->schema + ->populate('Artist', [ + [ qw/artistid name/ ], + [ 5, "Doom's Children"], + [ 6, "Dead On Arrival"], + [ 7, "Watergate"], + ]); + +SKIP: { + ## We can't do this test if we have a custom replicants, since we assume + ## if there are custom one that you are trying to test a real replicating + ## system. See docs above for more. + + skip 'Cannot test inconsistent replication since you have a real replication system', 1 + if DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"}; + + ## Alright, the database 'cluster' is not in a consistent state. 
When we do + ## a read now we expect bad news + is $replicated->schema->resultset('Artist')->find(5), undef + => 'read after disconnect fails because it uses a replicant which we have neglected to "replicate" yet'; +} ## Make sure all the slaves have the table definitions -$replicate->replicate; +$replicated->replicate; ## Should find some data now -ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5) - => 'Sync succeed'; - +ok my $artist2 = $replicated->schema->resultset('Artist')->find(5) + => 'Sync succeed'; + isa_ok $artist2 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist2->name, "Doom's Children" - => 'Found expected name for first result'; - -## What happens when we delete one of the slaves? + => 'Found expected name for first result'; -ok my $slave1 = @{$replicate->{slaves}}[0] - => 'Got Slave1'; +## What happens when we disconnect all the replicants? -ok $slave1->disconnect - => 'disconnected slave1'; +is $replicated->schema->storage->pool->connected_replicants => 2 + => "both replicants are connected"; + +$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect; +$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect; -$replicate->reconnect; +is $replicated->schema->storage->pool->connected_replicants => 0 + => "both replicants are now disconnected"; -ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6) - => 'Still finding stuff.'; - +## All these should pass, since the database should automatically reconnect + +ok my $artist3 = $replicated->schema->resultset('Artist')->find(6) + => 'Still finding stuff.'; + isa_ok $artist3 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist3->name, "Dead On Arrival" - => 'Found expected name for first result'; - -## Let's delete all the slaves - -ok my $slave2 = @{$replicate->{slaves}}[1] - => 'Got Slave2'; - -ok $slave2->disconnect - => 'Disconnected slave2'; - -$replicate->reconnect; - -## We expect an error now, since 
all the slaves are dead - -eval { - $replicate->{schema}->resultset('Artist')->find(4)->name; -}; - -ok $@ => 'Got error when trying to find artistid 4'; - -## This should also be an error + => 'Found expected name for first result'; -eval { - my $artist4 = $replicate->{schema}->resultset('Artist')->find(7); -}; +is $replicated->schema->storage->pool->connected_replicants => 1 + => "One replicant reconnected to handle the job"; + +## What happens when we try to select something that doesn't exist? -ok $@ => 'Got read errors after everything failed'; +ok ! $replicated->schema->resultset('Artist')->find(666) + => 'Correctly failed to find something.'; + +## test the reliable option -## make sure ->connect_info returns something sane - -ok $replicate->{schema}->storage->connect_info - => 'got something out of ->connect_info'; - -## Force a connection to the write source for testing. - -$replicate->{schema}->storage($replicate->{schema}->storage->write_source); - -## What happens when we do a find for something that doesn't exist? +TESTRELIABLE: { + + $replicated->schema->storage->set_reliable_storage; + + ok $replicated->schema->resultset('Artist')->find(2) + => 'Read from master 1'; + + ok $replicated->schema->resultset('Artist')->find(5) + => 'Read from master 2'; + + $replicated->schema->storage->set_balanced_storage; + + ok $replicated->schema->resultset('Artist')->find(3) + => 'Read from replicant'; +} -ok ! $replicate->{schema}->resultset('Artist')->find(666) - => 'Correctly did not find a bad artist id'; +## Make sure when reliable goes out of scope, we are using replicants again + +ok $replicated->schema->resultset('Artist')->find(1) + => 'back to replicant 1.'; + +ok $replicated->schema->resultset('Artist')->find(2) + => 'back to replicant 2.'; + +## set all the replicants to inactive, and make sure the balancer falls back to +## the master. 
+ +$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0); +$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0); + +ok $replicated->schema->resultset('Artist')->find(2) + => 'Fallback to master'; + +$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1); +$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1); + +ok $replicated->schema->resultset('Artist')->find(2) + => 'Returned to replicates'; + +## Getting slave status tests + +SKIP: { + ## We skip this tests unless you have a custom replicants, since the default + ## sqlite based replication tests don't support these functions. + + skip 'Cannot Test Replicant Status on Non Replicating Database', 9 + unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"}; + + $replicated->replicate; ## Give the slaves a chance to catchup. + + ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating + => 'Replicants are replicating'; + + is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0 + => 'Replicant is zero seconds behind master'; + + ## Test the validate replicants + + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 2 + => 'Still have 2 replicants after validation'; + + ## Force the replicants to fail the validate test by required their lag to + ## be negative (ie ahead of the master!) + + $replicated->schema->storage->pool->maximum_lag(-10); + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 0 + => 'No way a replicant be be ahead of the master'; + + ## Let's be fair to the replicants again. 
Let them lag up to 5 + + $replicated->schema->storage->pool->maximum_lag(5); + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 2 + => 'Both replicants in good standing again'; + + ## Check auto validate + + is $replicated->schema->storage->balancer->auto_validate_every, 100 + => "Got the expected value for auto validate"; + + ## This will make sure we auto validatge everytime + $replicated->schema->storage->balancer->auto_validate_every(0); + + ## set all the replicants to inactive, and make sure the balancer falls back to + ## the master. + + $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0); + $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0); + + ## Ok, now when we go to run a query, autovalidate SHOULD reconnect + + is $replicated->schema->storage->pool->active_replicants => 0 + => "both replicants turned off"; + + ok $replicated->schema->resultset('Artist')->find(5) + => 'replicant reactivated'; + + is $replicated->schema->storage->pool->active_replicants => 2 + => "both replicants reactivated"; +} ## Delete the old database files -$replicate->cleanup; +$replicated->cleanup; diff --git a/t/lib/DBICTest.pm b/t/lib/DBICTest.pm index 5c76153..8252ecc 100755 --- a/t/lib/DBICTest.pm +++ b/t/lib/DBICTest.pm @@ -29,6 +29,10 @@ DBIx::Class. my $schema = DBICTest->init_schema( no_deploy=>1, no_populate=>1, + storage_type=>'::DBI::Replicated', + storage_type_args=>{ + balancer_type=>'DBIx::Class::Storage::DBI::Replicated::Balancer::Random' + }, ); This method removes the test SQLite database in t/var/DBIxClass.db @@ -42,9 +46,17 @@ default, unless the no_deploy or no_populate flags are set. =cut +sub has_custom_dsn { + return $ENV{"DBICTEST_DSN"} ? 
1:0; +} + +sub _sqlite_dbfilename { + return "t/var/DBIxClass.db"; +} + sub _database { my $self = shift; - my $db_file = "t/var/DBIxClass.db"; + my $db_file = $self->_sqlite_dbfilename; unlink($db_file) if -e $db_file; unlink($db_file . "-journal") if -e $db_file . "-journal"; @@ -72,13 +84,18 @@ sub init_schema { } else { $schema = DBICTest::Schema->compose_namespace('DBICTest'); } + if( $args{storage_type}) { + $schema->storage_type($args{storage_type}); + } if ( !$args{no_connect} ) { $schema = $schema->connect($self->_database); - $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']); + $schema->storage->on_connect_do(['PRAGMA synchronous = OFF']) + unless $self->has_custom_dsn; } if ( !$args{no_deploy} ) { - __PACKAGE__->deploy_schema( $schema ); - __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} ); + __PACKAGE__->deploy_schema( $schema, $args{deploy_args} ); + __PACKAGE__->populate_schema( $schema ) + if( !$args{no_populate} ); } return $schema; } @@ -98,9 +115,10 @@ of tables for testing. 
sub deploy_schema { my $self = shift; my $schema = shift; + my $args = shift || {}; - if ($ENV{"DBICTEST_SQLT_DEPLOY"}) { - return $schema->deploy(); + if ($ENV{"DBICTEST_SQLT_DEPLOY"}) { + $schema->deploy($args); } else { open IN, "t/lib/sqlite.sql"; my $sql; @@ -108,6 +126,7 @@ sub deploy_schema { close IN; ($schema->storage->dbh->do($_) || print "Error on SQL: $_\n") for split(/;\n/, $sql); } + return; } =head2 populate_schema @@ -208,15 +227,16 @@ sub populate_schema { [ 1, 2 ], [ 1, 3 ], ]); - + $schema->populate('TreeLike', [ - [ qw/id parent name/ ], - [ 1, 0, 'foo' ], - [ 2, 1, 'bar' ], - [ 5, 1, 'blop' ], - [ 3, 2, 'baz' ], - [ 4, 3, 'quux' ], - [ 6, 2, 'fong' ], + [ qw/id parent_fk name/ ], + [ 1, undef, 'root' ], + [ 2, 1, 'foo' ], + [ 3, 2, 'bar' ], + [ 6, 2, 'blop' ], + [ 4, 3, 'baz' ], + [ 5, 4, 'quux' ], + [ 7, 3, 'fong' ], ]); $schema->populate('Track', [ @@ -258,16 +278,7 @@ sub populate_schema { [ 1, "Tools" ], [ 2, "Body Parts" ], ]); - - $schema->populate('CollectionObject', [ - [ qw/collection object/ ], - [ 1, 1 ], - [ 1, 2 ], - [ 1, 3 ], - [ 2, 4 ], - [ 2, 5 ], - ]); - + $schema->populate('TypedObject', [ [ qw/objectid type value/ ], [ 1, "pointy", "Awl" ], @@ -276,6 +287,14 @@ sub populate_schema { [ 4, "pointy", "Tooth" ], [ 5, "round", "Head" ], ]); + $schema->populate('CollectionObject', [ + [ qw/collection object/ ], + [ 1, 1 ], + [ 1, 2 ], + [ 1, 3 ], + [ 2, 4 ], + [ 2, 5 ], + ]); $schema->populate('Owners', [ [ qw/ownerid name/ ], diff --git a/t/lib/DBICTest/Schema/ForceForeign.pm b/t/lib/DBICTest/Schema/ForceForeign.pm index e3b2857..149f759 100644 --- a/t/lib/DBICTest/Schema/ForceForeign.pm +++ b/t/lib/DBICTest/Schema/ForceForeign.pm @@ -14,7 +14,7 @@ __PACKAGE__->set_primary_key(qw/artist/); # since it uses the PK __PACKAGE__->might_have( 'artist_1', 'DBICTest::Schema::Artist', { - 'foreign.artist_id' => 'self.artist', + 'foreign.artistid' => 'self.artist', }, { is_foreign_key_constraint => 1, }, diff --git 
a/t/lib/DBICTest/Schema/TreeLike.pm b/t/lib/DBICTest/Schema/TreeLike.pm index 297cfc6..c124241 100644 --- a/t/lib/DBICTest/Schema/TreeLike.pm +++ b/t/lib/DBICTest/Schema/TreeLike.pm @@ -6,14 +6,22 @@ use base qw/DBIx::Class::Core/; __PACKAGE__->table('treelike'); __PACKAGE__->add_columns( 'id' => { data_type => 'integer', is_auto_increment => 1 }, - 'parent' => { data_type => 'integer' }, + 'parent_fk' => { data_type => 'integer' , is_nullable=>1}, 'name' => { data_type => 'varchar', size => 100, }, ); __PACKAGE__->set_primary_key(qw/id/); __PACKAGE__->belongs_to('parent', 'TreeLike', - { 'foreign.id' => 'self.parent' }); -__PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent' => 'self.id' }); + { 'foreign.id' => 'self.parent_fk' }); +__PACKAGE__->has_many('children', 'TreeLike', { 'foreign.parent_fk' => 'self.id' }); +## since this is a self referential table we need to do a post deploy hook and get +## some data in while constraints are off + + sub sqlt_deploy_hook { + my ($self, $sqlt_table) = @_; + + $sqlt_table->add_index(name => 'idx_name', fields => ['name']); + } 1; diff --git a/t/lib/DBICTest/Schema/TwoKeys.pm b/t/lib/DBICTest/Schema/TwoKeys.pm index b6dedf0..beced31 100755 --- a/t/lib/DBICTest/Schema/TwoKeys.pm +++ b/t/lib/DBICTest/Schema/TwoKeys.pm @@ -10,7 +10,11 @@ __PACKAGE__->add_columns( ); __PACKAGE__->set_primary_key(qw/artist cd/); -__PACKAGE__->belongs_to( artist => 'DBICTest::Schema::Artist' ); +__PACKAGE__->belongs_to( + artist => 'DBICTest::Schema::Artist', + {'foreign.artistid'=>'self.artist'}, +); + __PACKAGE__->belongs_to( cd => 'DBICTest::Schema::CD', undef, { is_deferrable => 0 } ); __PACKAGE__->has_many( diff --git a/t/lib/sqlite.sql b/t/lib/sqlite.sql index 5f17ebe..828c85a 100644 --- a/t/lib/sqlite.sql +++ b/t/lib/sqlite.sql @@ -151,7 +151,7 @@ CREATE TABLE tags ( -- CREATE TABLE treelike ( id INTEGER PRIMARY KEY NOT NULL, - parent integer NOT NULL, + parent_fk integer NULL, name varchar(100) NOT NULL );