X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=t%2F93storage_replication.t;h=ef2092a63f38601f62d15510eaf0c82525c81e2a;hb=d59cf2f2c53d1c57d62fd3313f22d4a2cbaf5ceb;hp=43d3e77d2c3f7311a3daa282e7e5bb41f16c2f0e;hpb=2bf79155a8be1532cce2538e967f32c4ff22a87b;p=dbsrgits%2FDBIx-Class.git

diff --git a/t/93storage_replication.t b/t/93storage_replication.t
index 43d3e77..ef2092a 100644
--- a/t/93storage_replication.t
+++ b/t/93storage_replication.t
@@ -2,331 +2,837 @@
 use strict;
 use warnings;
 use lib qw(t/lib);
 use Test::More;
-use Data::Dump qw/dump/;
+use Test::Exception;
+use DBICTest;
+use List::Util 'first';
+use Scalar::Util 'reftype';
+use File::Spec;
+use IO::Handle;
 
 BEGIN {
-    eval "use Moose";
+    eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
     plan $@
-        ? ( skip_all => 'needs Moose for testing' )
-        : ( tests => 2 );
-}
+        ? ( skip_all => "Deps not installed: $@" )
+        : ( tests => 126 );
+}
+
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Balancer';
+use_ok 'DBIx::Class::Storage::DBI::Replicated::Replicant';
+use_ok 'DBIx::Class::Storage::DBI::Replicated';
+
+use Moose();
+use MooseX::Types();
+diag "Using Moose version $Moose::VERSION and MooseX::Types version $MooseX::Types::VERSION";
+
+=head1 HOW TO USE
+
+    This is a test of the replicated storage system.  This will work in one of
+    two ways: either it will try to fake replication with a couple of SQLite DBs
+    and creative use of copy, or, if you define a couple of %ENV vars correctly,
+    it will try to test those.  If you do that, it will assume the setup is
+    properly replicating.  Your results may vary, but I have demonstrated this
+    to work with mysql native replication.
+
+=cut
+
 ## ----------------------------------------------------------------------------
 ## Build a class to hold all our required testing data and methods.
 ## ----------------------------------------------------------------------------
 
-TESTSCHEMACLASS: {
+TESTSCHEMACLASSES: {
+
+    ## --------------------------------------------------------------------- ##
+    ## Create an object to contain your replicated stuff.
+ ## --------------------------------------------------------------------- ## package DBIx::Class::DBI::Replicated::TestReplication; - + use DBICTest; use base qw/Class::Accessor::Fast/; - + __PACKAGE__->mk_accessors( qw/schema/ ); ## Initialize the object - - sub new { - my $proto = shift; - my $class = ref( $proto ) || $proto; - my $self = {}; - - bless( $self, $class ); - - $self->schema( $self->init_schema ); - - return $self; - } - - ## get the Schema and set the replication storage type - + + sub new { + my ($class, $schema_method) = (shift, shift); + my $self = $class->SUPER::new(@_); + + $self->schema( $self->init_schema($schema_method) ); + return $self; + } + + ## Get the Schema and set the replication storage type + sub init_schema { - my $class = shift @_; - my $schema = DBICTest->init_schema(storage_type=>'::DBI::Replicated'); + # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here + local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ }; + + my ($class, $schema_method) = @_; + + my $method = "get_schema_$schema_method"; + my $schema = $class->$method; + return $schema; } + + sub get_schema_by_storage_type { + DBICTest->init_schema( + sqlite_use_file => 1, + storage_type=>{ + '::DBI::Replicated' => { + balancer_type=>'::Random', + balancer_args=>{ + auto_validate_every=>100, + master_read_weight => 1 + }, + } + }, + deploy_args=>{ + add_drop_table => 1, + }, + ); + } + + sub get_schema_by_connect_info { + DBICTest->init_schema( + sqlite_use_file => 1, + storage_type=> '::DBI::Replicated', + balancer_type=>'::Random', + balancer_args=> { + auto_validate_every=>100, + master_read_weight => 1 + }, + deploy_args=>{ + add_drop_table => 1, + }, + ); + } + + sub generate_replicant_connect_info {} + sub replicate {} + sub cleanup {} + + ## --------------------------------------------------------------------- ## + ## Add a connect_info option to test option merging. + ## --------------------------------------------------------------------- ## + { + package DBIx::Class::Storage::DBI::Replicated; + + use Moose; + + __PACKAGE__->meta->make_mutable; + + around connect_info => sub { + my ($next, $self, $info) = @_; + $info->[3]{master_option} = 1; + $self->$next($info); + }; + + __PACKAGE__->meta->make_immutable; + + no Moose; + } + + ## --------------------------------------------------------------------- ## + ## Subclass for when you are using SQLite for testing, this provides a fake + ## replication support. + ## --------------------------------------------------------------------- ## + + package DBIx::Class::DBI::Replicated::TestReplication::SQLite; + + use DBICTest; + use File::Copy; + use base 'DBIx::Class::DBI::Replicated::TestReplication'; + + __PACKAGE__->mk_accessors(qw/master_path slave_paths/); + + ## Set the master path from DBICTest + + sub new { + my $class = shift @_; + my $self = $class->SUPER::new(@_); + + $self->master_path( DBICTest->_sqlite_dbfilename ); + $self->slave_paths([ + File::Spec->catfile(qw/t var DBIxClass_slave1.db/), + File::Spec->catfile(qw/t var DBIxClass_slave2.db/), + ]); + + return $self; + } + + ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for + ## $storage->connect_info to be used for connecting replicants. 
+ + sub generate_replicant_connect_info { + my $self = shift @_; + my @dsn = map { + "dbi:SQLite:${_}"; + } @{$self->slave_paths}; + + my @connect_infos = map { [$_,'','',{AutoCommit=>1}] } @dsn; + + ## Make sure nothing is left over from a failed test + $self->cleanup; + + ## try a hashref too + my $c = $connect_infos[0]; + $connect_infos[0] = { + dsn => $c->[0], + user => $c->[1], + password => $c->[2], + %{ $c->[3] } + }; + + @connect_infos + } + + ## Do a 'good enough' replication by copying the master dbfile over each of + ## the slave dbfiles. If the master is SQLite we do this, otherwise we + ## just do a one second pause to let the slaves catch up. + + sub replicate { + my $self = shift @_; + foreach my $slave (@{$self->slave_paths}) { + copy($self->master_path, $slave); + } + } + + ## Cleanup after ourselves. Unlink all gthe slave paths. + + sub cleanup { + my $self = shift @_; + foreach my $slave (@{$self->slave_paths}) { + if(-e $slave) { + unlink $slave; + } + } + } + + ## --------------------------------------------------------------------- ## + ## Subclass for when you are setting the databases via custom export vars + ## This is for when you have a replicating database setup that you are + ## going to test against. You'll need to define the correct $ENV and have + ## two slave databases to test against, as well as a replication system + ## that will replicate in less than 1 second. + ## --------------------------------------------------------------------- ## + + package DBIx::Class::DBI::Replicated::TestReplication::Custom; + use base 'DBIx::Class::DBI::Replicated::TestReplication'; + + ## Return an Array of ArrayRefs where each ArrayRef is suitable to use for + ## $storage->connect_info to be used for connecting replicants. + + sub generate_replicant_connect_info { + return ( + [$ENV{"DBICTEST_SLAVE0_DSN"}, $ENV{"DBICTEST_SLAVE0_DBUSER"}, $ENV{"DBICTEST_SLAVE0_DBPASS"}, {AutoCommit => 1}], + [$ENV{"DBICTEST_SLAVE1_DSN"}, $ENV{"DBICTEST_SLAVE1_DBUSER"}, $ENV{"DBICTEST_SLAVE1_DBPASS"}, {AutoCommit => 1}], + ); + } + + ## pause a bit to let the replication catch up + + sub replicate { + sleep 1; + } } ## ---------------------------------------------------------------------------- ## Create an object and run some tests ## ---------------------------------------------------------------------------- -my %params = ( - db_paths => [ - "t/var/DBIxClass.db", - "t/var/DBIxClass_slave1.db", - "t/var/DBIxClass_slave2.db", - ], -); +## Thi first bunch of tests are basic, just make sure all the bits are behaving -ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new() - => 'Created a replication object'; - -isa_ok $replicate->schema - => 'DBIx::Class::Schema'; - - - warn dump $replicate->schema->storage->meta; - - warn dump $replicate->schema->storage->master; +my $replicated_class = DBICTest->has_custom_dsn ? + 'DBIx::Class::DBI::Replicated::TestReplication::Custom' : + 'DBIx::Class::DBI::Replicated::TestReplication::SQLite'; +my $replicated; -__END__ +for my $method (qw/by_connect_info by_storage_type/) { + ok $replicated = $replicated_class->new($method) + => "Created a replication object $method"; -## ---------------------------------------------------------------------------- -## Build a class to hold all our required testing data and methods. 
-## ---------------------------------------------------------------------------- + isa_ok $replicated->schema + => 'DBIx::Class::Schema'; + + isa_ok $replicated->schema->storage + => 'DBIx::Class::Storage::DBI::Replicated'; -TESTSCHEMACLASS: { - - package DBIx::Class::DBI::Replicated::TestReplication; - - use DBI; - use DBICTest; - use File::Copy; - - ## Create a constructor - - sub new { - my $class = shift @_; - my %params = @_; - - my $self = bless { - db_paths => $params{db_paths}, - dsns => $class->init_dsns(%params), - schema=>$class->init_schema, - }, $class; - - $self->connect; - return $self; - } - - ## get the DSNs. We build this up from the list of file paths - - sub init_dsns { - my $class = shift @_; - my %params = @_; - my $db_paths = $params{db_paths}; - - my @dsn = map { - "dbi:SQLite:${_}"; - } @$db_paths; - - return \@dsn; - } - - ## get the Schema and set the replication storage type - - sub init_schema { - my $class = shift @_; - my $schema = DBICTest->init_schema(); - $schema->storage_type( '::DBI::Replicated' ); - - return $schema; - } - - ## connect the Schema - - sub connect { - my $self = shift @_; - my ($master, @slaves) = @{$self->{dsns}}; - my $master_connect_info = [$master, '','', {AutoCommit=>1, PrintError=>0}]; - - my @slavesob; - foreach my $slave (@slaves) - { - my $dbh = shift @{$self->{slaves}} - || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0}); - - push @{$master_connect_info->[-1]->{slaves_connect_info}}, - [$dbh, '','',{priority=>10}]; - - push @slavesob, - $dbh; - } - - ## Keep track of the created slave databases - $self->{slaves} = \@slavesob; - - $self - ->{schema} - ->connect(@$master_connect_info); - } - - ## replication - - sub replicate { - my $self = shift @_; - my ($master, @slaves) = @{$self->{db_paths}}; - - foreach my $slave (@slaves) { - copy($master, $slave); - } - } - - ## Cleanup afer ourselves. - - sub cleanup { - my $self = shift @_; - my ($master, @slaves) = @{$self->{db_paths}}; - - foreach my $slave (@slaves) { - unlink $slave; - } - } - - ## Force a reconnection - - sub reconnect { - my $self = shift @_; - my $schema = $self->connect; - $self->{schema} = $schema; - return $schema; - } + isa_ok $replicated->schema->storage->balancer + => 'DBIx::Class::Storage::DBI::Replicated::Balancer::Random' + => 'configured balancer_type'; } -## ---------------------------------------------------------------------------- -## Create an object and run some tests -## ---------------------------------------------------------------------------- +ok $replicated->schema->storage->meta + => 'has a meta object'; + +isa_ok $replicated->schema->storage->master + => 'DBIx::Class::Storage::DBI'; + +isa_ok $replicated->schema->storage->pool + => 'DBIx::Class::Storage::DBI::Replicated::Pool'; + +does_ok $replicated->schema->storage->balancer + => 'DBIx::Class::Storage::DBI::Replicated::Balancer'; + +ok my @replicant_connects = $replicated->generate_replicant_connect_info + => 'got replication connect information'; + +ok my @replicated_storages = $replicated->schema->storage->connect_replicants(@replicant_connects) + => 'Created some storages suitable for replicants'; + +our %debug; +$replicated->schema->storage->debug(1); +$replicated->schema->storage->debugcb(sub { + my ($op, $info) = @_; + ##warn "\n$op, $info\n"; + %debug = ( + op => $op, + info => $info, + dsn => ($info=~m/\[(.+)\]/)[0], + storage_type => $info=~m/REPLICANT/ ? 
'REPLICANT' : 'MASTER', + ); +}); + +ok my @all_storages = $replicated->schema->storage->all_storages + => '->all_storages'; + +is scalar @all_storages, + 3 + => 'correct number of ->all_storages'; + +is ((grep $_->isa('DBIx::Class::Storage::DBI'), @all_storages), + 3 + => '->all_storages are correct type'); + +my @all_storage_opts = + grep { (reftype($_)||'') eq 'HASH' } + map @{ $_->_connect_info }, @all_storages; + +is ((grep $_->{master_option}, @all_storage_opts), + 3 + => 'connect_info was merged from master to replicants'); + +my @replicant_names = keys %{ $replicated->schema->storage->replicants }; + +ok @replicant_names, "found replicant names @replicant_names"; + +## Silence warning about not supporting the is_replicating method if using the +## sqlite dbs. +$replicated->schema->storage->debugobj->silence(1) + if first { m{^t/} } @replicant_names; + +isa_ok $replicated->schema->storage->balancer->current_replicant + => 'DBIx::Class::Storage::DBI'; -my %params = ( - db_paths => [ - "t/var/DBIxClass.db", - "t/var/DBIxClass_slave1.db", - "t/var/DBIxClass_slave2.db", - ], -); +$replicated->schema->storage->debugobj->silence(0); -ok my $replicate = DBIx::Class::DBI::Replicated::TestReplication->new(%params) - => 'Created a replication object'; - -isa_ok $replicate->{schema} - => 'DBIx::Class::Schema'; +ok $replicated->schema->storage->pool->has_replicants + => 'does have replicants'; + +is $replicated->schema->storage->pool->num_replicants => 2 + => 'has two replicants'; + +does_ok $replicated_storages[0] + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +does_ok $replicated_storages[1] + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +does_ok $replicated->schema->storage->replicants->{$replicant_names[0]} + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; + +does_ok $replicated->schema->storage->replicants->{$replicant_names[1]} + => 'DBIx::Class::Storage::DBI::Replicated::Replicant'; ## Add some info to the database -$replicate - ->{schema} - ->populate('Artist', [ - [ qw/artistid name/ ], - [ 4, "Ozric Tentacles"], - ]); - +$replicated + ->schema + ->populate('Artist', [ + [ qw/artistid name/ ], + [ 4, "Ozric Tentacles"], + ]); + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + + like $debug{info}, qr/INSERT/, 'Last was an insert'; + ## Make sure all the slaves have the table definitions -$replicate->replicate; +$replicated->replicate; +$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1); +$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1); + +## Silence warning about not supporting the is_replicating method if using the +## sqlite dbs. +$replicated->schema->storage->debugobj->silence(1) + if first { m{^t/} } @replicant_names; + +$replicated->schema->storage->pool->validate_replicants; + +$replicated->schema->storage->debugobj->silence(0); ## Make sure we can read the data. -ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4) - => 'Created Result'; +ok my $artist1 = $replicated->schema->resultset('Artist')->find(4) + => 'Created Result'; + +## We removed testing here since master read weight is on, so we can't tell in +## advance what storage to expect. 
We turn master read weight off a bit lower +## is $debug{storage_type}, 'REPLICANT' +## => "got last query from a replicant: $debug{dsn}, $debug{info}"; isa_ok $artist1 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist1->name, 'Ozric Tentacles' - => 'Found expected name for first result'; + => 'Found expected name for first result'; + +## Check that master_read_weight is honored +{ + no warnings qw/once redefine/; + + local + *DBIx::Class::Storage::DBI::Replicated::Balancer::Random::_random_number = + sub { 999 }; + + $replicated->schema->storage->balancer->increment_storage; + + is $replicated->schema->storage->balancer->current_replicant, + $replicated->schema->storage->master + => 'master_read_weight is honored'; + + ## turn it off for the duration of the test + $replicated->schema->storage->balancer->master_read_weight(0); + $replicated->schema->storage->balancer->increment_storage; +} ## Add some new rows that only the master will have This is because ## we overload any type of write operation so that is must hit the master ## database. -$replicate - ->{schema} - ->populate('Artist', [ - [ qw/artistid name/ ], - [ 5, "Doom's Children"], - [ 6, "Dead On Arrival"], - [ 7, "Watergate"], - ]); - -## Reconnect the database -$replicate->reconnect; +$replicated + ->schema + ->populate('Artist', [ + [ qw/artistid name/ ], + [ 5, "Doom's Children"], + [ 6, "Dead On Arrival"], + [ 7, "Watergate"], + ]); -## Alright, the database 'cluster' is not in a consistent state. When we do -## a read now we expect bad news + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; -is $replicate->{schema}->resultset('Artist')->find(5), undef - => 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet'; + like $debug{info}, qr/INSERT/, 'Last was an insert'; ## Make sure all the slaves have the table definitions -$replicate->replicate; +$replicated->replicate; ## Should find some data now -ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5) - => 'Sync succeed'; - +ok my $artist2 = $replicated->schema->resultset('Artist')->find(5) + => 'Sync succeed'; + +is $debug{storage_type}, 'REPLICANT' + => "got last query from a replicant: $debug{dsn}"; + isa_ok $artist2 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist2->name, "Doom's Children" - => 'Found expected name for first result'; - -## What happens when we delete one of the slaves? + => 'Found expected name for first result'; + +## What happens when we disconnect all the replicants? 
+ +is $replicated->schema->storage->pool->connected_replicants => 2 + => "both replicants are connected"; -ok my $slave1 = @{$replicate->{slaves}}[0] - => 'Got Slave1'; +$replicated->schema->storage->replicants->{$replicant_names[0]}->disconnect; +$replicated->schema->storage->replicants->{$replicant_names[1]}->disconnect; -ok $slave1->disconnect - => 'disconnected slave1'; +is $replicated->schema->storage->pool->connected_replicants => 0 + => "both replicants are now disconnected"; -$replicate->reconnect; +## All these should pass, since the database should automatically reconnect + +ok my $artist3 = $replicated->schema->resultset('Artist')->find(6) + => 'Still finding stuff.'; + +is $debug{storage_type}, 'REPLICANT' + => "got last query from a replicant: $debug{dsn}"; -ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6) - => 'Still finding stuff.'; - isa_ok $artist3 - => 'DBICTest::Artist'; - + => 'DBICTest::Artist'; + is $artist3->name, "Dead On Arrival" - => 'Found expected name for first result'; - -## Let's delete all the slaves + => 'Found expected name for first result'; -ok my $slave2 = @{$replicate->{slaves}}[1] - => 'Got Slave2'; +is $replicated->schema->storage->pool->connected_replicants => 1 + => "At Least One replicant reconnected to handle the job"; -ok $slave2->disconnect - => 'Disconnected slave2'; +## What happens when we try to select something that doesn't exist? -$replicate->reconnect; +ok ! $replicated->schema->resultset('Artist')->find(666) + => 'Correctly failed to find something.'; -## We expect an error now, since all the slaves are dead +is $debug{storage_type}, 'REPLICANT' + => "got last query from a replicant: $debug{dsn}"; -eval { - $replicate->{schema}->resultset('Artist')->find(4)->name; -}; +## test the reliable option -ok $@ => 'Got error when trying to find artistid 4'; +TESTRELIABLE: { -## This should also be an error + $replicated->schema->storage->set_reliable_storage; -eval { - my $artist4 = $replicate->{schema}->resultset('Artist')->find(7); -}; + ok $replicated->schema->resultset('Artist')->find(2) + => 'Read from master 1'; -ok $@ => 'Got read errors after everything failed'; + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; -## make sure ->connect_info returns something sane + ok $replicated->schema->resultset('Artist')->find(5) + => 'Read from master 2'; -ok $replicate->{schema}->storage->connect_info - => 'got something out of ->connect_info'; + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; -## Force a connection to the write source for testing. + $replicated->schema->storage->set_balanced_storage; -$replicate->{schema}->storage($replicate->{schema}->storage->write_source); + ok $replicated->schema->resultset('Artist')->find(3) + => 'Read from replicant'; -## What happens when we do a find for something that doesn't exist? + is $debug{storage_type}, 'REPLICANT', + "got last query from a replicant: $debug{dsn}"; +} -ok ! 
$replicate->{schema}->resultset('Artist')->find(666) - => 'Correctly did not find a bad artist id'; +## Make sure when reliable goes out of scope, we are using replicants again -## Delete the old database files -$replicate->cleanup; +ok $replicated->schema->resultset('Artist')->find(1) + => 'back to replicant 1.'; + + is $debug{storage_type}, 'REPLICANT', + "got last query from a replicant: $debug{dsn}"; + +ok $replicated->schema->resultset('Artist')->find(2) + => 'back to replicant 2.'; + + is $debug{storage_type}, 'REPLICANT', + "got last query from a replicant: $debug{dsn}"; + +## set all the replicants to inactive, and make sure the balancer falls back to +## the master. + +$replicated->schema->storage->replicants->{$replicant_names[0]}->active(0); +$replicated->schema->storage->replicants->{$replicant_names[1]}->active(0); + +{ + ## catch the fallback to master warning + open my $debugfh, '>', \my $fallback_warning; + my $oldfh = $replicated->schema->storage->debugfh; + $replicated->schema->storage->debugfh($debugfh); + + ok $replicated->schema->resultset('Artist')->find(2) + => 'Fallback to master'; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + + like $fallback_warning, qr/falling back to master/ + => 'emits falling back to master warning'; + + $replicated->schema->storage->debugfh($oldfh); +} + +$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1); +$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1); + +## Silence warning about not supporting the is_replicating method if using the +## sqlite dbs. +$replicated->schema->storage->debugobj->silence(1) + if first { m{^t/} } @replicant_names; + +$replicated->schema->storage->pool->validate_replicants; + +$replicated->schema->storage->debugobj->silence(0); + +ok $replicated->schema->resultset('Artist')->find(2) + => 'Returned to replicates'; + +is $debug{storage_type}, 'REPLICANT', + "got last query from a replicant: $debug{dsn}"; + +## Getting slave status tests + +SKIP: { + ## We skip this tests unless you have a custom replicants, since the default + ## sqlite based replication tests don't support these functions. + + skip 'Cannot Test Replicant Status on Non Replicating Database', 10 + unless DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"}; + + $replicated->replicate; ## Give the slaves a chance to catchup. + + ok $replicated->schema->storage->replicants->{$replicant_names[0]}->is_replicating + => 'Replicants are replicating'; + + is $replicated->schema->storage->replicants->{$replicant_names[0]}->lag_behind_master, 0 + => 'Replicant is zero seconds behind master'; + + ## Test the validate replicants + + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 2 + => 'Still have 2 replicants after validation'; + + ## Force the replicants to fail the validate test by required their lag to + ## be negative (ie ahead of the master!) + + $replicated->schema->storage->pool->maximum_lag(-10); + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 0 + => 'No way a replicant be be ahead of the master'; + + ## Let's be fair to the replicants again. 
Let them lag up to 5 + + $replicated->schema->storage->pool->maximum_lag(5); + $replicated->schema->storage->pool->validate_replicants; + + is $replicated->schema->storage->pool->active_replicants, 2 + => 'Both replicants in good standing again'; + + ## Check auto validate + + is $replicated->schema->storage->balancer->auto_validate_every, 100 + => "Got the expected value for auto validate"; + + ## This will make sure we auto validatge everytime + $replicated->schema->storage->balancer->auto_validate_every(0); + + ## set all the replicants to inactive, and make sure the balancer falls back to + ## the master. + + $replicated->schema->storage->replicants->{$replicant_names[0]}->active(0); + $replicated->schema->storage->replicants->{$replicant_names[1]}->active(0); + + ## Ok, now when we go to run a query, autovalidate SHOULD reconnect + + is $replicated->schema->storage->pool->active_replicants => 0 + => "both replicants turned off"; + + ok $replicated->schema->resultset('Artist')->find(5) + => 'replicant reactivated'; + + is $debug{storage_type}, 'REPLICANT', + "got last query from a replicant: $debug{dsn}"; + + is $replicated->schema->storage->pool->active_replicants => 2 + => "both replicants reactivated"; +} + +## Test the reliably callback + +ok my $reliably = sub { + + ok $replicated->schema->resultset('Artist')->find(5) + => 'replicant reactivated'; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + +} => 'created coderef properly'; + +$replicated->schema->storage->execute_reliably($reliably); + +## Try something with an error + +ok my $unreliably = sub { + + ok $replicated->schema->resultset('ArtistXX')->find(5) + => 'replicant reactivated'; +} => 'created coderef properly'; +throws_ok {$replicated->schema->storage->execute_reliably($unreliably)} + qr/Can't find source for ArtistXX/ + => 'Bad coderef throws proper error'; +## Make sure replication came back +ok $replicated->schema->resultset('Artist')->find(3) + => 'replicant reactivated'; +is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"; + +## make sure transactions are set to execute_reliably + +ok my $transaction = sub { + + my $id = shift @_; + + $replicated + ->schema + ->populate('Artist', [ + [ qw/artistid name/ ], + [ $id, "Children of the Grave"], + ]); + + ok my $result = $replicated->schema->resultset('Artist')->find($id) + => "Found expected artist for $id"; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + + ok my $more = $replicated->schema->resultset('Artist')->find(1) + => 'Found expected artist again for 1'; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + + return ($result, $more); + +} => 'Created a coderef properly'; + +## Test the transaction with multi return +{ + ok my @return = $replicated->schema->txn_do($transaction, 666) + => 'did transaction'; + + is $return[0]->id, 666 + => 'first returned value is correct'; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + + is $return[1]->id, 1 + => 'second returned value is correct'; + + is $debug{storage_type}, 'MASTER', + "got last query from a master: $debug{dsn}"; + +} + +## Test that asking for single return works +{ + ok my @return = $replicated->schema->txn_do($transaction, 777) + => 'did transaction'; + + is $return[0]->id, 777 + => 'first returned value is correct'; + + is $return[1]->id, 1 + => 'second returned value is correct'; +} + +## Test transaction returning a 
single value + +{ + ok my $result = $replicated->schema->txn_do(sub { + ok my $more = $replicated->schema->resultset('Artist')->find(1) + => 'found inside a transaction'; + is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}"; + return $more; + }) => 'successfully processed transaction'; + + is $result->id, 1 + => 'Got expected single result from transaction'; +} + +## Make sure replication came back + +ok $replicated->schema->resultset('Artist')->find(1) + => 'replicant reactivated'; + +is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"; + +## Test Discard changes + +{ + ok my $artist = $replicated->schema->resultset('Artist')->find(2) + => 'got an artist to test discard changes'; + + is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"; + + ok $artist->get_from_storage({force_pool=>'master'}) + => 'properly discard changes'; + + is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}"; + +} + +## Test some edge cases, like trying to do a transaction inside a transaction, etc + +{ + ok my $result = $replicated->schema->txn_do(sub { + return $replicated->schema->txn_do(sub { + ok my $more = $replicated->schema->resultset('Artist')->find(1) + => 'found inside a transaction inside a transaction'; + is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}"; + return $more; + }); + }) => 'successfully processed transaction'; + + is $result->id, 1 + => 'Got expected single result from transaction'; +} + +{ + ok my $result = $replicated->schema->txn_do(sub { + return $replicated->schema->storage->execute_reliably(sub { + return $replicated->schema->txn_do(sub { + return $replicated->schema->storage->execute_reliably(sub { + ok my $more = $replicated->schema->resultset('Artist')->find(1) + => 'found inside crazy deep transactions and execute_reliably'; + is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}"; + return $more; + }); + }); + }); + }) => 'successfully processed transaction'; + + is $result->id, 1 + => 'Got expected single result from transaction'; +} + +## Test the force_pool resultset attribute. + +{ + ok my $artist_rs = $replicated->schema->resultset('Artist') + => 'got artist resultset'; + + ## Turn on Forced Pool Storage + ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'}) + => 'Created a resultset using force_pool storage'; + + ok my $artist = $reliable_artist_rs->find(2) + => 'got an artist result via force_pool storage'; + + is $debug{storage_type}, 'MASTER', "got last query from a master: $debug{dsn}"; +} + +## Test the force_pool resultset attribute part two. + +{ + ok my $artist_rs = $replicated->schema->resultset('Artist') + => 'got artist resultset'; + + ## Turn on Forced Pool Storage + ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>$replicant_names[0]}) + => 'Created a resultset using force_pool storage'; + + ok my $artist = $reliable_artist_rs->find(2) + => 'got an artist result via force_pool storage'; + + is $debug{storage_type}, 'REPLICANT', "got last query from a replicant: $debug{dsn}"; +} +## Delete the old database files +$replicated->cleanup; +# vim: sw=4 sts=4 :
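
A note on running this test against real replication (this is commentary, not part of the patch above): the POD at the top says the test falls back to the SQLite fakes unless you point it at a genuinely replicating setup through %ENV. As a rough sketch, a wrapper like the one below could do that. The DBICTEST_SLAVE0_*/DBICTEST_SLAVE1_* names are exactly the variables that DBIx::Class::DBI::Replicated::TestReplication::Custom reads; DBICTEST_DSN/DBICTEST_DBUSER/DBICTEST_DBPASS are assumed to be the usual DBICTest master settings behind DBICTest->has_custom_dsn, and every host name and credential shown is a placeholder.

#!/usr/bin/env perl
# Hypothetical wrapper: point t/93storage_replication.t at a MySQL master and
# two slaves instead of the SQLite fakes.  All DSNs and credentials below are
# placeholders for illustration only.
use strict;
use warnings;

# master (assumed to be the standard DBICTest connection variables)
$ENV{DBICTEST_DSN}    = 'dbi:mysql:database=dbictest;host=master.example.com';
$ENV{DBICTEST_DBUSER} = 'dbic';
$ENV{DBICTEST_DBPASS} = 'secret';

# replicants, as read by the ::TestReplication::Custom class in the patch
$ENV{DBICTEST_SLAVE0_DSN}    = 'dbi:mysql:database=dbictest;host=slave0.example.com';
$ENV{DBICTEST_SLAVE0_DBUSER} = 'dbic';
$ENV{DBICTEST_SLAVE0_DBPASS} = 'secret';

$ENV{DBICTEST_SLAVE1_DSN}    = 'dbi:mysql:database=dbictest;host=slave1.example.com';
$ENV{DBICTEST_SLAVE1_DBUSER} = 'dbic';
$ENV{DBICTEST_SLAVE1_DBPASS} = 'secret';

# hand control to the test itself; the master-to-slave replication must settle
# within the one-second pause that the Custom class's replicate() allows for.
exec $^X, '-Ilib', 't/93storage_replication.t'
    or die "could not exec: $!";

With those variables set, the test no longer skips the "Cannot Test Replicant Status on Non Replicating Database" block, so is_replicating, lag_behind_master and the maximum_lag validation run against the live slaves instead of being stubbed out.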