- added tests to replication to make sure DBD::Multi fails over when a database is...
John Napiorkowski [Thu, 17 Apr 2008 13:41:17 +0000 (13:41 +0000)]
- updated DBIC::DBI::Replication so that in addition to a dsn string, you can pass a preexisting database handle.

lib/DBIx/Class/Storage/DBI/Replication.pm
t/93storage_replication.t

index 36e8b24..6ec74e9 100644 (file)
@@ -21,6 +21,8 @@ DBIx::Class::Storage::DBI::Replication - EXPERIMENTAL Replicated database suppor
                     [ "dbi:mysql:database=test;hostname=master", "username", "password", { AutoCommit => 1 } ], # master
                     [ "dbi:mysql:database=test;hostname=slave1", "username", "password", { priority => 10 } ],  # slave1
                     [ "dbi:mysql:database=test;hostname=slave2", "username", "password", { priority => 10 } ],  # slave2
+                    [ $dbh, '','', {priority=>10}], # add in a preexisting database handle
+                    [ sub {  DBI->connect }, '', '', {priority=>10}], # DBD::Multi will call this coderef for connects
                     <...>,
                     { limit_dialect => 'LimitXY' } # If needed, see below
                    ] );
@@ -83,15 +85,24 @@ sub connect_info {
     $global_options = ref $info->[-1] eq 'HASH' ? pop( @$info ) : {};
     if( ref( $options = $info->[0]->[-1] ) eq 'HASH' ) {
        # Local options present in dsn, merge them with global options
-       map { $global_options->{$_} = $options->{$_} } keys %$options;
-       pop @{$info->[0]};
+        map { $global_options->{$_} = $options->{$_} } keys %$options;
+        pop @{$info->[0]};
     }
 
     # We need to copy-pass $global_options, since connect_info clears it while
     # processing options
     $self->write_source->connect_info( @{$info->[0]}, { %$global_options } );
 
-    @dsns = map { ($_->[3]->{priority} || 10) => $_ } @{$info->[0]}[1..@{$info->[0]}-1];
+       ## allow either a DSN string or an already connected $dbh.  Just remember if
+       ## you use the $dbh option then DBD::Multi has no idea how to reconnect in
+       ## the event of a failure.
+       
+    @dsns = map {
+        ## if the first element in the arrayref is a ref, make that the value
+        my $db = ref $_->[0] ? $_->[0] : $_;
+        ($_->[3]->{priority} || 10) => $db;
+    } @{$info->[0]}[1..@{$info->[0]}-1];
+    
     $global_options->{dsns} = \@dsns;
 
     $self->read_source->connect_info( [ 'dbi:Multi:', undef, undef, { %$global_options } ] );
index 67b09c2..fd2241d 100644 (file)
 use strict;
 use warnings;
 use lib qw(t/lib);
-
-use File::Copy;
-
-use DBICTest;
-
 use Test::More;
+use Data::Dump qw/dump/;
 
 BEGIN {
     eval "use DBD::Multi";
     plan $@
         ? ( skip_all => 'needs DBD::Multi for testing' )
-        : ( tests => 3 );
+        : ( tests => 18 );
+}      
+
+## ----------------------------------------------------------------------------
+## Build a class to hold all our required testing data and methods.
+## ----------------------------------------------------------------------------
+
+TESTSCHEMACLASS: {
+       
+       package DBIx::Class::DBI::Replication::TestReplication;
+
+       use DBI;        
+       use DBICTest;
+       use File::Copy;
+       
+       ## Create a constructor
+       
+       sub new {
+               my $class = shift @_;
+               my %params = @_;
+               
+               my $self = bless {
+                       db_paths => $params{db_paths},
+                       dsns => $class->init_dsns(%params),
+                       schema=>$class->init_schema,
+               }, $class;
+               
+               $self->connect;
+               return $self;
+       }
+       
+       ## get the DSNs.  We build this up from the list of file paths
+       
+       sub init_dsns {
+               my $class = shift @_;
+               my %params = @_;
+               my $db_paths = $params{db_paths};
+
+               my @dsn = map {
+                       "dbi:SQLite:${_}";
+               } @$db_paths;
+               
+               return \@dsn;
+       }
+
+       ## get the Schema and set the replication storage type
+       
+       sub init_schema {
+               my $class = shift @_;
+               my $schema = DBICTest->init_schema();
+               $schema->storage_type( '::DBI::Replication' );
+               
+               return $schema;
+       }
+       
+       ## connect the Schema
+       
+       sub connect {
+               my $self = shift @_;
+               my ($master, @slaves) = @{$self->{dsns}};
+               my @connections = ([$master, '','', {AutoCommit=>1, PrintError=>0}]);
+               my @slavesob;
+               
+               foreach my $slave (@slaves)
+               {
+                       my $dbh = shift @{$self->{slaves}}
+                        || DBI->connect($slave,"","",{PrintError=>0, PrintWarn=>0});
+                       
+                       push @connections,
+                        [$dbh, '','',{priority=>10}];
+                        
+                       push @slavesob,
+                        $dbh;
+               }
+               
+               ## Keep track of the created slave databases
+               $self->{slaves} = \@slavesob;
+               
+               $self
+                       ->{schema}
+                       ->connect([
+                               @connections,
+                               {limit_dialect => 'LimitXY'}
+                       ]);
+       }
+       
+       ## replication
+       
+       sub replicate {
+               my $self = shift @_;
+               my ($master, @slaves) = @{$self->{db_paths}};
+               
+               foreach my $slave (@slaves) {
+                       copy($master, $slave);
+               }
+       }
+       
+       ## Clean up after ourselves.
+       
+       sub cleanup {
+               my $self = shift @_;
+               my ($master, @slaves) = @{$self->{db_paths}};
+               
+               foreach my $slave (@slaves) {
+                       unlink $slave;
+               }               
+       }
+       
+       ## Force a reconnection
+       
+       sub reconnect {
+               my $self = shift @_;
+               my $schema = $self->connect;
+               $self->{schema} = $schema;
+               return $schema;
+       }
 }
 
-my $schema = DBICTest->init_schema();
-
-$schema->storage_type( '::DBI::Replication' );
-
-
-my $db_file1 = "t/var/DBIxClass.db";
-my $db_file2 = "t/var/DBIxClass_slave1.db";
-my $db_file3 = "t/var/DBIxClass_slave2.db";
-my $dsn1 = $ENV{"DBICTEST_DSN"} || "dbi:SQLite:${db_file1}";
-my $dsn2 = $ENV{"DBICTEST_DSN2"} || "dbi:SQLite:${db_file2}";
-my $dsn3 = $ENV{"DBICTEST_DSN3"} || "dbi:SQLite:${db_file3}";
-
-$schema->connect( [
-                  [ $dsn1, '', '', { AutoCommit => 1 } ],
-                  [ $dsn2, '', '', { priority => 10 } ],
-                  [ $dsn3, '', '', { priority => 10 } ]
-                 ]
-               );
-
-$schema->populate('Artist', [
-                            [ qw/artistid name/ ],
-                            [ 4, 'Ozric Tentacles']
-                           ]);
-
-my $new_artist1 = $schema->resultset('Artist')->find(4);
-
-isa_ok ($new_artist1, 'DBICTest::Artist');
-
-# reconnect
-my $schema2 = $schema->connect( [
-                                [ $dsn1, '', '', { AutoCommit => 1 } ],
-                                [ $dsn2, '', '', { priority => 10 } ],
-                                [ $dsn3, '', '', { priority => 10 } ]
-                               ]
-                             );
-
-# try and read (should fail)
-eval { my $new_artist2 = $schema2->resultset('Artist')->find(4); };
-ok($@, 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet');
-
-# try and read (should succede after faked synchronisation)
-copy($db_file1, $db_file2);
-$schema2 = $schema->connect( [
-                             [ $dsn1, '', '', { AutoCommit => 1 } ],
-                             [ $dsn2, '', '', { priority => 10 } ],
-                             [ $dsn3, '', '', { priority => 10 } ]
-                            ]
-                          );
-my $new_artist3 = $schema2->resultset('Artist')->find(4);
-isa_ok ($new_artist3, 'DBICTest::Artist');
-
-unlink $db_file2;
+## ----------------------------------------------------------------------------
+## Create an object and run some tests
+## ----------------------------------------------------------------------------
+
+my %params = (
+       db_paths => [
+               "t/var/DBIxClass.db",
+               "t/var/DBIxClass_slave1.db",
+               "t/var/DBIxClass_slave2.db",
+       ],
+);
+
+ok my $replicate = DBIx::Class::DBI::Replication::TestReplication->new(%params)
+       => 'Created a replication object';
+       
+isa_ok $replicate->{schema}
+       => 'DBIx::Class::Schema';
+
+## Add some info to the database
+
+$replicate
+       ->{schema}
+       ->populate('Artist', [
+               [ qw/artistid name/ ],
+               [ 4, "Ozric Tentacles"],
+       ]);
+                           
+## Make sure all the slaves have the table definitions
+
+$replicate->replicate;
+
+## Make sure we can read the data.
+
+ok my $artist1 = $replicate->{schema}->resultset('Artist')->find(4)
+       => 'Created Result';
+
+isa_ok $artist1
+       => 'DBICTest::Artist';
+       
+is $artist1->name, 'Ozric Tentacles'
+       => 'Found expected name for first result';
+
+## Add some new rows that only the master will have.  This is because
+## we overload any type of write operation so that it must hit the master
+## database.
+
+use Fcntl qw (:flock);
+
+my $master_path = $replicate->{db_paths}->[0];
+open LOCKFILE, ">>$master_path"
+ or die "Cannot open $master_path";
+flock(LOCKFILE, LOCK_EX);
+
+$replicate
+       ->{schema}
+       ->populate('Artist', [
+               [ qw/artistid name/ ],
+               [ 5, "Doom's Children"],
+               [ 6, "Dead On Arrival"],
+               [ 7, "Watergate"],
+       ]);
+       
+## Reconnect the database
+$replicate->reconnect;
+
+## Alright, the database 'cluster' is not in a consistent state.  When we do
+## a read now we expect bad news
+
+is $replicate->{schema}->resultset('Artist')->find(5), undef
+       => 'read after disconnect fails because it uses slave 1 which we have neglected to "replicate" yet';
+
+## Make sure all the slaves have the table definitions
+$replicate->replicate;
+
+## Should find some data now
+
+ok my $artist2 = $replicate->{schema}->resultset('Artist')->find(5)
+       => 'Sync succeed';
+       
+isa_ok $artist2
+       => 'DBICTest::Artist';
+       
+is $artist2->name, "Doom's Children"
+       => 'Found expected name for first result';
+       
+## What happens when we delete one of the slaves?
+
+ok my $slave1 = @{$replicate->{slaves}}[0]
+       => 'Got Slave1';
+
+ok $slave1->disconnect
+       => 'disconnected slave1';
+
+$replicate->reconnect;
+
+ok my $artist3 = $replicate->{schema}->resultset('Artist')->find(6)
+       => 'Still finding stuff.';
+       
+isa_ok $artist3
+       => 'DBICTest::Artist';
+       
+is $artist3->name, "Dead On Arrival"
+       => 'Found expected name for first result';
+       
+## Let's delete all the slaves
+
+ok my $slave2 = @{$replicate->{slaves}}[1]
+       => 'Got Slave2';
+
+ok $slave2->disconnect
+       => 'Disconnected slave2';
+
+$replicate->reconnect;
+
+## We expect an error now, since all the slaves are dead
+
+eval {
+       $replicate->{schema}->resultset('Artist')->find(4)->name;
+};
+
+ok $@ => 'Got error when trying to find artistid 4';
+
+## This should also be an error
+
+eval {
+       my $artist4 = $replicate->{schema}->resultset('Artist')->find(7);       
+};
+
+ok $@ => 'Got read errors after everything failed';
+
+## Delete the old database files
+$replicate->cleanup;
+
+
+
+
+
+