changes to replication so that if a replicant is offline when we do the initial conne...
[dbsrgits/DBIx-Class.git] t/93storage_replication.t
index b2b3732..161013b 100644
@@ -6,10 +6,10 @@ use Test::Exception;
 use DBICTest;
 
 BEGIN {
-    eval "use Moose; use Test::Moose";
+    eval "use DBIx::Class::Storage::DBI::Replicated; use Test::Moose";
     plan $@
-        ? ( skip_all => 'needs Moose for testing' )
-        : ( tests => 71 );
+        ? ( skip_all => "Deps not installed: $@" )
+        : ( tests => 79 );
 }
 
 use_ok 'DBIx::Class::Storage::DBI::Replicated::Pool';
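The BEGIN change above swaps which module the guard tries to load: pulling in DBIx::Class::Storage::DBI::Replicated exercises all of its optional dependencies (Moose among them) in one string eval, and any failure becomes a clean skip rather than a compile error. A generic sketch of that guard pattern, with the module name and test count as placeholders rather than anything this commit prescribes:

    use strict;
    use warnings;
    use Test::More;

    BEGIN {
        # String eval so a missing optional dependency only sets $@ instead
        # of aborting compilation, then plan either a skip_all or a count.
        eval "use Some::Optional::Module";           # placeholder module
        plan $@
            ? ( skip_all => "Deps not installed: $@" )
            : ( tests => 42 );                       # placeholder count
    }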
@@ -59,9 +59,13 @@ TESTSCHEMACLASSES: {
     ## Get the Schema and set the replication storage type
     
     sub init_schema {
+        # current SQLT SQLite producer does not handle DROP TABLE IF EXISTS, trap warnings here
+        local $SIG{__WARN__} = sub { warn @_ unless $_[0] =~ /no such table.+DROP TABLE/ };
+
         my $class = shift @_;
-        
+
         my $schema = DBICTest->init_schema(
+            sqlite_use_file => 1,
             storage_type=>{
                '::DBI::Replicated' => {
                        balancer_type=>'::Random',
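For readers following the setup, the fixture this init_schema builds is roughly a file-backed SQLite master with the ::Random balancer spreading reads over replicants that get attached elsewhere in the test via connect_replicants. A condensed sketch under those assumptions (the DSNs and file paths below are illustrative, not taken from this diff):

    use DBICTest;

    my $schema = DBICTest->init_schema(
        sqlite_use_file => 1,   # master needs a real file so it can be copied to replicants
        storage_type    => {
            '::DBI::Replicated' => { balancer_type => '::Random' },
        },
    );

    # Attach two read-only replicants; the pool keys its replicants hash by
    # DSN, which is what @replicant_names refers to later in the test.
    $schema->storage->connect_replicants(
        map { [ $_, '', '', { AutoCommit => 1 } ] }
            'dbi:SQLite:t/var/DBIxClass_slave0.db',
            'dbi:SQLite:t/var/DBIxClass_slave1.db',
    );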
@@ -243,6 +247,9 @@ $replicated
 ## Make sure all the slaves have the table definitions
 
 $replicated->replicate;
+$replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
+$replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+$replicated->schema->storage->pool->validate_replicants;
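These added lines track the behaviour change in the commit message: a replicant that was unreachable (or empty) at connect time is left marked inactive, so once replicate() has copied the tables over, the test must flip each replicant back to active and ask the pool to re-validate before reads are balanced to them again. The same steps, condensed into a loop over the pool's DSN keys:

    # Re-enable every replicant and let the pool confirm it responds;
    # otherwise the balancer keeps routing reads to the master only.
    $replicated->schema->storage->replicants->{$_}->active(1)
        for @replicant_names;
    $replicated->schema->storage->pool->validate_replicants;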
 
 ## Make sure we can read the data.
 
@@ -268,20 +275,6 @@ $replicated
         [ 7, "Watergate"],
     ]);
 
-SKIP: {
-    ## We can't do this test if we have a custom replicants, since we assume
-    ## if there are custom one that you are trying to test a real replicating
-    ## system.  See docs above for more.
-    
-    skip 'Cannot test inconsistent replication since you have a real replication system', 1
-     if DBICTest->has_custom_dsn && $ENV{"DBICTEST_SLAVE0_DSN"};
-    
-       ## Alright, the database 'cluster' is not in a consistent state.  When we do
-       ## a read now we expect bad news    
-    is $replicated->schema->resultset('Artist')->find(5), undef
-    => 'read after disconnect fails because it uses a replicant which we have neglected to "replicate" yet'; 
-}
-
 ## Make sure all the slaves have the table definitions
 $replicated->replicate;
 
@@ -363,6 +356,7 @@ ok $replicated->schema->resultset('Artist')->find(2)
 
 $replicated->schema->storage->replicants->{$replicant_names[0]}->active(1);
 $replicated->schema->storage->replicants->{$replicant_names[1]}->active(1);
+$replicated->schema->storage->pool->validate_replicants;
 
 ok $replicated->schema->resultset('Artist')->find(2)
     => 'Returned to replicates';
@@ -476,12 +470,15 @@ ok my $transaction = sub {
                [ $id, "Children of the Grave"],
            ]);
            
-   ok my $result = $replicated->schema->resultset('Artist')->find($id);
-   ok my $more = $replicated->schema->resultset('Artist')->find(1);
-   
+    ok my $result = $replicated->schema->resultset('Artist')->find($id)
+        => 'Found expected artist';
+        
+    ok my $more = $replicated->schema->resultset('Artist')->find(1)
+        => 'Found expected artist again';
+        
    return ($result, $more);
    
-};
+} => 'Created a coderef properly';
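The coderef above is exercised through txn_do in the blocks that follow; txn_do forwards any extra arguments to the coderef and invokes it in the same context (list or scalar) as txn_do itself, which is what the multi-return test below relies on. An illustrative calling sketch (the id value is made up, and it assumes the coderef shifts an id off its argument list, as its elided body suggests):

    # List-context call: both rows returned by the coderef come back.
    my ($result, $more) = $replicated->schema->txn_do($transaction, 666);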
 
 ## Test the transaction with multi return
 {
@@ -508,7 +505,8 @@ ok my $transaction = sub {
 
 {
        ok my $result = $replicated->schema->txn_do(sub {
-               ok my $more = $replicated->schema->resultset('Artist')->find(1);
+               ok my $more = $replicated->schema->resultset('Artist')->find(1)
+               => 'found inside a transaction';
                return $more;
        }) => 'successfully processed transaction';
        
@@ -530,7 +528,53 @@ ok $replicated->schema->resultset('Artist')->find(1)
        ok $artist->discard_changes
           => 'properly discard changes';
 }
-        
+
+## Test some edge cases, like trying to do a transaction inside a transaction, etc
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+       return $replicated->schema->txn_do(sub {
+               ok my $more = $replicated->schema->resultset('Artist')->find(1)
+               => 'found inside a transaction inside a transaction';
+               return $more;                   
+       });
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';         
+}
+
+{
+    ok my $result = $replicated->schema->txn_do(sub {
+       return $replicated->schema->storage->execute_reliably(sub {
+               return $replicated->schema->txn_do(sub {
+                       return $replicated->schema->storage->execute_reliably(sub {
+                               ok my $more = $replicated->schema->resultset('Artist')->find(1)
+                               => 'found inside crazy deep transactions and execute_reliably';
+                               return $more;                           
+                       });
+               });     
+       });
+    }) => 'successfully processed transaction';
+    
+    is $result->id, 1
+       => 'Got expected single result from transaction';         
+}     
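The deep nesting above works because execute_reliably simply points the storage's read handler at the master for the duration of the coderef, and transactions already run against the master, so stacking the two is harmless. On its own it is also the way to read your own writes before the replicants have caught up; a minimal sketch grounded in the calls used above:

    # Force every read inside the coderef to the master, however far behind
    # the replicants are (artist id 1 is the row the test keeps reusing).
    my $fresh = $replicated->schema->storage->execute_reliably(sub {
        return $replicated->schema->resultset('Artist')->find(1);
    });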
+
+## Test the force_pool resultset attribute.
+
+{
+       ok my $artist_rs = $replicated->schema->resultset('Artist')
+        => 'got artist resultset';
+          
+       ## Turn on Forced Pool Storage
+       ok my $reliable_artist_rs = $artist_rs->search(undef, {force_pool=>'master'})
+        => 'Created a resultset using force_pool storage';
+          
+    ok my $artist = $reliable_artist_rs->find(2) 
+        => 'got an artist result via force_pool storage';
+}
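force_pool does a similar job at the resultset level: searches chained from a resultset carrying that attribute are answered by the named pool member, 'master' in this block. A short usage sketch mirroring the test above:

    # Reads from this resultset bypass the balancer and hit the master,
    # which is useful right after a write the replicants have not seen yet.
    my $master_rs = $replicated->schema->resultset('Artist')
                        ->search(undef, { force_pool => 'master' });

    my $artist = $master_rs->find(2);   # guaranteed-fresh read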
+
 ## Delete the old database files
 $replicated->cleanup;