From: Peter Rabbitson Date: Tue, 14 Feb 2012 22:05:04 +0000 (+0100) Subject: Test suite wide leaktesting X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=commitdiff_plain;h=65d35121;p=dbsrgits%2FDBIx-Class-Historic.git Test suite wide leaktesting --- diff --git a/Changes b/Changes index d983c85..5b196b2 100644 --- a/Changes +++ b/Changes @@ -26,6 +26,8 @@ Revision history for DBIx::Class when the position column is part of a unique constraint * Misc + - Centralized leak-checks for all instances of DBICTest::Schema + from within any test - Codebase is now trailing-whitespace-free - Cleanup of complex resultset update/delete oprations - storage specific code moved back to ResultSet and replaced by checks diff --git a/t/34exception_action.t b/t/34exception_action.t index e19ef1f..6de03f0 100644 --- a/t/34exception_action.t +++ b/t/34exception_action.t @@ -14,12 +14,11 @@ my $schema = DBICTest->init_schema; # which might need updating at some future time to be some other # exception-generating statement: -sub throwex { $schema->resultset("Artist")->search(1,1,1); } +my $throw = sub { $schema->resultset("Artist")->search(1,1,1) }; my $ex_regex = qr/Odd number of arguments to search/; # Basic check, normal exception -throws_ok { throwex } - $ex_regex; +throws_ok \&$throw, $ex_regex; my $e = $@; @@ -30,27 +29,26 @@ isa_ok( $@, 'DBIx::Class::Exception' ); # Now lets rethrow via exception_action $schema->exception_action(sub { die @_ }); -throws_ok { throwex } - $ex_regex; +throws_ok \&$throw, $ex_regex; # # This should have never worked!!! # # Now lets suppress the error $schema->exception_action(sub { 1 }); -throws_ok { throwex } +throws_ok \&$throw, qr/exception_action handler .+ did \*not\* result in an exception.+original error: $ex_regex/; # Now lets fall through and let croak take back over $schema->exception_action(sub { return }); throws_ok { - warnings_are { throwex } + warnings_are \&$throw, qr/exception_action handler installed .+ returned false instead throwing an exception/; } $ex_regex; # again to see if no warning throws_ok { - warnings_are { throwex } + warnings_are \&$throw, []; } $ex_regex; @@ -75,7 +73,7 @@ throws_ok { # Try the exception class $schema->exception_action(sub { DBICTest::Exception->throw(@_) }); -throws_ok { throwex } +throws_ok \&$throw, qr/DBICTest::Exception is handling this: $ex_regex/; # While we're at it, lets throw a custom exception through Storage::DBI diff --git a/t/40compose_connection.t b/t/40compose_connection.t index 051ab9b..6cd62ff 100644 --- a/t/40compose_connection.t +++ b/t/40compose_connection.t @@ -16,4 +16,15 @@ warnings_exist { DBICTest->init_schema( compose_connection => 1, sqlite_use_file cmp_ok(DBICTest->resultset('Artist')->count, '>', 0, 'count is valid'); +# cleanup globals so we do not trigger the leaktest +for ( map { DBICTest->schema->class($_) } DBICTest->schema->sources ) { + $_->class_resolver(undef); + $_->resultset_instance(undef); + $_->result_source_instance(undef); +} +{ + no warnings qw/redefine once/; + *DBICTest::schema = sub {}; +} + done_testing; diff --git a/t/52leaks.t b/t/52leaks.t index d76fa38..793e036 100644 --- a/t/52leaks.t +++ b/t/52leaks.t @@ -35,6 +35,7 @@ if ($ENV{DBICTEST_IN_PERSISTENT_ENV}) { use lib qw(t/lib); use DBICTest::RunMode; +use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry/; use DBIx::Class; use B 'svref_2object'; BEGIN { @@ -42,8 +43,6 @@ BEGIN { if DBIx::Class::_ENV_::PEEPEENESS; } -use Scalar::Util qw/refaddr reftype weaken/; - # this is what holds all weakened 
refs to be checked for leakage my $weak_registry = {}; @@ -53,19 +52,6 @@ my $has_dt; # Skip the heavy-duty leak tracing when just doing an install unless (DBICTest::RunMode->is_plain) { - # have our own little stack maker - Carp infloops due to the bless override - my $trace = sub { - my $depth = 1; - my (@stack, @frame); - - while (@frame = caller($depth++)) { - push @stack, [@frame[3,1,2]]; - } - - $stack[0][0] = ''; - return join "\tinvoked as ", map { sprintf ("%s at %s line %d\n", @$_ ) } @stack; - }; - # redefine the bless override so that we can catch each and every object created no warnings qw/redefine once/; no strict qw/refs/; @@ -81,29 +67,15 @@ unless (DBICTest::RunMode->is_plain) { } ); - my $slot = (sprintf '%s=%s(0x%x)', # so we don't trigger stringification - ref $obj, - reftype $obj, - refaddr $obj, - ); - # weaken immediately to avoid weird side effects - $weak_registry->{$slot} = { weakref => $obj, strace => $trace->() }; - weaken $weak_registry->{$slot}{weakref}; - - return $obj; + return populate_weakregistry ($weak_registry, $obj ); }; require Try::Tiny; for my $func (qw/try catch finally/) { my $orig = \&{"Try::Tiny::$func"}; *{"Try::Tiny::$func"} = sub (&;@) { - - my $slot = sprintf ('CODE(0x%x)', refaddr $_[0]); - - $weak_registry->{$slot} = { weakref => $_[0], strace => $trace->() }; - weaken $weak_registry->{$slot}{weakref}; - + populate_weakregistry( $weak_registry, $_[0] ); goto $orig; } } @@ -309,10 +281,8 @@ my @compose_ns_classes; } } - for (keys %$base_collection) { - $weak_registry->{"basic $_"} = { weakref => $base_collection->{$_} }; - weaken $weak_registry->{"basic $_"}{weakref}; - } + populate_weakregistry ($weak_registry, $base_collection->{$_}, "basic $_") + for keys %$base_collection; } # check that "phantom-chaining" works - we never lose track of the original $schema @@ -344,16 +314,7 @@ my @compose_ns_classes; sub { shift->delete }, sub { shift->insert }, ) { - $phantom = $_->($phantom); - - my $slot = (sprintf 'phantom %s=%s(0x%x)', # so we don't trigger stringification - ref $phantom, - reftype $phantom, - refaddr $phantom, - ); - - $weak_registry->{$slot} = $phantom; - weaken $weak_registry->{$slot}; + $phantom = populate_weakregistry ( $weak_registry, scalar $_->($phantom) ); } ok( $phantom->in_storage, 'Properly deleted/reinserted' ); @@ -433,21 +394,7 @@ TODO: { or $r->result_source(undef); } -for my $slot (sort keys %$weak_registry) { - - ok (! defined $weak_registry->{$slot}{weakref}, "No leaks of $slot") or do { - my $diag = ''; - - $diag .= Devel::FindRef::track ($weak_registry->{$slot}{weakref}, 20) . 
"\n" - if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef }); - - if (my $stack = $weak_registry->{$slot}{strace}) { - $diag .= " Reference first seen$stack"; - } - - diag $diag if $diag; - }; -} +assert_empty_weakregistry ($weak_registry); # we got so far without a failure - this is a good thing # now let's try to rerun this script under a "persistent" environment diff --git a/t/72pg.t b/t/72pg.t index f3250ed..e2acc10 100644 --- a/t/72pg.t +++ b/t/72pg.t @@ -449,7 +449,8 @@ done_testing; END { return unless $schema; drop_test_schema($schema); - eapk_drop_all( $schema) + eapk_drop_all($schema); + undef $schema; }; diff --git a/t/73oracle_hq.t b/t/73oracle_hq.t index af526d9..aa5ad21 100644 --- a/t/73oracle_hq.t +++ b/t/73oracle_hq.t @@ -557,13 +557,15 @@ sub do_creates { # clean up our mess END { - eval { - my $dbh = $schema->storage->dbh; - $dbh->do("DROP SEQUENCE artist_pk_seq"); - $dbh->do("DROP SEQUENCE cd_seq"); - $dbh->do("DROP SEQUENCE track_seq"); - $dbh->do("DROP TABLE artist"); - $dbh->do("DROP TABLE track"); - $dbh->do("DROP TABLE cd"); + if ($schema and my $dbh = $schema->storage->dbh) { + eval { $dbh->do($_) } for ( + 'DROP SEQUENCE artist_pk_seq', + 'DROP SEQUENCE cd_seq', + 'DROP SEQUENCE track_seq', + 'DROP TABLE artist', + 'DROP TABLE track', + 'DROP TABLE cd', + ); }; + undef $schema; } diff --git a/t/745db2.t b/t/745db2.t index 90576d0..573fe0e 100644 --- a/t/745db2.t +++ b/t/745db2.t @@ -158,6 +158,7 @@ done_testing; # clean up our mess END { - my $dbh = eval { $schema->storage->_dbh }; - $dbh->do("DROP TABLE artist") if $dbh; + my $dbh = eval { $schema->storage->_dbh }; + $dbh->do("DROP TABLE artist") if $dbh; + undef $schema; } diff --git a/t/746db2_400.t b/t/746db2_400.t index b3c55eb..3a5d902 100644 --- a/t/746db2_400.t +++ b/t/746db2_400.t @@ -86,6 +86,7 @@ is_deeply($type_info, $test_type_info, 'columns_info_for - column data types'); # clean up our mess END { - my $dbh = eval { $schema->storage->_dbh }; - $dbh->do("DROP TABLE artist") if $dbh; + my $dbh = eval { $schema->storage->_dbh }; + $dbh->do("DROP TABLE artist") if $dbh; + undef $schema; } diff --git a/t/746mssql.t b/t/746mssql.t index 12573b4..71f49a2 100644 --- a/t/746mssql.t +++ b/t/746mssql.t @@ -580,5 +580,6 @@ END { eval { $dbh->do("DROP TABLE $_") } for qw/artist artist_guid money_test books owners/; } + undef $schema; } # vim:sw=2 sts=2 diff --git a/t/746sybase.t b/t/746sybase.t index 6a4b4ef..b59fe70 100644 --- a/t/746sybase.t +++ b/t/746sybase.t @@ -638,4 +638,6 @@ END { eval { $dbh->do("DROP TABLE $_") } for qw/artist bindtype_test money_test computed_column_test/; } + + undef $schema; } diff --git a/t/747mssql_ado.t b/t/747mssql_ado.t index a2f99d5..71fc5b7 100644 --- a/t/747mssql_ado.t +++ b/t/747mssql_ado.t @@ -93,5 +93,7 @@ END { eval { $dbh->do("DROP TABLE $_") } for qw/artist/; } + + undef $schema; } # vim:sw=2 sts=2 diff --git a/t/748informix.t b/t/748informix.t index 11204b9..42bdac8 100644 --- a/t/748informix.t +++ b/t/748informix.t @@ -145,6 +145,7 @@ done_testing; # clean up our mess END { - my $dbh = eval { $schema->storage->_dbh }; - $dbh->do("DROP TABLE artist") if $dbh; + my $dbh = eval { $schema->storage->_dbh }; + $dbh->do("DROP TABLE artist") if $dbh; + undef $schema; } diff --git a/t/749sqlanywhere.t b/t/749sqlanywhere.t index a0e55f9..396e103 100644 --- a/t/749sqlanywhere.t +++ b/t/749sqlanywhere.t @@ -48,7 +48,7 @@ foreach my $info (@info) { auto_savepoint => 1 }); - my $guard = Scope::Guard->new(\&cleanup); + my $guard = Scope::Guard->new(sub{ cleanup($schema) }); my 
$dbh = $schema->storage->dbh; @@ -259,6 +259,7 @@ SQL done_testing; sub cleanup { + my $schema = shift; eval { $schema->storage->dbh->do("DROP TABLE $_") } for qw/artist artist_guid bindtype_test/; } diff --git a/t/74mssql.t b/t/74mssql.t index 0ccaa23..2ec7fa5 100644 --- a/t/74mssql.t +++ b/t/74mssql.t @@ -10,6 +10,7 @@ BEGIN { use Test::More; use Test::Exception; +use Scalar::Util 'weaken'; use DBIx::Class::Optional::Dependencies (); use lib qw(t/lib); use DBICTest; @@ -123,7 +124,8 @@ for my $storage_type (@test_storages) { SQL }); - my $rs = $schema->resultset('Money'); + my $rs = $schema->resultset('Money'); + weaken(my $rs_cp = $rs); # nested closure refcounting is an utter mess in perl my $row; lives_ok { @@ -151,7 +153,7 @@ SQL # test simple transaction with commit lives_ok { $schema->txn_do(sub { - $rs->create({ amount => 300 }); + $rs_cp->create({ amount => 300 }); }); } 'simple transaction'; @@ -163,7 +165,7 @@ SQL # test rollback throws_ok { $schema->txn_do(sub { - $rs->create({ amount => 700 }); + $rs_cp->create({ amount => 700 }); die 'mtfnpy'; }); } qr/mtfnpy/, 'simple failed txn'; @@ -212,9 +214,10 @@ SQL # a reconnect should trigger on next action $schema->storage->_get_dbh->disconnect; + lives_and { $wrappers->{$wrapper}->( sub { - $rs->create({ amount => 900 + $_ }) for 1..3; + $rs_cp->create({ amount => 900 + $_ }) for 1..3; }); is $rs->count, 3; } "transaction on disconnected handle with $wrapper wrapper"; @@ -245,12 +248,13 @@ SQL my $map = [ ['Artist 1', '1002.00'], ['Artist 2', '1003.00'] ]; + weaken(my $a_rs_cp = $artist_rs); + lives_and { my @results; - $wrappers->{$wrapper}->( sub { - while (my $money = $rs->next) { - my $artist = $artist_rs->next; + while (my $money = $rs_cp->next) { + my $artist = $a_rs_cp->next; push @results, [ $artist->name, $money->amount ]; }; }); @@ -332,4 +336,6 @@ END { $dbh->do("IF OBJECT_ID('cd', 'U') IS NOT NULL DROP TABLE cd"); $dbh->do("IF OBJECT_ID('money_test', 'U') IS NOT NULL DROP TABLE money_test"); } + + undef $schema; } diff --git a/t/750firebird.t b/t/750firebird.t index d55474b..32eb154 100644 --- a/t/750firebird.t +++ b/t/750firebird.t @@ -52,7 +52,7 @@ for my $prefix (keys %$env2optdep) { SKIP: { }); my $dbh = $schema->storage->dbh; - my $sg = Scope::Guard->new(\&cleanup); + my $sg = Scope::Guard->new(sub { cleanup($schema) }); eval { $dbh->do(q[DROP TABLE "artist"]) }; $dbh->do(<storage->disconnect; # to avoid object FOO is in use errors diff --git a/t/76joins.t b/t/76joins.t index b0d92f8..0fd511f 100644 --- a/t/76joins.t +++ b/t/76joins.t @@ -168,61 +168,52 @@ is($rs->first->name, 'We Are Goth', 'Correct record returned'); [ 4, 8 ], ]); - sub cd_count { - return $schema->resultset("CD")->count; - } - sub tk_count { - return $schema->resultset("TwoKeys")->count; - } - - is(cd_count(), 8, '8 rows in table cd'); - is(tk_count(), 7, '7 rows in table twokeys'); - - sub artist1 { - return $schema->resultset("CD")->search( - { 'artist.name' => 'Caterwauler McCrae' }, - { join => [qw/artist/]} - ); - } - sub artist2 { - return $schema->resultset("CD")->search( - { 'artist.name' => 'Random Boy Band' }, - { join => [qw/artist/]} - ); - } - - is( artist1()->count, 3, '3 Caterwauler McCrae CDs' ); - ok( artist1()->delete, 'Successfully deleted 3 CDs' ); - is( artist1()->count, 0, '0 Caterwauler McCrae CDs' ); - is( artist2()->count, 2, '3 Random Boy Band CDs' ); - ok( artist2()->update( { 'artist' => 1 } ) ); - is( artist2()->count, 0, '0 Random Boy Band CDs' ); - is( artist1()->count, 2, '2 Caterwauler McCrae CDs' ); + my 
$cd_count = sub { $schema->resultset("CD")->count }; + my $tk_count = sub { $schema->resultset("TwoKeys")->count }; + + is($cd_count->(), 8, '8 rows in table cd'); + is($tk_count->(), 7, '7 rows in table twokeys'); + + my $artist1_rs = $schema->resultset("CD")->search( + { 'artist.name' => 'Caterwauler McCrae' }, + { join => [qw/artist/]} + ); + + my $artist2_rs = $schema->resultset("CD")->search( + { 'artist.name' => 'Random Boy Band' }, + { join => [qw/artist/]} + ); + + is( $artist1_rs->count, 3, '3 Caterwauler McCrae CDs' ); + ok( $artist1_rs->delete, 'Successfully deleted 3 CDs' ); + is( $artist1_rs->count, 0, '0 Caterwauler McCrae CDs' ); + is( $artist2_rs->count, 2, '3 Random Boy Band CDs' ); + ok( $artist2_rs->update( { 'artist' => 1 } ) ); + is( $artist2_rs->count, 0, '0 Random Boy Band CDs' ); + is( $artist1_rs->count, 2, '2 Caterwauler McCrae CDs' ); # test update on multi-column-pk - sub tk1 { - return $schema->resultset("TwoKeys")->search( - { - 'artist.name' => { like => '%Boy Band' }, - 'cd.title' => 'Greatest Hits', - }, - { join => [qw/artist cd/] } - ); - } - sub tk2 { - return $schema->resultset("TwoKeys")->search( - { 'artist.name' => 'Caterwauler McCrae' }, - { join => [qw/artist/]} - ); - } - is( tk2()->count, 2, 'TwoKeys count == 2' ); - is( tk1()->count, 2, 'TwoKeys count == 2' ); - ok( tk1()->update( { artist => 1 } ) ); - is( tk1()->count, 0, 'TwoKeys count == 0' ); - is( tk2()->count, 4, '2 Caterwauler McCrae CDs' ); - ok( tk2()->delete, 'Successfully deleted 4 CDs' ); - is(cd_count(), 5, '5 rows in table cd'); - is(tk_count(), 3, '3 rows in table twokeys'); + my $tk1_rs = $schema->resultset("TwoKeys")->search( + { + 'artist.name' => { like => '%Boy Band' }, + 'cd.title' => 'Greatest Hits', + }, + { join => [qw/artist cd/] } + ); + + my $tk2_rs = $schema->resultset("TwoKeys")->search( + { 'artist.name' => 'Caterwauler McCrae' }, + { join => [qw/artist/]} + ); + + is( $tk2_rs->count, 2, 'TwoKeys count == 2' ); + is( $tk1_rs->count, 2, 'TwoKeys count == 2' ); + ok( $tk1_rs->update( { artist => 1 } ) ); + is( $tk1_rs->count, 0, 'TwoKeys count == 0' ); + is( $tk2_rs->count, 4, '2 Caterwauler McCrae CDs' ); + ok( $tk2_rs->delete, 'Successfully deleted 4 CDs' ); + is($cd_count->(), 5, '5 rows in table cd'); + is($tk_count->(), 3, '3 rows in table twokeys'); } done_testing; diff --git a/t/86sqlt.t b/t/86sqlt.t index 5726870..89783d3 100644 --- a/t/86sqlt.t +++ b/t/86sqlt.t @@ -88,6 +88,7 @@ my $schema = DBICTest->init_schema (no_deploy => 1); { my $deploy_hook_called = 0; $custom_deployment_statements_called = 0; + my $sqlt_type = $schema->storage->sqlt_type; # replace the sqlt calback with a custom version ading an index $schema->source('Track')->sqlt_deploy_callback(sub { @@ -97,11 +98,11 @@ my $schema = DBICTest->init_schema (no_deploy => 1); is ( $sqlt_table->schema->translator->producer_type, - join ('::', 'SQL::Translator::Producer', $schema->storage->sqlt_type), + join ('::', 'SQL::Translator::Producer', $sqlt_type), 'Production type passed to translator object', ); - if ($schema->storage->sqlt_type eq 'SQLite' ) { + if ($sqlt_type eq 'SQLite' ) { $sqlt_table->add_index( name => 'track_title', fields => ['title'] ) or die $sqlt_table->error; } diff --git a/t/98savepoints.t b/t/98savepoints.t index 03365ec..53d5d62 100644 --- a/t/98savepoints.t +++ b/t/98savepoints.t @@ -129,17 +129,19 @@ for my $prefix (keys %$env2optdep) { SKIP: { # And now to see if txn_do will behave correctly $schema->txn_do (sub { + my $artycp = $arty; + $schema->txn_do (sub { - $arty->name 
('Muff'); - $arty->update; + $artycp->name ('Muff'); + $artycp->update; }); eval { $schema->txn_do (sub { - $arty->name ('Moff'); - $arty->update; - $arty->discard_changes; - is($arty->name,'Moff','Value updated in nested transaction'); + $artycp->name ('Moff'); + $artycp->update; + $artycp->discard_changes; + is($artycp->name,'Moff','Value updated in nested transaction'); $schema->storage->dbh->do ("GUARANTEED TO PHAIL"); }); }; @@ -170,5 +172,7 @@ for my $prefix (keys %$env2optdep) { SKIP: { done_testing; -END { eval { $schema->storage->dbh->do ("DROP TABLE artist") } if defined $schema } - +END { + eval { $schema->storage->dbh->do ("DROP TABLE artist") } if defined $schema; + undef $schema; +} diff --git a/t/cdbi/sweet/08pager.t b/t/cdbi/sweet/08pager.t index 9bf7d9b..2f037cc 100644 --- a/t/cdbi/sweet/08pager.t +++ b/t/cdbi/sweet/08pager.t @@ -69,3 +69,14 @@ is( $it->next, undef, "disable_sql_paging next past end of page ok" ); { rows => 5 } ); is( $it->count, 1, "complex abstract count ok" ); + +# cleanup globals so we do not trigger the leaktest +for ( map { DBICTest->schema->class($_) } DBICTest->schema->sources ) { + $_->class_resolver(undef); + $_->resultset_instance(undef); + $_->result_source_instance(undef); +} +{ + no warnings qw/redefine once/; + *DBICTest::schema = sub {}; +} diff --git a/t/delete/cascade_missing.t b/t/delete/cascade_missing.t index f5b95a1..03de883 100644 --- a/t/delete/cascade_missing.t +++ b/t/delete/cascade_missing.t @@ -12,8 +12,9 @@ my $schema = DBICTest->init_schema(); $schema->_unregister_source('CD'); warnings_like { + my $s = $schema; lives_ok { - $_->delete for $schema->resultset('Artist')->all; + $_->delete for $s->resultset('Artist')->all; } 'delete on rows with dangling rels lives'; } [ # 12 == 3 artists * failed cascades: diff --git a/t/delete/m2m.t b/t/delete/m2m.t index de4d3fd..7a1628d 100644 --- a/t/delete/m2m.t +++ b/t/delete/m2m.t @@ -9,7 +9,8 @@ my $schema = DBICTest->init_schema(); my $cd = $schema->resultset("CD")->find(2); ok $cd->liner_notes; -ok keys %{$cd->{_relationship_data}}, "_relationship_data populated"; + +ok scalar(keys %{$cd->{_relationship_data}}), "_relationship_data populated"; $cd->discard_changes; ok $cd->liner_notes, 'relationships still valid after discarding changes'; diff --git a/t/delete/related.t b/t/delete/related.t index 12bc43a..f8e1d97 100644 --- a/t/delete/related.t +++ b/t/delete/related.t @@ -1,11 +1,10 @@ -use Test::More; use strict; use warnings; +use Test::More; + use lib qw(t/lib); use DBICTest; -plan tests => 4; - my $schema = DBICTest->init_schema(); my $ars = $schema->resultset('Artist'); @@ -60,4 +59,9 @@ TODO: { my $cd2pr_count = $cd2pr_rs->count; $prod_cd->delete_related('cd_to_producer', { producer => $prod } ); is ($cd2pr_rs->count, $cd2pr_count -= 1, 'm2m link deleted succesfully'); + + # see 187ec69a for why this is neccessary + $prod->result_source(undef); } + +done_testing; diff --git a/t/inflate/datetime_firebird.t b/t/inflate/datetime_firebird.t index c68a762..6c41ac1 100644 --- a/t/inflate/datetime_firebird.t +++ b/t/inflate/datetime_firebird.t @@ -56,7 +56,7 @@ foreach my $conn_idx (0..$#info) { on_connect_call => [ 'datetime_setup' ], }); - my $sg = Scope::Guard->new(\&cleanup); + my $sg = Scope::Guard->new(sub { cleanup($schema) } ); eval { $schema->storage->dbh->do('DROP TABLE "event"') }; $schema->storage->dbh->do(<<'SQL'); @@ -96,6 +96,7 @@ done_testing; # clean up our mess sub cleanup { + my $schema = shift; my $dbh; eval { $schema->storage->disconnect; # to avoid object FOO is 
in use errors diff --git a/t/inflate/datetime_informix.t b/t/inflate/datetime_informix.t index 1df923e..bf23b72 100644 --- a/t/inflate/datetime_informix.t +++ b/t/inflate/datetime_informix.t @@ -30,7 +30,7 @@ my $schema; on_connect_call => [ 'datetime_setup' ], }); - my $sg = Scope::Guard->new(\&cleanup); + my $sg = Scope::Guard->new(sub { cleanup($schema) } ); eval { $schema->storage->dbh->do('DROP TABLE event') }; $schema->storage->dbh->do(<<'SQL'); @@ -70,6 +70,7 @@ done_testing; # clean up our mess sub cleanup { + my $schema = shift; my $dbh; eval { $dbh = $schema->storage->dbh; diff --git a/t/inflate/datetime_mssql.t b/t/inflate/datetime_mssql.t index 822cc84..1c32f10 100644 --- a/t/inflate/datetime_mssql.t +++ b/t/inflate/datetime_mssql.t @@ -73,7 +73,7 @@ for my $connect_info (@connect_info) { } } - my $guard = Scope::Guard->new(\&cleanup); + my $guard = Scope::Guard->new(sub{ cleanup($schema) }); # $^W because DBD::ADO is a piece of crap try { local $^W = 0; $schema->storage->dbh->do("DROP TABLE track") }; @@ -151,6 +151,7 @@ done_testing; # clean up our mess sub cleanup { + my $schema = shift; if (my $dbh = eval { $schema->storage->dbh }) { $dbh->do('DROP TABLE track'); $dbh->do('DROP TABLE event_small_dt'); diff --git a/t/inflate/datetime_oracle.t b/t/inflate/datetime_oracle.t index af8c90b..72e0e17 100644 --- a/t/inflate/datetime_oracle.t +++ b/t/inflate/datetime_oracle.t @@ -109,8 +109,9 @@ done_testing; # clean up our mess END { - if($schema && ($dbh = $schema->storage->dbh)) { - $dbh->do("DROP TABLE track"); - } + if($schema && ($dbh = $schema->storage->dbh)) { + $dbh->do("DROP TABLE track"); + } + undef $schema; } diff --git a/t/inflate/datetime_sqlanywhere.t b/t/inflate/datetime_sqlanywhere.t index 606394f..152a789 100644 --- a/t/inflate/datetime_sqlanywhere.t +++ b/t/inflate/datetime_sqlanywhere.t @@ -50,7 +50,7 @@ foreach my $info (@info) { on_connect_call => 'datetime_setup', }); - my $sg = Scope::Guard->new(\&cleanup); + my $sg = Scope::Guard->new(sub { cleanup($schema) } ); eval { $schema->storage->dbh->do('DROP TABLE event') }; $schema->storage->dbh->do(<<"SQL"); @@ -98,6 +98,7 @@ done_testing; # clean up our mess sub cleanup { + my $schema = shift; if (my $dbh = $schema->storage->dbh) { eval { $dbh->do("DROP TABLE $_") } for qw/event/; } diff --git a/t/inflate/datetime_sybase.t b/t/inflate/datetime_sybase.t index 56c0648..597f6a3 100644 --- a/t/inflate/datetime_sybase.t +++ b/t/inflate/datetime_sybase.t @@ -42,7 +42,7 @@ for my $storage_type (@storage_types) { on_connect_call => 'datetime_setup', }); - my $guard = Scope::Guard->new(\&cleanup); + my $guard = Scope::Guard->new(sub { cleanup($schema) } ); $schema->storage->ensure_connected; @@ -143,6 +143,7 @@ done_testing; # clean up our mess sub cleanup { + my $schema = shift; if (my $dbh = eval { $schema->storage->dbh }) { $dbh->do('DROP TABLE track'); $dbh->do('DROP TABLE event_small_dt'); diff --git a/t/lib/DBICTest.pm b/t/lib/DBICTest.pm index 4b8b951..df3587e 100644 --- a/t/lib/DBICTest.pm +++ b/t/lib/DBICTest.pm @@ -5,6 +5,7 @@ use strict; use warnings; use DBICTest::RunMode; use DBICTest::Schema; +use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry/; use Carp; use Path::Class::File (); @@ -181,6 +182,8 @@ sub __mk_disconnect_guard { }; } +my $weak_registry = {}; + sub init_schema { my $self = shift; my %args = @_; @@ -208,9 +211,17 @@ sub init_schema { __PACKAGE__->populate_schema( $schema ) if( !$args{no_populate} ); } + + populate_weakregistry ( $weak_registry, $schema->storage ) + if 
$INC{'Test/Builder.pm'} and $schema->storage; + return $schema; } +END { + assert_empty_weakregistry($weak_registry, 'quiet'); +} + =head2 deploy_schema DBICTest->deploy_schema( $schema ); diff --git a/t/lib/DBICTest/Schema.pm b/t/lib/DBICTest/Schema.pm index 937aa77..1e5c564 100644 --- a/t/lib/DBICTest/Schema.pm +++ b/t/lib/DBICTest/Schema.pm @@ -1,9 +1,14 @@ package # hide from PAUSE DBICTest::Schema; -use base qw/DBIx::Class::Schema/; +use strict; +use warnings; +no warnings 'qw'; -no warnings qw/qw/; +use base 'DBIx::Class::Schema'; + +use DBICTest::Util qw/populate_weakregistry assert_empty_weakregistry/; +use namespace::clean; __PACKAGE__->mk_group_accessors(simple => 'custom_attr'); @@ -60,4 +65,17 @@ sub sqlt_deploy_hook { $sqlt_schema->drop_table('dummy'); } +my $weak_registry = {}; + +sub clone { + my $self = shift->next::method(@_); + populate_weakregistry ( $weak_registry, $self ) + if $INC{'Test/Builder.pm'}; + $self; +} + +END { + assert_empty_weakregistry($weak_registry, 'quiet'); +} + 1; diff --git a/t/lib/DBICTest/Util.pm b/t/lib/DBICTest/Util.pm new file mode 100644 index 0000000..b120acd --- /dev/null +++ b/t/lib/DBICTest/Util.pm @@ -0,0 +1,94 @@ +package DBICTest::Util; + +use warnings; +use strict; + +use Carp; +use Scalar::Util qw/isweak weaken blessed reftype refaddr/; + +use base 'Exporter'; +our @EXPORT_OK = qw/stacktrace populate_weakregistry assert_empty_weakregistry/; + +sub stacktrace { + my $frame = shift; + $frame++; + my (@stack, @frame); + + while (@frame = caller($frame++)) { + push @stack, [@frame[3,1,2]]; + } + + return undef unless @stack; + + $stack[0][0] = ''; + return join "\tinvoked as ", map { sprintf ("%s at %s line %d\n", @$_ ) } @stack; +} + +sub populate_weakregistry { + my ($reg, $target, $slot) = @_; + + + croak 'Target is not a reference' unless defined ref $target; + + $slot ||= (sprintf '%s%s(0x%x)', # so we don't trigger stringification + (defined blessed $target) ? blessed($target) . '=' : '', + reftype $target, + refaddr $target, + ); + + weaken( $reg->{$slot}{weakref} = $target ); + $reg->{$slot}{stacktrace} = stacktrace(1); + + $target; +} + +my $leaks_found; +sub assert_empty_weakregistry { + my ($weak_registry, $quiet) = @_; + + croak 'Expecting a registry hashref' unless ref $weak_registry eq 'HASH'; + + return unless keys %$weak_registry; + + my $tb = eval { Test::Builder->new } + or croak 'Calling test_weakregistry without a loaded Test::Builder makes no sense'; + + for my $slot (sort keys %$weak_registry) { + next if ! defined $weak_registry->{$slot}{weakref}; + $tb->BAILOUT("!!!! WEAK REGISTRY SLOT $slot IS NOT A WEAKREF !!!!") + unless isweak( $weak_registry->{$slot}{weakref} ); + } + + + for my $slot (sort keys %$weak_registry) { + ! defined $weak_registry->{$slot}{weakref} and next if $quiet; + + $tb->ok (! defined $weak_registry->{$slot}{weakref}, "No leaks of $slot") or do { + $leaks_found = 1; + + my $diag = ''; + + $diag .= Devel::FindRef::track ($weak_registry->{$slot}{weakref}, 20) . "\n" + if ( $ENV{TEST_VERBOSE} && eval { require Devel::FindRef }); + + if (my $stack = $weak_registry->{$slot}{stacktrace}) { + $diag .= " Reference first seen$stack"; + } + + $tb->diag($diag) if $diag; + }; + } +} + +END { + if ($leaks_found) { + my $tb = Test::Builder->new; + $tb->diag(sprintf + "\n\n%s\n%s\n\nInstall Devel::FindRef and re-run the test with set " + . '$ENV{TEST_VERBOSE} (prove -v) to see a more detailed leak-report' + . 
"\n\n%s\n%s\n\n", ('#' x 16) x 4 + ) if (!$tb->is_passing and (!$ENV{TEST_VERBOSE} or !$INC{'Devel/FindRef.pm'})); + } +} + +1; diff --git a/t/multi_create/standard.t b/t/multi_create/standard.t index 263eec2..6eb2b9a 100644 --- a/t/multi_create/standard.t +++ b/t/multi_create/standard.t @@ -405,10 +405,10 @@ lives_ok ( sub { $schema->resultset('Artist')->populate([ $kurt_cobain ]); # %) - $a = $schema->resultset('Artist')->find({name => 'Kurt Cobain'}); + my $artist = $schema->resultset('Artist')->find({name => 'Kurt Cobain'}); - is($a->name, 'Kurt Cobain', 'Artist insertion ok'); - is($a->cds && $a->cds->first && $a->cds->first->title, + is($artist->name, 'Kurt Cobain', 'Artist insertion ok'); + is($artist->cds && $artist->cds->first && $artist->cds->first->title, 'In Utero', 'CD insertion ok'); }, 'populate'); diff --git a/t/sqlmaker/order_by_bindtransport.t b/t/sqlmaker/order_by_bindtransport.t index 22c82d5..d2a4e83 100644 --- a/t/sqlmaker/order_by_bindtransport.t +++ b/t/sqlmaker/order_by_bindtransport.t @@ -8,13 +8,10 @@ use lib qw(t/lib); use DBICTest; use DBIC::SqlMakerTest; -my $schema = DBICTest->init_schema; - -my $rs = $schema->resultset('FourKeys'); - sub test_order { TODO: { + my $rs = shift; my $args = shift; local $TODO = "Not implemented" if $args->{todo}; @@ -100,6 +97,7 @@ my @tests = ( }, ); -test_order($_) for @tests; +my $rs = DBICTest->init_schema->resultset('FourKeys'); +test_order($rs, $_) for @tests; done_testing; diff --git a/t/storage/reconnect.t b/t/storage/reconnect.t index 7416486..8a33284 100644 --- a/t/storage/reconnect.t +++ b/t/storage/reconnect.t @@ -89,7 +89,7 @@ my $ctx_map = { }, }; -for my $ctx (keys $ctx_map) { +for my $ctx (keys %$ctx_map) { # start disconnected and then connected $schema->storage->disconnect;