X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=t%2F71mysql.t;h=9d2c5d0f8ef8051d1d7cc7867e7c1edb536c5b43;hb=c0329273268971824784f239f32c7246e68da9c5;hp=cf02a6136a0e992a5fe655fc9d9c912c18c26a83;hpb=13a2f0311c4d16131b69bea50edb3805958a62a9;p=dbsrgits%2FDBIx-Class.git diff --git a/t/71mysql.t b/t/71mysql.t index cf02a61..9d2c5d0 100644 --- a/t/71mysql.t +++ b/t/71mysql.t @@ -1,22 +1,23 @@ +BEGIN { do "./t/lib/ANFANG.pm" or die ( $@ || $! ) } +use DBIx::Class::Optional::Dependencies -skip_all_without => 'test_rdbms_mysql'; + use strict; -use warnings; +use warnings; use Test::More; use Test::Exception; -use lib qw(t/lib); -use DBICTest; -use DBI::Const::GetInfoType; +use Test::Warn; -my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/}; +use B::Deparse; +use DBI::Const::GetInfoType; +use Scalar::Util qw/weaken/; -#warn "$dsn $user $pass"; -plan skip_all => 'Set $ENV{DBICTEST_MYSQL_DSN}, _USER and _PASS to run this test' - unless ($dsn && $user); +use DBICTest; -plan tests => 23; +my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/}; -my $schema = DBICTest::Schema->connect($dsn, $user, $pass); +my $schema = DBICTest::Schema->connect($dsn, $user, $pass, { quote_names => 1 }); my $dbh = $schema->storage->dbh; @@ -26,7 +27,7 @@ $dbh->do("CREATE TABLE artist (artistid INTEGER NOT NULL AUTO_INCREMENT PRIMARY $dbh->do("DROP TABLE IF EXISTS cd;"); -$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year INTEGER, genreid INTEGER, single_track INTEGER);"); +$dbh->do("CREATE TABLE cd (cdid INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, artist INTEGER, title TEXT, year DATE, genreid INTEGER, single_track INTEGER);"); $dbh->do("DROP TABLE IF EXISTS producer;"); @@ -46,6 +47,14 @@ $dbh->do("CREATE TABLE books (id INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY, so #'dbi:mysql:host=localhost;database=dbic_test', 'dbic_test', ''); +# make sure sqlt_type overrides work (::Storage::DBI::mysql does this) +{ + my $schema = DBICTest::Schema->connect($dsn, $user, $pass); + + ok (!$schema->storage->_dbh, 'definitely not connected'); + is ($schema->storage->sqlt_type, 'MySQL', 'sqlt_type correct pre-connection'); +} + # This is in Core now, but it's here just to test that it doesn't break $schema->class('Artist')->load_components('PK::Auto'); @@ -68,30 +77,55 @@ $it->next; $it->next; is( $it->next, undef, "next past end of resultset ok" ); +# Limit with select-lock +lives_ok { + $schema->txn_do (sub { + isa_ok ( + $schema->resultset('Artist')->find({artistid => 1}, {for => 'update', rows => 1}), + 'DBICTest::Schema::Artist', + ); + }); +} 'Limited FOR UPDATE select works'; + +# shared-lock +lives_ok { + $schema->txn_do (sub { + isa_ok ( + $schema->resultset('Artist')->find({artistid => 1}, {for => 'shared'}), + 'DBICTest::Schema::Artist', + ); + }); +} 'LOCK IN SHARE MODE select works'; + +my ($int_type_name, @undef_default) = DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE + ? 
('integer') + : ( 'INT', default_value => undef ) +; + my $test_type_info = { 'artistid' => { - 'data_type' => 'INT', + 'data_type' => $int_type_name, 'is_nullable' => 0, 'size' => 11, - 'default_value' => undef, + @undef_default, }, 'name' => { 'data_type' => 'VARCHAR', 'is_nullable' => 1, 'size' => 100, - 'default_value' => undef, + @undef_default, }, 'rank' => { - 'data_type' => 'INT', + 'data_type' => $int_type_name, 'is_nullable' => 0, 'size' => 11, - 'default_value' => 13, + DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE ? () : ( 'default_value' => '13' ), }, 'charfield' => { 'data_type' => 'CHAR', 'is_nullable' => 1, 'size' => 10, - 'default_value' => undef, + @undef_default, }, }; @@ -110,11 +144,11 @@ $schema->populate ('BooksInLibrary', [ ]); # -# try a distinct + prefetch on tables with identically named columns +# try a distinct + prefetch on tables with identically named columns # (mysql doesn't seem to like subqueries with equally named columns) # -SKIP: { +{ # try a ->has_many direction (due to a 'multi' accessor the select/group_by group is collapsed) my $owners = $schema->resultset ('Owners')->search ( { 'books.id' => { '!=', undef }}, @@ -122,96 +156,356 @@ SKIP: { ); my $owners2 = $schema->resultset ('Owners')->search ({ id => { -in => $owners->get_column ('me.id')->as_query }}); for ($owners, $owners2) { - lives_ok { is ($_->all, 2, 'Prefetched grouped search returns correct number of rows') } - || skip ('No test due to exception', 1); - lives_ok { is ($_->count, 2, 'Prefetched grouped search returns correct count') } - || skip ('No test due to exception', 1); + is ($_->all, 2, 'Prefetched grouped search returns correct number of rows'); + is ($_->count, 2, 'Prefetched grouped search returns correct count'); } - TODO: { - # try a ->prefetch direction (no select collapse) - my $books = $schema->resultset ('BooksInLibrary')->search ( - { 'owner.name' => 'wiggle' }, - { prefetch => 'owner', distinct => 1 } - ); - - local $TODO = 'MySQL is crazy - there seems to be no way to make this work'; - # error thrown is: - # Duplicate column name 'id' [for Statement " - # SELECT COUNT( * ) - # FROM ( - # SELECT me.id, me.source, me.owner, me.title, me.price, owner.id, owner.name - # FROM books me - # JOIN owners owner ON owner.id = me.owner - # WHERE ( ( owner.name = ? AND source = ? 
) ) - # GROUP BY me.id, me.source, me.owner, me.title, me.price, owner.id, owner.name - # ) count_subq - # " with ParamValues: 0='wiggle', 1='Library'] - # - # go fucking figure - - my $books2 = $schema->resultset ('BooksInLibrary')->search ({ id => { -in => $books->get_column ('me.id')->as_query }}); - for ($books, $books2) { - lives_ok { is ($_->all, 1, 'Prefetched grouped search returns correct number of rows') } - || skip ('No test due to exception', 1); - lives_ok { is ($_->count, 1, 'Prefetched grouped search returns correct count') } - || skip ('No test due to exception', 1); - } + # try a ->belongs_to direction (no select collapse) + my $books = $schema->resultset ('BooksInLibrary')->search ( + { 'owner.name' => 'wiggle' }, + { prefetch => 'owner', distinct => 1 } + ); + my $books2 = $schema->resultset ('BooksInLibrary')->search ({ id => { -in => $books->get_column ('me.id')->as_query }}); + for ($books, $books2) { + is ($_->all, 1, 'Prefetched grouped search returns correct number of rows'); + is ($_->count, 1, 'Prefetched grouped search returns correct count'); } } SKIP: { - my $mysql_version = $dbh->get_info( $GetInfoType{SQL_DBMS_VER} ); - skip "Cannot determine MySQL server version", 1 if !$mysql_version; + my $norm_version = $schema->storage->_server_info->{normalized_dbms_version} + or skip "Cannot determine MySQL server version", 1; - my ($v1, $v2, $v3) = $mysql_version =~ /^(\d+)\.(\d+)(?:\.(\d+))?/; - skip "Cannot determine MySQL server version", 1 if !$v1 || !defined($v2); - - $v3 ||= 0; - - if( ($v1 < 5) || ($v1 == 5 && $v2 == 0 && $v3 <= 3) ) { + if ($norm_version < 5.000003_01) { $test_type_info->{charfield}->{data_type} = 'VARCHAR'; } + if (DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE) { + $_->{data_type} = lc $_->{data_type} for values %$test_type_info; + } + my $type_info = $schema->storage->columns_info_for('artist'); is_deeply($type_info, $test_type_info, 'columns_info_for - column data types'); } +my $cd = $schema->resultset ('CD')->create ({}); +my $producer = $schema->resultset ('Producer')->create ({}); +lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die'; + +{ + my $artist = $schema->resultset('Artist')->next; + my $cd = $schema->resultset('CD')->next; + $cd->set_from_related ('artist', $artist); + $cd->update; + + my $rs = $schema->resultset('CD')->search ({}, { prefetch => 'artist' }); + + lives_ok sub { + my $cd = $rs->next; + is ($cd->artist->name, $artist->name, 'Prefetched artist'); + }, 'join does not throw (mysql 3 test)'; +} + ## Can we properly deal with the null search problem? 
## ## Only way is to do a SET SQL_AUTO_IS_NULL = 0; on connect ## But I'm not sure if we should do this or not (Ash, 2008/06/03) +# +# There is now a built-in function to do this, test that everything works +# with it (ribasushi, 2009/07/03) NULLINSEARCH: { - - ok my $artist1_rs = $schema->resultset('Artist')->search({artistid=>6666}) - => 'Created an artist resultset of 6666'; - + my $ansi_schema = DBICTest::Schema->connect ($dsn, $user, $pass, { on_connect_call => 'set_strict_mode' }); + + $ansi_schema->resultset('Artist')->create ({ name => 'last created artist' }); + + ok my $artist1_rs = $ansi_schema->resultset('Artist')->search({artistid=>6666}) + => 'Created an artist resultset of 6666'; + is $artist1_rs->count, 0 - => 'Got no returned rows'; - - ok my $artist2_rs = $schema->resultset('Artist')->search({artistid=>undef}) - => 'Created an artist resultset of undef'; - - TODO: { - local $TODO = "need to fix the row count =1 when select * from table where pk IS NULL problem"; - is $artist2_rs->count, 0 - => 'got no rows'; - } + => 'Got no returned rows'; + + ok my $artist2_rs = $ansi_schema->resultset('Artist')->search({artistid=>undef}) + => 'Created an artist resultset of undef'; + + is $artist2_rs->count, 0 + => 'got no rows'; my $artist = $artist2_rs->single; - - is $artist => undef - => 'Nothing Found!'; + + is $artist => undef, + => 'Nothing Found!'; } - -my $cd = $schema->resultset ('CD')->create ({}); -my $producer = $schema->resultset ('Producer')->create ({}); +# check for proper grouped counts +{ + my $ansi_schema = DBICTest::Schema->connect ($dsn, $user, $pass, { + on_connect_call => 'set_strict_mode', + quote_char => '`', + }); + my $rs = $ansi_schema->resultset('CD'); + + my $years; + $years->{$_->year|| scalar keys %$years}++ for $rs->all; # NULL != NULL, thus the keys eval + + lives_ok ( sub { + is ( + $rs->search ({}, { group_by => 'year'})->count, + scalar keys %$years, + 'grouped count correct', + ); + }, 'Grouped count does not throw'); + + lives_ok( sub { + $ansi_schema->resultset('Owners')->search({}, { + join => 'books', group_by => [ 'me.id', 'books.id' ] + })->count(); + }, 'count on grouped columns with the same name does not throw'); +} + +# a more contrived^Wcomplicated self-referential double-subquery test +{ + my $rs = $schema->resultset('Artist')->search({ name => { -like => 'baby_%' } }); + + $rs->populate([map { [$_] } ('name', map { "baby_$_" } (1..10) ) ]); + + my ($count_sql, @count_bind) = @${$rs->count_rs->as_query}; + + my $complex_rs = $schema->resultset('Artist')->search( + { artistid => { + -in => $rs->get_column('artistid') + ->as_query + } }, + ); + + $complex_rs->update({ name => \[ "CONCAT( `name`, '_bell_out_of_', $count_sql )", @count_bind ] }); + + for (1..10) { + is ( + $schema->resultset('Artist')->search({ name => "baby_${_}_bell_out_of_10" })->count, + 1, + "Correctly updated babybell $_", + ); + } + + is ($rs->count, 10, '10 artists present'); + + $schema->is_executed_querycount( sub { + $complex_rs->delete; + }, 1, 'One delete query fired' ); + is ($rs->count, 0, '10 Artists correctly deleted'); + + $rs->create({ + name => 'baby_with_cd', + cds => [ { title => 'babeeeeee', year => 2013 } ], + }); + is ($rs->count, 1, 'Artist with cd created'); -lives_ok { $cd->set_producers ([ $producer ]) } 'set_relationship doesnt die'; -# clean up our mess -END { - #$dbh->do("DROP TABLE artist") if $dbh; + $schema->is_executed_querycount( sub { + $schema->resultset('CD')->search_related('artist', + { 'artist.name' => { -like => 'baby_with_%' } } 
+ )->delete; + }, 1, 'And one more delete query fired'); + is ($rs->count, 0, 'Artist with cd deleted'); } + +ZEROINSEARCH: { + my $cds_per_year = { + 2001 => 2, + 2002 => 1, + 2005 => 3, + }; + + my $rs = $schema->resultset ('CD'); + $rs->delete; + for my $y (keys %$cds_per_year) { + for my $c (1 .. $cds_per_year->{$y} ) { + $rs->create ({ title => "CD $y-$c", artist => 1, year => "$y-01-01" }); + } + } + + is ($rs->count, 6, 'CDs created successfully'); + + $rs = $rs->search ({}, { + select => [ \ 'YEAR(year)' ], as => ['y'], distinct => 1, + }); + + my $y_rs = $rs->get_column ('y'); + + warnings_exist { is_deeply ( + [ sort ($y_rs->all) ], + [ sort keys %$cds_per_year ], + 'Years group successfully', + ) } qr/ + \QUse of distinct => 1 while selecting anything other than a column \E + \Qdeclared on the primary ResultSource is deprecated\E + /x, 'deprecation warning'; + + + $rs->create ({ artist => 1, year => '0-1-1', title => 'Jesus Rap' }); + + is_deeply ( + [ sort $y_rs->all ], + [ 0, sort keys %$cds_per_year ], + 'Zero-year groups successfully', + ); + + # convoluted search taken verbatim from list + my $restrict_rs = $rs->search({ -and => [ + year => { '!=', 0 }, + year => { '!=', undef } + ]}); + + warnings_exist { is_deeply ( + [ sort $restrict_rs->get_column('y')->all ], + [ sort $y_rs->all ], + 'Zero year was correctly excluded from resultset', + ) } qr/ + \QUse of distinct => 1 while selecting anything other than a column \E + \Qdeclared on the primary ResultSource is deprecated\E + /x, 'deprecation warning'; +} + +# make sure find hooks determine driver +{ + my $schema = DBICTest::Schema->connect($dsn, $user, $pass); + $schema->resultset("Artist")->find(4); + isa_ok($schema->storage->sql_maker, 'DBIx::Class::SQLMaker::MySQL'); +} + +# make sure the mysql_auto_reconnect buggery is avoided +{ + local $ENV{MOD_PERL} = 'boogiewoogie'; + my $schema = DBICTest::Schema->connect($dsn, $user, $pass); + ok (! $schema->storage->_get_dbh->{mysql_auto_reconnect}, 'mysql_auto_reconnect unset regardless of ENV' ); + + # Make sure hardcore forking action still works even if mysql_auto_reconnect + # is true (test inspired by ether) + + my $schema_autorecon = DBICTest::Schema->connect($dsn, $user, $pass, { mysql_auto_reconnect => 1 }); + my $orig_dbh = $schema_autorecon->storage->_get_dbh; + weaken $orig_dbh; + + ok ($orig_dbh, 'Got weak $dbh ref'); + ok ($orig_dbh->{mysql_auto_reconnect}, 'mysql_auto_reconnect is properly set if explicitly requested' ); + + my $rs = $schema_autorecon->resultset('Artist'); + + my ($parent_in, $child_out); + pipe( $parent_in, $child_out ) or die "Pipe open failed: $!"; + my $pid = fork(); + if (! defined $pid ) { + die "fork() failed: $!" + } + elsif ($pid) { + close $child_out; + + # sanity check + $schema_autorecon->storage->dbh_do(sub { + is ($_[1], $orig_dbh, 'Storage holds correct $dbh in parent'); + }); + + # kill our $dbh + $schema_autorecon->storage->_dbh(undef); + + { + local $TODO = "Perl $] is known to leak like a sieve" + if DBIx::Class::_ENV_::PEEPEENESS; + + ok (! 
defined $orig_dbh, 'Parent $dbh handle is gone'); + } + } + else { + close $parent_in; + + #simulate a subtest to not confuse the parent TAP emission + my $tb = Test::More->builder; + $tb->reset; + for (qw/output failure_output todo_output/) { + close $tb->$_; + open ($tb->$_, '>&', $child_out); + } + + # wait for parent to kill its $dbh + sleep 1; + + # try to do something dbic-esque + $rs->create({ name => "Hardcore Forker $$" }); + + { + local $TODO = "Perl $] is known to leak like a sieve" + if DBIx::Class::_ENV_::PEEPEENESS; + + ok (! defined $orig_dbh, 'DBIC operation triggered reconnect - old $dbh is gone'); + } + + done_testing; + exit 0; + } + + while (my $ln = <$parent_in>) { + print " $ln"; + } + wait; + ok(!$?, 'Child subtests passed'); + + ok ($rs->find({ name => "Hardcore Forker $pid" }), 'Expected row created'); +} + +# Ensure disappearing RDBMS does not leave the storage in an inconsistent state +# Unlike the test in storage/reconnect.t we test live RDBMS-side disconnection +SKIP: +for my $cref ( + sub { + my $schema = shift; + + my $g = $schema->txn_scope_guard; + + is( $schema->storage->transaction_depth, 1, "Expected txn depth" ); + + $schema->storage->_dbh->do("SELECT SLEEP(2)"); + }, + sub { + my $schema = shift; + $schema->txn_do(sub { + is( $schema->storage->transaction_depth, 1, "Expected txn depth" ); + $schema->storage->_dbh->do("SELECT SLEEP(2)") + } ); + }, + sub { + my $schema = shift; + + my $g = $schema->txn_scope_guard; + + $schema->txn_do(sub { + is( $schema->storage->transaction_depth, 2, "Expected txn depth" ); + $schema->storage->_dbh->do("SELECT SLEEP(2)") + } ); + }, +) { + # version needed for the "read_timeout" feature + DBIx::Class::Optional::Dependencies->skip_without( 'DBD::mysql>=4.023' ); + + note( "Testing with " . B::Deparse->new->coderef2text($cref) ); + + my $schema = DBICTest::Schema->connect($dsn, $user, $pass, { + mysql_read_timeout => 1, + }); + + ok( !$schema->storage->connected, 'Not connected' ); + + is( $schema->storage->transaction_depth, undef, "Start with unknown txn depth" ); + + throws_ok { + $cref->($schema) + } qr/Rollback failed/; + + ok( !$schema->storage->connected, 'Not connected as a result of failed rollback' ); + + is( $schema->storage->transaction_depth, undef, "Depth expectedly unknown after failed rollbacks" ); + + ok( $schema->resultset('Artist')->count, 'query works after the fact' ); +} + +done_testing;
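
For anyone adapting the connection setup exercised above outside of the test suite, the options this patch starts passing to ->connect can be combined roughly as follows. This is a minimal sketch only: it reuses the DBICTEST_MYSQL_* environment variables and the DBICTest::Schema class from this test file, and the attributes shown (quote_names, on_connect_call => 'set_strict_mode') are simply the ones the patch itself uses; any surrounding application code is assumed, not prescribed.

    use strict;
    use warnings;
    use DBICTest;

    # Same environment variables the test reads at the top of the file
    my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_MYSQL_${_}" } qw/DSN USER PASS/};

    # quote_names quotes identifiers with MySQL backticks; the
    # set_strict_mode connect call switches the session into strict/ANSI
    # sql_mode and turns off SQL_AUTO_IS_NULL, which is what the
    # NULLINSEARCH and grouped-count blocks above rely on.
    # mysql_auto_reconnect is not set here because the MySQL storage
    # driver disables it anyway (see the MOD_PERL block near the end of
    # the patch).
    my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {
      quote_names     => 1,
      on_connect_call => 'set_strict_mode',
    });

    # Ordinary resultset usage then runs against the strict, quoted session
    my $artist_count = $schema->resultset('Artist')->count;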