X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=t%2F746sybase.t;h=74587afef7f113f5a1d6252e929637720c7fc6f9;hb=d8cf3aa31fb3d6ff7813f021fcc002663725fc41;hp=10378cfcc5407394fd11734de31c381b43f0f9c7;hpb=b3f41261e52df5e7f59a68100680759c1a2039bd;p=dbsrgits%2FDBIx-Class.git diff --git a/t/746sybase.t b/t/746sybase.t index 10378cf..74587af 100644 --- a/t/746sybase.t +++ b/t/746sybase.t @@ -1,85 +1,656 @@ +BEGIN { do "./t/lib/ANFANG.pm" or die ( $@ || $! ) } +use DBIx::Class::Optional::Dependencies -skip_all_without => 'test_rdbms_ase'; + use strict; -use warnings; +use warnings; +no warnings 'uninitialized'; +use Config; use Test::More; -use lib qw(t/lib); +use Test::Exception; +use DBIx::Class::_Util 'sigwarn_silencer'; + use DBICTest; +my @storage_types = ( + 'DBI::Sybase::ASE', + 'DBI::Sybase::ASE::NoBindVars', +); + +my $schema; + my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/}; -plan skip_all => 'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' - unless ($dsn && $user); +sub get_schema { + DBICTest::Schema->connect($dsn, $user, $pass, { + on_connect_call => [ + [ blob_setup => log_on_update => 1 ], # this is a safer option + ], + }); +} + +my $ping_count = 0; +{ + require DBIx::Class::Storage::DBI::Sybase::ASE; + my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping'); + *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub { + $ping_count++; + goto $ping; + }; +} + +for my $storage_type (@storage_types) { -plan tests => 12; + unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect + DBICTest::Schema->storage_type("::$storage_type"); + } -my $schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1}); + $schema = get_schema(); -# start disconnected to test reconnection -$schema->storage->ensure_connected; -$schema->storage->disconnect; + $schema->storage->ensure_connected; -isa_ok( $schema->storage, 'DBIx::Class::Storage::DBI::Sybase' ); + # we are going to explicitly test this anyway, just loop through + next if + $storage_type ne 'DBI::Sybase::ASE::NoBindVars' + and + $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars') + ; -$schema->storage->dbh_do (sub { - my ($storage, $dbh) = @_; - eval { $dbh->do("DROP TABLE artist") }; - $dbh->do(<<'SQL'); + isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" ); + $schema->storage->_dbh->disconnect; + lives_ok (sub { $schema->storage->dbh }, 'reconnect works'); + + $schema->storage->dbh_do (sub { + my ($storage, $dbh) = @_; + eval { $dbh->do("DROP TABLE artist") }; + $dbh->do(<<'SQL'); CREATE TABLE artist ( - artistid INT IDENTITY NOT NULL, + artistid INT IDENTITY PRIMARY KEY, name VARCHAR(100), rank INT DEFAULT 13 NOT NULL, - charfield CHAR(10) NULL, - primary key(artistid) + charfield CHAR(10) NULL ) - SQL + }); -}); + my %seen_id; -my %seen_id; - -# fresh $schema so we start unconnected -$schema = DBICTest::Schema->connect($dsn, $user, $pass, {AutoCommit => 1}); +# so we start unconnected + $schema->storage->disconnect; # test primary key handling -my $new = $schema->resultset('Artist')->create({ name => 'foo' }); -ok($new->artistid > 0, "Auto-PK worked"); + my $new = $schema->resultset('Artist')->create({ name => 'foo' }); + like $new->artistid, qr/^\d+\z/, 'Auto-PK returned a number'; + ok($new->artistid > 0, "Auto-PK worked"); -$seen_id{$new->artistid}++; + $seen_id{$new->artistid}++; -# test LIMIT support -for (1..6) { +# check redispatch to storage-specific insert when auto-detected storage + if 
($storage_type eq 'DBI::Sybase::ASE') { + DBICTest::Schema->storage_type('::DBI'); + $schema = get_schema(); + } + + $new = $schema->resultset('Artist')->create({ name => 'Artist 1' }); + is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' ); + $seen_id{$new->artistid}++; + +# inserts happen in a txn, so we make sure it still works inside a txn too + $schema->txn_begin; + + for (2..6) { $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ }); is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" ); $seen_id{$new->artistid}++; -} + } + + $schema->txn_commit; + +# test simple count + is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok'); + +# test LIMIT support + my $it = $schema->resultset('Artist')->search({ + artistid => { '>' => 0 } + }, { + rows => 3, + order_by => 'artistid', + }); + + is( $it->count, 3, "LIMIT count ok" ); -my $it; + is( $it->next->name, "foo", "iterator->next ok" ); + $it->next; + is( $it->next->name, "Artist 2", "iterator->next ok" ); + is( $it->next, undef, "next past end of resultset ok" ); -$it = $schema->resultset('Artist')->search( {}, { +# now try with offset + $it = $schema->resultset('Artist')->search({}, { rows => 3, + offset => 3, order_by => 'artistid', -}); + }); + + is( $it->count, 3, "LIMIT with offset count ok" ); + + is( $it->next->name, "Artist 3", "iterator->next ok" ); + $it->next; + is( $it->next->name, "Artist 5", "iterator->next ok" ); + is( $it->next, undef, "next past end of resultset ok" ); + +# now try a grouped count + $schema->resultset('Artist')->create({ name => 'Artist 6' }) + for (1..6); + + $it = $schema->resultset('Artist')->search({}, { + group_by => 'name' + }); + + is( $it->count, 7, 'COUNT of GROUP_BY ok' ); + +# do an IDENTITY_INSERT + { + no warnings 'redefine'; + + my @debug_out; + local $schema->storage->{debug} = 1; + local $schema->storage->debugobj->{callback} = sub { + push @debug_out, $_[1]; + }; + + my $txn_used = 0; + my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit; + local *DBIx::Class::Storage::DBI::txn_commit = sub { + $txn_used = 1; + goto &$txn_commit; + }; + + $schema->resultset('Artist') + ->create({ artistid => 999, name => 'mtfnpy' }); + + ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used'); + + SKIP: { + skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1 + if $storage_type =~ /NoBindVars/i; + + is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT'; + } + } + +# do an IDENTITY_UPDATE + { + my @debug_out; + local $schema->storage->{debug} = 1; + local $schema->storage->debugobj->{callback} = sub { + push @debug_out, $_[1]; + }; + + lives_and { + $schema->resultset('Artist') + ->find(999)->update({ artistid => 555 }); + ok((grep /IDENTITY_UPDATE/i, @debug_out)); + } 'IDENTITY_UPDATE used'; + $ping_count-- if $@; + } + + my $bulk_rs = $schema->resultset('Artist')->search({ + name => { -like => 'bulk artist %' } + }); + +# test _insert_bulk using populate. 
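+# populate() is deliberately called in void context throughout this block:
+# that is what lets DBIC attempt the fast _insert_bulk path (the DBD::Sybase
+# bulk API, where available) instead of falling back to row-by-row inserts.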
+ SKIP: { + skip '_insert_bulk not supported', 4 + unless $storage_type !~ /NoBindVars/i; + + lives_ok { + + local $SIG{__WARN__} = sigwarn_silencer(qr/Sybase bulk API operation failed due to character set incompatibility/) + unless $ENV{DBICTEST_SYBASE_SUBTEST_RERUN}; + + $schema->resultset('Artist')->populate([ + { + name => 'bulk artist 1', + charfield => 'foo', + }, + { + name => 'bulk artist 2', + charfield => 'foo', + }, + { + name => 'bulk artist 3', + charfield => 'foo', + }, + ]); + } '_insert_bulk via populate'; + + is $bulk_rs->count, 3, 'correct number inserted via _insert_bulk'; + + is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3, + 'column set correctly via _insert_bulk'); + + my %bulk_ids; + @bulk_ids{map $_->artistid, $bulk_rs->all} = (); + + is ((scalar keys %bulk_ids), 3, + 'identities generated correctly in _insert_bulk'); + + $bulk_rs->delete; + } + +# make sure _insert_bulk works a second time on the same connection + SKIP: { + skip '_insert_bulk not supported', 3 + unless $storage_type !~ /NoBindVars/i; + + lives_ok { + $schema->resultset('Artist')->populate([ + { + name => 'bulk artist 1', + charfield => 'bar', + }, + { + name => 'bulk artist 2', + charfield => 'bar', + }, + { + name => 'bulk artist 3', + charfield => 'bar', + }, + ]); + } '_insert_bulk via populate called a second time'; + + is $bulk_rs->count, 3, + 'correct number inserted via _insert_bulk'; + + is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3, + 'column set correctly via _insert_bulk'); + + $bulk_rs->delete; + } + +# test invalid _insert_bulk (missing required column) +# + throws_ok { + local $SIG{__WARN__} = sigwarn_silencer(qr/Sybase bulk API operation failed due to character set incompatibility/) + unless $ENV{DBICTEST_SYBASE_SUBTEST_RERUN}; + + $schema->resultset('Artist')->populate([ + { + charfield => 'foo', + } + ]); + } +# The second pattern is the error from fallback to regular array insert on +# incompatible charset. +# The third is for ::NoBindVars with no syb_has_blk. 
+ qr/ + \Qno value or default\E + | + \Qdoes not allow null\E + | + \QUnable to invoke fast-path insert without storage placeholder support\E + /xi, + '_insert_bulk with missing required column throws error'; + +# now test _insert_bulk with IDENTITY_INSERT + SKIP: { + skip '_insert_bulk not supported', 3 + unless $storage_type !~ /NoBindVars/i; + + lives_ok { + $schema->resultset('Artist')->populate([ + { + artistid => 2001, + name => 'bulk artist 1', + charfield => 'foo', + }, + { + artistid => 2002, + name => 'bulk artist 2', + charfield => 'foo', + }, + { + artistid => 2003, + name => 'bulk artist 3', + charfield => 'foo', + }, + ]); + } '_insert_bulk with IDENTITY_INSERT via populate'; + + is $bulk_rs->count, 3, + 'correct number inserted via _insert_bulk with IDENTITY_INSERT'; + + is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3, + 'column set correctly via _insert_bulk with IDENTITY_INSERT'); + + $bulk_rs->delete; + } + +# test correlated subquery + my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } }) + ->get_column('artistid') + ->as_query; + my $subq_rs = $schema->resultset('Artist')->search({ + artistid => { -in => $subq } + }); + is $subq_rs->count, 11, 'correlated subquery'; -TODO: { - local $TODO = 'Sybase is very very fucked in the limit department'; +# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t + SKIP: { + skip 'TEXT/IMAGE support does not work with FreeTDS', 22 + if $schema->storage->_using_freetds; - is( $it->count, 3, "LIMIT count ok" ); + my $dbh = $schema->storage->_dbh; + { + local $SIG{__WARN__} = sub {}; + eval { $dbh->do('DROP TABLE bindtype_test') }; + + $dbh->do(qq[ + CREATE TABLE bindtype_test + ( + id INT IDENTITY PRIMARY KEY, + bytea IMAGE NULL, + blob IMAGE NULL, + clob TEXT NULL, + a_memo IMAGE NULL + ) + ],{ RaiseError => 1, PrintError => 0 }); + } + + my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) ); + $binstr{'large'} = $binstr{'small'} x 1024; + + my $maxloblen = length $binstr{'large'}; + + if (not $schema->storage->_using_freetds) { + $dbh->{'LongReadLen'} = $maxloblen * 2; + } else { + $dbh->do("set textsize ".($maxloblen * 2)); + } + + my $rs = $schema->resultset('BindType'); + my $last_id; + + foreach my $type (qw(blob clob)) { + foreach my $size (qw(small large)) { + no warnings 'uninitialized'; + + my $created; + lives_ok { + $created = $rs->create( { $type => $binstr{$size} } ) + } "inserted $size $type without dying"; + + $last_id = $created->id if $created; + + lives_and { + ok($rs->find($last_id)->$type eq $binstr{$size}) + } "verified inserted $size $type"; + } + } + + $rs->delete; + + # blob insert with explicit PK + # also a good opportunity to test IDENTITY_INSERT + lives_ok { + $rs->create( { id => 1, blob => $binstr{large} } ) + } 'inserted large blob without dying with manual PK'; + + lives_and { + ok($rs->find(1)->blob eq $binstr{large}) + } 'verified inserted large blob with manual PK'; + + # try a blob update + my $new_str = $binstr{large} . 'mtfnpy'; + + # check redispatch to storage-specific update when auto-detected storage + if ($storage_type eq 'DBI::Sybase::ASE') { + DBICTest::Schema->storage_type('::DBI'); + $schema = get_schema(); + } + + lives_ok { + $rs->search({ id => 1 })->update({ blob => $new_str }) + } 'updated blob successfully'; + + lives_and { + ok($rs->find(1)->blob eq $new_str) + } 'verified updated blob'; + + # try a blob update with IDENTITY_UPDATE + lives_and { + $new_str = $binstr{large} . 
'hlagh'; + $rs->find(1)->update({ id => 999, blob => $new_str }); + ok($rs->find(999)->blob eq $new_str); + } 'verified updated blob with IDENTITY_UPDATE'; + + ## try multi-row blob update + # first insert some blobs + $new_str = $binstr{large} . 'foo'; + lives_and { + $rs->delete; + $rs->create({ blob => $binstr{large} }) for (1..2); + $rs->update({ blob => $new_str }); + is((grep $_->blob eq $new_str, $rs->all), 2); + } 'multi-row blob update'; + + $rs->delete; + + # now try _insert_bulk with blobs and only blobs + $new_str = $binstr{large} . 'bar'; + lives_ok { + $rs->populate([ + { + blob => $binstr{large}, + clob => $new_str, + }, + { + blob => $binstr{large}, + clob => $new_str, + }, + ]); + } '_insert_bulk with blobs does not die'; + + is((grep $_->blob eq $binstr{large}, $rs->all), 2, + 'IMAGE column set correctly via _insert_bulk'); + + is((grep $_->clob eq $new_str, $rs->all), 2, + 'TEXT column set correctly via _insert_bulk'); + + # now try _insert_bulk with blobs and a non-blob which also happens to be an + # identity column + SKIP: { + skip 'no _insert_bulk without placeholders', 4 + if $storage_type =~ /NoBindVars/i; + + $rs->delete; + $new_str = $binstr{large} . 'bar'; + lives_ok { + $rs->populate([ + { + id => 1, + bytea => 1, + blob => $binstr{large}, + clob => $new_str, + a_memo => 2, + }, + { + id => 2, + bytea => 1, + blob => $binstr{large}, + clob => $new_str, + a_memo => 2, + }, + ]); + } '_insert_bulk with blobs and explicit identity does NOT die'; + + is((grep $_->blob eq $binstr{large}, $rs->all), 2, + 'IMAGE column set correctly via _insert_bulk with identity'); + + is((grep $_->clob eq $new_str, $rs->all), 2, + 'TEXT column set correctly via _insert_bulk with identity'); + + is_deeply [ map $_->id, $rs->all ], [ 1,2 ], + 'explicit identities set correctly via _insert_bulk with blobs'; + } + + lives_and { + $rs->delete; + $rs->create({ blob => $binstr{large} }) for (1..2); + $rs->update({ blob => undef }); + is((grep !defined($_->blob), $rs->all), 2); + } 'blob update to NULL'; + + lives_ok { + $schema->txn_do(sub { + my $created = $rs->create( { clob => "some text" } ); + }); + } 'insert blob field in transaction'; + $ping_count-- if $@; # failure retry triggers a ping + } + +# test MONEY column support (and some other misc. 
stuff) + $schema->storage->dbh_do (sub { + my ($storage, $dbh) = @_; + eval { $dbh->do("DROP TABLE money_test") }; + $dbh->do(<<'SQL'); +CREATE TABLE money_test ( + id INT IDENTITY PRIMARY KEY, + amount MONEY DEFAULT $999.99 NULL +) +SQL + }); + + my $rs = $schema->resultset('Money'); + +# test insert with defaults + lives_and { + $rs->create({}); + is((grep $_->amount == 999.99, $rs->all), 1); + } 'insert with all defaults works'; + $rs->delete; + +# test insert transaction when there's an active cursor + { + my $artist_rs = $schema->resultset('Artist'); + $artist_rs->first; + lives_ok { + my $row = $schema->resultset('Money')->create({ amount => 100 }); + $row->delete; + } 'inserted a row with an active cursor'; + $ping_count-- if $@; # dbh_do calls ->connected + } + +# test insert in an outer transaction when there's an active cursor + { + local $TODO = 'this should work once we have eager cursors'; + +# clear state, or we get a deadlock on $row->delete +# XXX figure out why this happens + $schema->storage->disconnect; + + lives_ok { + $schema->txn_do(sub { + my $artist_rs = $schema->resultset('Artist'); + $artist_rs->first; + my $row = $schema->resultset('Money')->create({ amount => 100 }); + $row->delete; + }); + } 'inserted a row with an active cursor in outer txn'; + $ping_count-- if $@; # dbh_do calls ->connected + } + +# Now test money values. + my $row; + lives_ok { + $row = $rs->create({ amount => 100 }); + } 'inserted a money value'; + + cmp_ok eval { $rs->find($row->id)->amount }, '==', 100, + 'money value round-trip'; + + lives_ok { + $row->update({ amount => 200 }); + } 'updated a money value'; + + cmp_ok eval { $rs->find($row->id)->amount }, '==', 200, + 'updated money value round-trip'; + + lives_ok { + $row->update({ amount => undef }); + } 'updated a money value to NULL'; + + lives_and { + my $null_amount = $rs->find($row->id)->amount; + is $null_amount, undef; + } 'updated money value to NULL round-trip'; + +# Test computed columns and timestamps + $schema->storage->dbh_do (sub { + my ($storage, $dbh) = @_; + eval { $dbh->do("DROP TABLE computed_column_test") }; + $dbh->do(<<'SQL'); +CREATE TABLE computed_column_test ( + id INT IDENTITY PRIMARY KEY, + a_computed_column AS getdate(), + a_timestamp timestamp, + charfield VARCHAR(20) DEFAULT 'foo' +) +SQL + }); + + require DBICTest::Schema::ComputedColumn; + $schema->register_class( + ComputedColumn => 'DBICTest::Schema::ComputedColumn' + ); + + ok (($rs = $schema->resultset('ComputedColumn')), + 'got rs for ComputedColumn'); + + lives_ok { $row = $rs->create({}) } + 'empty insert for a table with computed columns survived'; + + lives_ok { + $row->update({ charfield => 'bar' }) + } 'update of a table with computed columns survived'; } -# The iterator still works correctly with rows => 3, even though the sql is -# fucked, very interesting. 
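+# $ping_count was incremented by the _ping wrapper installed at the top of
+# this file; anything other than zero here means some codepath above
+# triggered an unexpected connection check.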
+is $ping_count, 0, 'no pings'; + +# if tests passed and did so under a non-C LC_ALL - let's rerun the test +if (Test::Builder->new->is_passing and $ENV{LC_ALL} and $ENV{LC_ALL} ne 'C') { -is( $it->next->name, "foo", "iterator->next ok" ); -$it->next; -is( $it->next->name, "Artist 2", "iterator->next ok" ); -is( $it->next, undef, "next past end of resultset ok" ); + pass ("Your LC_ALL is set to $ENV{LC_ALL} - retesting with C"); + local $ENV{LC_ALL} = 'C'; + local $ENV{DBICTEST_SYBASE_SUBTEST_RERUN} = 1; + + local $ENV{PATH}; + local $ENV{PERL5LIB} = join ($Config{path_sep}, @INC); + my @cmd = map { $_ =~ /(.+)/ } ($^X, __FILE__); + + # this is cheating, and may even hang here and there (testing on windows passed fine) + # will be replaced with Test::SubExec::Noninteractive in due course + require IPC::Open2; + IPC::Open2::open2(my $out, undef, @cmd); + while (my $ln = <$out>) { + print " $ln"; + } + + wait; + ok (! $?, "Wstat $? from: @cmd"); +} + +done_testing; # clean up our mess END { - my $dbh = eval { $schema->storage->_dbh }; - $dbh->do('DROP TABLE artist') if $dbh; -} + if (my $dbh = eval { $schema->storage->_dbh }) { + eval { $dbh->do("DROP TABLE $_") } + for qw/artist bindtype_test money_test computed_column_test/; + } + undef $schema; +}
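+
+# A minimal sketch of pointing this test at a live ASE server - the server
+# name, database and credentials below are placeholders, not values from
+# this repo:
+#
+#   DBICTEST_SYBASE_DSN='dbi:Sybase:server=SYBASE;database=dbictest' \
+#   DBICTEST_SYBASE_USER=sa \
+#   DBICTEST_SYBASE_PASS=secret \
+#   prove -l t/746sybase.t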