# t/746sybase.t - live tests for the DBIx::Class Sybase ASE storage drivers.
# Requires $ENV{DBICTEST_SYBASE_DSN}/_USER/_PASS; drops and recreates the
# tables 'artist', 'money_test', 'bindtype_test' and 'computed_column_test'.
use strict;
use warnings;
# Several round-trip checks intentionally compare possibly-undef values.
no warnings 'uninitialized';

use Test::More;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;

# Connection info comes from the environment; without it we skip everything.
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};

# Number of tests run per storage type (the main loop runs twice, once per
# storage backend, plus a final ping-count check).
my $TESTS = 66 + 2;

if (not ($dsn && $user)) {
  plan skip_all =>
    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
    "\nWarning: This test drops and creates the tables " .
    "'artist', 'money_test' and 'bindtype_test'";
} else {
  plan tests => $TESTS*2 + 1;
}

# Both the placeholder-capable driver and the NoBindVars fallback are tested.
my @storage_types = (
  'DBI::Sybase::ASE',
  'DBI::Sybase::ASE::NoBindVars',
);
eval "require DBIx::Class::Storage::$_;" for @storage_types;

my $schema;
my $storage_idx = -1;
# Build a fresh connected schema against the configured Sybase DSN.
# blob_setup with log_on_update is the safer option for TEXT/IMAGE handling.
# Uses the file-level $dsn/$user/$pass; returns the DBICTest::Schema instance.
sub get_schema {
  DBICTest::Schema->connect($dsn, $user, $pass, {
    on_connect_call => [
      [ blob_setup => log_on_update => 1 ], # this is a safer option
    ],
  });
}
# Wrap the storage's _ping so we can assert at the end that no implicit
# reconnect-pings happened during the test run (see the final 'no pings' test).
my $ping_count = 0;
{
  my $ping = DBIx::Class::Storage::DBI::Sybase::ASE->can('_ping');
  *DBIx::Class::Storage::DBI::Sybase::ASE::_ping = sub {
    $ping_count++;
    goto $ping;   # tail-call the real _ping so @_ and caller are preserved
  };
}
# Main test loop: run the full suite once per storage backend.
for my $storage_type (@storage_types) {
  $storage_idx++;

  unless ($storage_type eq 'DBI::Sybase::ASE') { # autodetect
    DBICTest::Schema->storage_type("::$storage_type");
  }

  $schema = get_schema();

  $schema->storage->ensure_connected;

  if ($storage_idx == 0 &&
      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars')) {
# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
    my $tb = Test::More->builder;
    $tb->skip('no placeholders') for 1..$TESTS;
    next;
  }

  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );

# force a reconnect to prove the storage can recover a dropped handle
  $schema->storage->_dbh->disconnect;
  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');

  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE artist") };
    $dbh->do(<<'SQL');
CREATE TABLE artist (
   artistid INT IDENTITY PRIMARY KEY,
   name VARCHAR(100),
   rank INT DEFAULT 13 NOT NULL,
   charfield CHAR(10) NULL
)
SQL
  });

  my %seen_id;

# so we start unconnected
  $schema->storage->disconnect;

# test primary key handling
  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
  ok($new->artistid > 0, "Auto-PK worked");

  $seen_id{$new->artistid}++;

# check redispatch to storage-specific insert when auto-detected storage
  if ($storage_type eq 'DBI::Sybase::ASE') {
    DBICTest::Schema->storage_type('::DBI');
    $schema = get_schema();
  }

  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
  $seen_id{$new->artistid}++;

# inserts happen in a txn, so we make sure it still works inside a txn too
  $schema->txn_begin;

  for (2..6) {
    $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
    is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
    $seen_id{$new->artistid}++;
  }

  $schema->txn_commit;

# test simple count
  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');

# test LIMIT support
  my $it = $schema->resultset('Artist')->search({
    artistid => { '>' => 0 }
  }, {
    rows => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT count ok" );

  is( $it->next->name, "foo", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 2", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try with offset
  $it = $schema->resultset('Artist')->search({}, {
    rows => 3,
    offset => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT with offset count ok" );

  is( $it->next->name, "Artist 3", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 5", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try a grouped count
  $schema->resultset('Artist')->create({ name => 'Artist 6' })
    for (1..6);

  $it = $schema->resultset('Artist')->search({}, {
    group_by => 'name'
  });

  is( $it->count, 7, 'COUNT of GROUP_BY ok' );

# do an IDENTITY_INSERT
  {
    no warnings 'redefine';

    # capture the generated SQL via the debug callback
    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    # also track whether a transaction was (needlessly) opened for the insert
    my $txn_used = 0;
    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
    local *DBIx::Class::Storage::DBI::txn_commit = sub {
      $txn_used = 1;
      goto &$txn_commit;
    };

    $schema->resultset('Artist')
      ->create({ artistid => 999, name => 'mtfnpy' });

    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');

    SKIP: {
      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
        if $storage_type =~ /NoBindVars/i;

      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
    }
  }

# do an IDENTITY_UPDATE
  {
    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    lives_and {
      $schema->resultset('Artist')
        ->find(999)->update({ artistid => 555 });
      ok((grep /IDENTITY_UPDATE/i, @debug_out));
    } 'IDENTITY_UPDATE used';
    $ping_count-- if $@;
  }

  my $bulk_rs = $schema->resultset('Artist')->search({
    name => { -like => 'bulk artist %' }
  });

# test insert_bulk using populate.
  SKIP: {
    skip 'insert_bulk not supported', 4
      unless $storage_type !~ /NoBindVars/i;

    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          name => 'bulk artist 1',
          charfield => 'foo',
        },
        {
          name => 'bulk artist 2',
          charfield => 'foo',
        },
        {
          name => 'bulk artist 3',
          charfield => 'foo',
        },
      ]);
    } 'insert_bulk via populate';

    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';

    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
      'column set correctly via insert_bulk');

    my %bulk_ids;
    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();

    is ((scalar keys %bulk_ids), 3,
      'identities generated correctly in insert_bulk');

    $bulk_rs->delete;
  }

# make sure insert_bulk works a second time on the same connection
  SKIP: {
    skip 'insert_bulk not supported', 3
      unless $storage_type !~ /NoBindVars/i;

    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          name => 'bulk artist 1',
          charfield => 'bar',
        },
        {
          name => 'bulk artist 2',
          charfield => 'bar',
        },
        {
          name => 'bulk artist 3',
          charfield => 'bar',
        },
      ]);
    } 'insert_bulk via populate called a second time';

    is $bulk_rs->count, 3,
      'correct number inserted via insert_bulk';

    is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
      'column set correctly via insert_bulk');

    $bulk_rs->delete;
  }

# test invalid insert_bulk (missing required column)
#
# There should be a rollback, reconnect and the next valid insert_bulk should
# succeed.
  throws_ok {
    $schema->resultset('Artist')->populate([
      {
        charfield => 'foo',
      }
    ]);
  } qr/no value or default|does not allow null|placeholders/i,
# The second pattern is the error from fallback to regular array insert on
# incompatible charset.
# The third is for ::NoBindVars with no syb_has_blk.
  'insert_bulk with missing required column throws error';

# now test insert_bulk with IDENTITY_INSERT
  SKIP: {
    skip 'insert_bulk not supported', 3
      unless $storage_type !~ /NoBindVars/i;

    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          artistid => 2001,
          name => 'bulk artist 1',
          charfield => 'foo',
        },
        {
          artistid => 2002,
          name => 'bulk artist 2',
          charfield => 'foo',
        },
        {
          artistid => 2003,
          name => 'bulk artist 3',
          charfield => 'foo',
        },
      ]);
    } 'insert_bulk with IDENTITY_INSERT via populate';

    is $bulk_rs->count, 3,
      'correct number inserted via insert_bulk with IDENTITY_INSERT';

    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
      'column set correctly via insert_bulk with IDENTITY_INSERT');

    $bulk_rs->delete;
  }

# test correlated subquery
  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
    ->get_column('artistid')
    ->as_query;
  my $subq_rs = $schema->resultset('Artist')->search({
    artistid => { -in => $subq }
  });
  is $subq_rs->count, 11, 'correlated subquery';

# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
  SKIP: {
    skip 'TEXT/IMAGE support does not work with FreeTDS', 22
      if $schema->storage->using_freetds;

    my $dbh = $schema->storage->_dbh;
    {
      local $SIG{__WARN__} = sub {};
      eval { $dbh->do('DROP TABLE bindtype_test') };

      $dbh->do(qq[
        CREATE TABLE bindtype_test
        (
          id    INT   IDENTITY PRIMARY KEY,
          bytea IMAGE NULL,
          blob  IMAGE NULL,
          clob  TEXT  NULL
        )
      ],{ RaiseError => 1, PrintError => 0 });
    }

    # a 'small' binary string covering every low byte, and a 'large' one
    # big enough to exercise chunked blob handling
    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
    $binstr{'large'} = $binstr{'small'} x 1024;

    my $maxloblen = length $binstr{'large'};

    if (not $schema->storage->using_freetds) {
      $dbh->{'LongReadLen'} = $maxloblen * 2;
    } else {
      $dbh->do("set textsize ".($maxloblen * 2));
    }

    my $rs = $schema->resultset('BindType');
    my $last_id;

    foreach my $type (qw(blob clob)) {
      foreach my $size (qw(small large)) {
        no warnings 'uninitialized';

        my $created;
        lives_ok {
          $created = $rs->create( { $type => $binstr{$size} } )
        } "inserted $size $type without dying";

        $last_id = $created->id if $created;

        lives_and {
          ok($rs->find($last_id)->$type eq $binstr{$size})
        } "verified inserted $size $type";
      }
    }

    $rs->delete;

    # blob insert with explicit PK
    # also a good opportunity to test IDENTITY_INSERT
    lives_ok {
      $rs->create( { id => 1, blob => $binstr{large} } )
    } 'inserted large blob without dying with manual PK';

    lives_and {
      ok($rs->find(1)->blob eq $binstr{large})
    } 'verified inserted large blob with manual PK';

    # try a blob update
    my $new_str = $binstr{large} . 'mtfnpy';

    # check redispatch to storage-specific update when auto-detected storage
    if ($storage_type eq 'DBI::Sybase::ASE') {
      DBICTest::Schema->storage_type('::DBI');
      $schema = get_schema();
    }

    lives_ok {
      $rs->search({ id => 1 })->update({ blob => $new_str })
    } 'updated blob successfully';

    lives_and {
      ok($rs->find(1)->blob eq $new_str)
    } 'verified updated blob';

    # try a blob update with IDENTITY_UPDATE
    lives_and {
      $new_str = $binstr{large} . 'hlagh';
      $rs->find(1)->update({ id => 999, blob => $new_str });
      ok($rs->find(999)->blob eq $new_str);
    } 'verified updated blob with IDENTITY_UPDATE';

    ## try multi-row blob update
    # first insert some blobs
    $new_str = $binstr{large} . 'foo';
    lives_and {
      $rs->delete;
      $rs->create({ blob => $binstr{large} }) for (1..2);
      $rs->update({ blob => $new_str });
      is((grep $_->blob eq $new_str, $rs->all), 2);
    } 'multi-row blob update';

    $rs->delete;

    # now try insert_bulk with blobs and only blobs
    $new_str = $binstr{large} . 'bar';
    lives_ok {
      $rs->populate([
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
      ]);
    } 'insert_bulk with blobs does not die';

    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
      'IMAGE column set correctly via insert_bulk');

    is((grep $_->clob eq $new_str, $rs->all), 2,
      'TEXT column set correctly via insert_bulk');

    # now try insert_bulk with blobs and a non-blob which also happens to be an
    # identity column
    SKIP: {
      skip 'no insert_bulk without placeholders', 4
        if $storage_type =~ /NoBindVars/i;

      $rs->delete;
      $new_str = $binstr{large} . 'bar';
      lives_ok {
        $rs->populate([
          {
            id => 1,
            bytea => 1,
            blob => $binstr{large},
            clob => $new_str,
          },
          {
            id => 2,
            bytea => 1,
            blob => $binstr{large},
            clob => $new_str,
          },
        ]);
      } 'insert_bulk with blobs and explicit identity does NOT die';

      is((grep $_->blob eq $binstr{large}, $rs->all), 2,
        'IMAGE column set correctly via insert_bulk with identity');

      is((grep $_->clob eq $new_str, $rs->all), 2,
        'TEXT column set correctly via insert_bulk with identity');

      is_deeply [ map $_->id, $rs->all ], [ 1,2 ],
        'explicit identities set correctly via insert_bulk with blobs';
    }

    lives_and {
      $rs->delete;
      $rs->create({ blob => $binstr{large} }) for (1..2);
      $rs->update({ blob => undef });
      is((grep !defined($_->blob), $rs->all), 2);
    } 'blob update to NULL';
  }

# test MONEY column support (and some other misc. stuff)
  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE money_test") };
    $dbh->do(<<'SQL');
CREATE TABLE money_test (
   id INT IDENTITY PRIMARY KEY,
   amount MONEY DEFAULT $999.99 NULL
)
SQL
  });

  my $rs = $schema->resultset('Money');

# test insert with defaults
  lives_and {
    $rs->create({});
    is((grep $_->amount == 999.99, $rs->all), 1);
  } 'insert with all defaults works';
  $rs->delete;

# test insert transaction when there's an active cursor
  {
    my $artist_rs = $schema->resultset('Artist');
    $artist_rs->first;
    lives_ok {
      my $row = $schema->resultset('Money')->create({ amount => 100 });
      $row->delete;
    } 'inserted a row with an active cursor';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# test insert in an outer transaction when there's an active cursor
  TODO: {
    local $TODO = 'this should work once we have eager cursors';

# clear state, or we get a deadlock on $row->delete
# XXX figure out why this happens
    $schema->storage->disconnect;

    lives_ok {
      $schema->txn_do(sub {
        my $artist_rs = $schema->resultset('Artist');
        $artist_rs->first;
        my $row = $schema->resultset('Money')->create({ amount => 100 });
        $row->delete;
      });
    } 'inserted a row with an active cursor in outer txn';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# Now test money values.
  my $row;
  lives_ok {
    $row = $rs->create({ amount => 100 });
  } 'inserted a money value';

  cmp_ok eval { $rs->find($row->id)->amount }, '==', 100,
    'money value round-trip';

  lives_ok {
    $row->update({ amount => 200 });
  } 'updated a money value';

  cmp_ok eval { $rs->find($row->id)->amount }, '==', 200,
    'updated money value round-trip';

  lives_ok {
    $row->update({ amount => undef });
  } 'updated a money value to NULL';

  lives_and {
    my $null_amount = $rs->find($row->id)->amount;
    is $null_amount, undef;
  } 'updated money value to NULL round-trip';

# Test computed columns and timestamps
  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE computed_column_test") };
    $dbh->do(<<'SQL');
CREATE TABLE computed_column_test (
   id INT IDENTITY PRIMARY KEY,
   a_computed_column AS getdate(),
   a_timestamp timestamp,
   charfield VARCHAR(20) DEFAULT 'foo'
)
SQL
  });

  require DBICTest::Schema::ComputedColumn;
  $schema->register_class(
    ComputedColumn => 'DBICTest::Schema::ComputedColumn'
  );

  ok (($rs = $schema->resultset('ComputedColumn')),
    'got rs for ComputedColumn');

  lives_ok { $row = $rs->create({}) }
    'empty insert for a table with computed columns survived';

  lives_ok {
    $row->update({ charfield => 'bar' })
  } 'update of a table with computed columns survived';
}
# The monkey-patched _ping counter must still be zero: no test above should
# have triggered an implicit connection ping (a few spots decrement it where
# dbh_do legitimately calls ->connected after a failure).
is $ping_count, 0, 'no pings';

# clean up our mess
END {
  if (my $dbh = eval { $schema->storage->_dbh }) {
    eval { $dbh->do("DROP TABLE $_") }
      for qw/artist bindtype_test money_test computed_column_test/;
  }
}