# t/746sybase.t (dbsrgits/DBIx-Class.git)
# Merge 'sybase' into 'sybase_bulk_insert'
use strict;
use warnings;
no warnings 'uninitialized';

use Test::More;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;

require DBIx::Class::Storage::DBI::Sybase;
require DBIx::Class::Storage::DBI::Sybase::NoBindVars;

my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};

my $TESTS = 58 + 2;

if (not ($dsn && $user)) {
  plan skip_all =>
    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
    "\nWarning: This test drops and creates the tables " .
    "'artist', 'money_test' and 'bindtype_test'";
} else {
  plan tests => $TESTS*2 + 1;
}

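# Each pass of the main loop below exercises one storage variant: the regular
# placeholder-using DBI::Sybase storage, and the NoBindVars fallback used when
# placeholders are not available (e.g. under FreeTDS).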
my @storage_types = (
  'DBI::Sybase',
  'DBI::Sybase::NoBindVars',
);
my $schema;
my $storage_idx = -1;

sub get_schema {
  DBICTest::Schema->connect($dsn, $user, $pass, {
    on_connect_call => [
      [ blob_setup => log_on_update => 1 ], # this is a safer option
    ],
  });
}

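# Count implicit server pings issued by the storage.  The wrapper below bumps
# $ping_count whenever _ping is called, and the very last test asserts that no
# such reconnect checks happened during the run.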
my $ping_count = 0;
{
  my $ping = DBIx::Class::Storage::DBI::Sybase->can('_ping');
  *DBIx::Class::Storage::DBI::Sybase::_ping = sub {
    $ping_count++;
    goto $ping;
  };
}

for my $storage_type (@storage_types) {
  $storage_idx++;

  unless ($storage_type eq 'DBI::Sybase') { # autodetect
    DBICTest::Schema->storage_type("::$storage_type");
  }

  $schema = get_schema();

  $schema->storage->ensure_connected;

  if ($storage_idx == 0 &&
      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::NoBindVars')) {
# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
      my $tb = Test::More->builder;
      $tb->skip('no placeholders') for 1..$TESTS;
      next;
  }

  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );

  $schema->storage->_dbh->disconnect;
  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');

  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE artist") };
    $dbh->do(<<'SQL');
CREATE TABLE artist (
  artistid INT IDENTITY PRIMARY KEY,
  name VARCHAR(100),
  rank INT DEFAULT 13 NOT NULL,
  charfield CHAR(10) NULL
)
SQL
  });

  my %seen_id;

# so we start unconnected
  $schema->storage->disconnect;

# test primary key handling
  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
  ok($new->artistid > 0, "Auto-PK worked");

  $seen_id{$new->artistid}++;

# check redispatch to storage-specific insert when auto-detected storage
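# (resetting storage_type to the generic ::DBI storage means connection-time
# auto-detection has to rebless back into the Sybase storage, so the insert
# below must get redispatched to the Sybase-specific implementation)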
  if ($storage_type eq 'DBI::Sybase') {
    DBICTest::Schema->storage_type('::DBI');
    $schema = get_schema();
  }

  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
  $seen_id{$new->artistid}++;

# inserts happen in a txn, so we make sure it still works inside a txn too
  $schema->txn_begin;

  for (2..6) {
    $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
    is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
    $seen_id{$new->artistid}++;
  }

  $schema->txn_commit;

# test simple count
  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');

# test LIMIT support
  my $it = $schema->resultset('Artist')->search({
    artistid => { '>' => 0 }
  }, {
    rows => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT count ok" );

  is( $it->next->name, "foo", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 2", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try with offset
  $it = $schema->resultset('Artist')->search({}, {
    rows => 3,
    offset => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT with offset count ok" );

  is( $it->next->name, "Artist 3", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 5", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try a grouped count
  $schema->resultset('Artist')->create({ name => 'Artist 6' })
    for (1..6);

  $it = $schema->resultset('Artist')->search({}, {
    group_by => 'name'
  });

  is( $it->count, 7, 'COUNT of GROUP_BY ok' );

# do an IDENTITY_INSERT
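# (Sybase ASE only accepts explicit values for an IDENTITY column while
# SET IDENTITY_INSERT is in effect for the table; the debug callback below
# captures the generated SQL to check for it, and the txn_commit wrapper
# verifies the insert is not needlessly wrapped in a transaction)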
  {
    no warnings 'redefine';

    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    my $txn_used = 0;
    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
    local *DBIx::Class::Storage::DBI::txn_commit = sub {
      $txn_used = 1;
      goto &$txn_commit;
    };

    $schema->resultset('Artist')
      ->create({ artistid => 999, name => 'mtfnpy' });

    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');

    SKIP: {
      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
        if $storage_type =~ /NoBindVars/i;

      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
    }
  }

# do an IDENTITY_UPDATE
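# (changing the value of an IDENTITY column also needs special handling; the
# debug callback only verifies that IDENTITY_UPDATE shows up in the SQL the
# storage generates for such an update)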
  {
    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

    lives_and {
      $schema->resultset('Artist')
        ->find(999)->update({ artistid => 555 });
      ok((grep /IDENTITY_UPDATE/i, @debug_out));
    } 'IDENTITY_UPDATE used';
    $ping_count-- if $@;
  }

  my $bulk_rs = $schema->resultset('Artist')->search({
    name => { -like => 'bulk artist %' }
  });

# test insert_bulk using populate.
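# (the bulk tests are skipped when the storage reports via _can_insert_bulk
# that the fast path is unavailable; otherwise populate() is expected to
# insert every row, set the non-PK columns and generate distinct identities)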
  SKIP: {
    skip 'insert_bulk not supported', 4
      unless $schema->storage->_can_insert_bulk;

    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          name => 'bulk artist 1',
          charfield => 'foo',
        },
        {
          name => 'bulk artist 2',
          charfield => 'foo',
        },
        {
          name => 'bulk artist 3',
          charfield => 'foo',
        },
      ]);
    } 'insert_bulk via populate';

    is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';

    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
      'column set correctly via insert_bulk');

    my %bulk_ids;
    @bulk_ids{map $_->artistid, $bulk_rs->all} = ();

    is ((scalar keys %bulk_ids), 3,
      'identities generated correctly in insert_bulk');

    $bulk_rs->delete;
  }

# make sure insert_bulk works a second time on the same connection
  lives_ok {
    $schema->resultset('Artist')->populate([
      {
        name => 'bulk artist 1',
        charfield => 'bar',
      },
      {
        name => 'bulk artist 2',
        charfield => 'bar',
      },
      {
        name => 'bulk artist 3',
        charfield => 'bar',
      },
    ]);
  } 'insert_bulk via populate called a second time';

  is $bulk_rs->count, 3,
    'correct number inserted via insert_bulk';

  is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
    'column set correctly via insert_bulk');

  $bulk_rs->delete;

# test invalid insert_bulk (missing required column)
#
# There should be a rollback, reconnect and the next valid insert_bulk should
# succeed.
  throws_ok {
    $schema->resultset('Artist')->populate([
      {
        charfield => 'foo',
      }
    ]);
  } qr/no value or default|does not allow null/i,
# The second pattern is the error from fallback to regular array insert on
# incompatible charset.
  'insert_bulk with missing required column throws error';

# now test insert_bulk with IDENTITY_INSERT
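# (these rows carry explicit artistid values, so the bulk-insert path has to
# handle writing to the IDENTITY column as well)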
  SKIP: {
    skip 'insert_bulk not supported', 3
      unless $schema->storage->_can_insert_bulk;

    lives_ok {
      $schema->resultset('Artist')->populate([
        {
          artistid => 2001,
          name => 'bulk artist 1',
          charfield => 'foo',
        },
        {
          artistid => 2002,
          name => 'bulk artist 2',
          charfield => 'foo',
        },
        {
          artistid => 2003,
          name => 'bulk artist 3',
          charfield => 'foo',
        },
      ]);
    } 'insert_bulk with IDENTITY_INSERT via populate';

    is $bulk_rs->count, 3,
      'correct number inserted via insert_bulk with IDENTITY_INSERT';

    is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
      'column set correctly via insert_bulk with IDENTITY_INSERT');

    $bulk_rs->delete;
  }

# test correlated subquery
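# (the inner resultset is rendered as a subselect via as_query and used in an
# IN clause; 11 artists with artistid > 3 are expected to exist at this point)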
  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
    ->get_column('artistid')
    ->as_query;
  my $subq_rs = $schema->resultset('Artist')->search({
    artistid => { -in => $subq }
  });
  is $subq_rs->count, 11, 'correlated subquery';

# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
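# (TEXT/IMAGE round-trips depend on the blob support in DBD::Sybase, which is
# not usable with the FreeTDS client libraries, hence the skip below)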
  SKIP: {
    skip 'TEXT/IMAGE support does not work with FreeTDS', 18
      if $schema->storage->using_freetds;

    my $dbh = $schema->storage->_dbh;
    {
      local $SIG{__WARN__} = sub {};
      eval { $dbh->do('DROP TABLE bindtype_test') };

      $dbh->do(qq[
        CREATE TABLE bindtype_test
        (
          id    INT   IDENTITY PRIMARY KEY,
          bytea INT   NULL,
          blob  IMAGE NULL,
          clob  TEXT  NULL
        )
      ],{ RaiseError => 1, PrintError => 0 });
    }

    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
    $binstr{'large'} = $binstr{'small'} x 1024;

    my $maxloblen = length $binstr{'large'};

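    # (make sure enough of each TEXT/IMAGE value is returned: via LongReadLen
    # with the native client libraries, or an explicit "set textsize" when
    # running under FreeTDS)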
    if (not $schema->storage->using_freetds) {
      $dbh->{'LongReadLen'} = $maxloblen * 2;
    } else {
      $dbh->do("set textsize ".($maxloblen * 2));
    }

    my $rs = $schema->resultset('BindType');
    my $last_id;

    foreach my $type (qw(blob clob)) {
      foreach my $size (qw(small large)) {
        no warnings 'uninitialized';

        my $created;
        lives_ok {
          $created = $rs->create( { $type => $binstr{$size} } )
        } "inserted $size $type without dying";

        $last_id = $created->id if $created;

        lives_and {
          ok($rs->find($last_id)->$type eq $binstr{$size})
        } "verified inserted $size $type";
      }
    }

    $rs->delete;

    # blob insert with explicit PK
    # also a good opportunity to test IDENTITY_INSERT
    lives_ok {
      $rs->create( { id => 1, blob => $binstr{large} } )
    } 'inserted large blob without dying with manual PK';

    lives_and {
      ok($rs->find(1)->blob eq $binstr{large})
    } 'verified inserted large blob with manual PK';

    # try a blob update
    my $new_str = $binstr{large} . 'mtfnpy';

    # check redispatch to storage-specific update when auto-detected storage
    if ($storage_type eq 'DBI::Sybase') {
      DBICTest::Schema->storage_type('::DBI');
      $schema = get_schema();
    }

    lives_ok {
      $rs->search({ id => 1 })->update({ blob => $new_str })
    } 'updated blob successfully';

    lives_and {
      ok($rs->find(1)->blob eq $new_str)
    } 'verified updated blob';

    # try a blob update with IDENTITY_UPDATE
    lives_and {
      $new_str = $binstr{large} . 'hlagh';
      $rs->find(1)->update({ id => 999, blob => $new_str });
      ok($rs->find(999)->blob eq $new_str);
    } 'verified updated blob with IDENTITY_UPDATE';

    ## try multi-row blob update
    # first insert some blobs
    $new_str = $binstr{large} . 'foo';
    lives_and {
      $rs->delete;
      $rs->create({ blob => $binstr{large} }) for (1..2);
      $rs->update({ blob => $new_str });
      is((grep $_->blob eq $new_str, $rs->all), 2);
    } 'multi-row blob update';

    $rs->delete;

    # now try insert_bulk with blobs
    $new_str = $binstr{large} . 'bar';
    lives_ok {
      $rs->populate([
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
        {
          bytea => 1,
          blob => $binstr{large},
          clob => $new_str,
        },
      ]);
    } 'insert_bulk with blobs does not die';

    is((grep $_->blob eq $binstr{large}, $rs->all), 2,
      'IMAGE column set correctly via insert_bulk');

    is((grep $_->clob eq $new_str, $rs->all), 2,
      'TEXT column set correctly via insert_bulk');

    # make sure impossible blob update throws
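    # (filtering on a TEXT column while also updating it is something the
    # storage refuses to do; the test only checks that the resulting error
    # matches /impossible/)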
    throws_ok {
      $rs->update({ clob => 'foo' });
      $rs->create({ clob => 'bar' });
      $rs->search({ clob => 'foo' })->update({ clob => 'bar' });
    } qr/impossible/, 'impossible blob update throws';
  }

# test MONEY column support
  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE money_test") };
    $dbh->do(<<'SQL');
CREATE TABLE money_test (
  id INT IDENTITY PRIMARY KEY,
  amount MONEY NULL
)
SQL
  });

# test insert transaction when there's an active cursor
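# (DBD::Sybase cannot start a new command while another statement on the same
# connection still has results pending, so the storage must cope with an
# insert while the cursor opened by ->first is live, e.g. by performing the
# insert on a separate connection)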
  {
    my $artist_rs = $schema->resultset('Artist');
    $artist_rs->first;
    lives_ok {
      my $row = $schema->resultset('Money')->create({ amount => 100 });
      $row->delete;
    } 'inserted a row with an active cursor';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# test insert in an outer transaction when there's an active cursor
  TODO: {
    local $TODO = 'this should work once we have eager cursors';

# clear state, or we get a deadlock on $row->delete
# XXX figure out why this happens
    $schema->storage->disconnect;

    lives_ok {
      $schema->txn_do(sub {
        my $artist_rs = $schema->resultset('Artist');
        $artist_rs->first;
        my $row = $schema->resultset('Money')->create({ amount => 100 });
        $row->delete;
      });
    } 'inserted a row with an active cursor in outer txn';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# Now test money values.
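# (MONEY is a Sybase-specific numeric type; the checks below verify that
# values round-trip on insert and update, and that setting the column back to
# NULL round-trips as well)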
  my $rs = $schema->resultset('Money');

  my $row;
  lives_ok {
    $row = $rs->create({ amount => 100 });
  } 'inserted a money value';

  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';

  lives_ok {
    $row->update({ amount => 200 });
  } 'updated a money value';

  is eval { $rs->find($row->id)->amount },
    200, 'updated money value round-trip';

  lives_ok {
    $row->update({ amount => undef });
  } 'updated a money value to NULL';

  my $null_amount = eval { $rs->find($row->id)->amount };
  ok(
    ((not defined $null_amount) && (not $@)),
    'updated money value to NULL round-trip'
  );
  diag $@ if $@;
}

is $ping_count, 0, 'no pings';

# clean up our mess
END {
  if (my $dbh = eval { $schema->storage->_dbh }) {
    eval { $dbh->do("DROP TABLE $_") }
      for qw/artist bindtype_test money_test/;
  }
}