use strict;
use warnings;
no warnings 'uninitialized';

use Test::More;
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
use DBIx::Class::Storage::DBI::Sybase;
use DBIx::Class::Storage::DBI::Sybase::NoBindVars;

my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};

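# number of tests per storage type; the plan below doubles this (one run per
# storage class) and adds one for the final ping-count check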
my $TESTS = 49 + 2;

if (not ($dsn && $user)) {
  plan skip_all =>
    'Set $ENV{DBICTEST_SYBASE_DSN}, _USER and _PASS to run this test' .
    "\nWarning: This test drops and creates the tables " .
    "'artist', 'money_test' and 'bindtype_test'";
} else {
  plan tests => $TESTS*2 + 1;
}

my @storage_types = (
  'DBI::Sybase',
  'DBI::Sybase::NoBindVars',
);
my $schema;
my $storage_idx = -1;

sub get_schema {
  DBICTest::Schema->connect($dsn, $user, $pass, {
    on_connect_call => [
      [ blob_setup => log_on_update => 1 ], # this is a safer option
    ],
  });
}

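# wrap the storage driver's _ping so we can count how often the server gets
# pinged; the last test asserts that no pings were triggered during the run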
my $ping_count = 0;
{
  my $ping = DBIx::Class::Storage::DBI::Sybase->can('_ping');
  *DBIx::Class::Storage::DBI::Sybase::_ping = sub {
    $ping_count++;
    goto $ping;
  };
}

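# run the entire battery twice: once with the placeholder-capable storage and
# once with the NoBindVars fallback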
for my $storage_type (@storage_types) {
  $storage_idx++;

  unless ($storage_type eq 'DBI::Sybase') { # autodetect
    DBICTest::Schema->storage_type("::$storage_type");
  }

  $schema = get_schema();

  $schema->storage->ensure_connected;

  if ($storage_idx == 0 &&
      $schema->storage->isa('DBIx::Class::Storage::DBI::Sybase::NoBindVars')) {
# no placeholders in this version of Sybase or DBD::Sybase (or using FreeTDS)
    my $tb = Test::More->builder;
    $tb->skip('no placeholders') for 1..$TESTS;
    next;
  }

  isa_ok( $schema->storage, "DBIx::Class::Storage::$storage_type" );

  $schema->storage->_dbh->disconnect;
  lives_ok (sub { $schema->storage->dbh }, 'reconnect works');

  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE artist") };
    $dbh->do(<<'SQL');
CREATE TABLE artist (
   artistid INT IDENTITY PRIMARY KEY,
   name VARCHAR(100),
   rank INT DEFAULT 13 NOT NULL,
   charfield CHAR(10) NULL
)
SQL
  });

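# track every generated identity value so we can check that none is ever reused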
  my %seen_id;

# so we start unconnected
  $schema->storage->disconnect;

# test primary key handling
  my $new = $schema->resultset('Artist')->create({ name => 'foo' });
  ok($new->artistid > 0, "Auto-PK worked");

  $seen_id{$new->artistid}++;

# check redispatch to storage-specific insert when auto-detected storage
  if ($storage_type eq 'DBI::Sybase') {
    DBICTest::Schema->storage_type('::DBI');
    $schema = get_schema();
  }

  $new = $schema->resultset('Artist')->create({ name => 'Artist 1' });
  is ( $seen_id{$new->artistid}, undef, 'id for Artist 1 is unique' );
  $seen_id{$new->artistid}++;

# inserts happen in a txn, so we make sure it still works inside a txn too
  $schema->txn_begin;

  for (2..6) {
    $new = $schema->resultset('Artist')->create({ name => 'Artist ' . $_ });
    is ( $seen_id{$new->artistid}, undef, "id for Artist $_ is unique" );
    $seen_id{$new->artistid}++;
  }

  $schema->txn_commit;

# test simple count
  is ($schema->resultset('Artist')->count, 7, 'count(*) of whole table ok');

# test LIMIT support
  my $it = $schema->resultset('Artist')->search({
    artistid => { '>' => 0 }
  }, {
    rows => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT count ok" );

  is( $it->next->name, "foo", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 2", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try with offset
  $it = $schema->resultset('Artist')->search({}, {
    rows => 3,
    offset => 3,
    order_by => 'artistid',
  });

  is( $it->count, 3, "LIMIT with offset count ok" );

  is( $it->next->name, "Artist 3", "iterator->next ok" );
  $it->next;
  is( $it->next->name, "Artist 5", "iterator->next ok" );
  is( $it->next, undef, "next past end of resultset ok" );

# now try a grouped count
  $schema->resultset('Artist')->create({ name => 'Artist 6' })
    for (1..6);

  $it = $schema->resultset('Artist')->search({}, {
    group_by => 'name'
  });

  is( $it->count, 7, 'COUNT of GROUP_BY ok' );

# do an identity insert (which should happen with no txn when using
# placeholders.)
  {
    no warnings 'redefine';

    my @debug_out;
    local $schema->storage->{debug} = 1;
    local $schema->storage->debugobj->{callback} = sub {
      push @debug_out, $_[1];
    };

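    # intercept txn_commit so we can tell whether the insert below was wrapped
    # in a transaction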
    my $txn_used = 0;
    my $txn_commit = \&DBIx::Class::Storage::DBI::txn_commit;
    local *DBIx::Class::Storage::DBI::txn_commit = sub {
      $txn_used = 1;
      goto &$txn_commit;
    };

    $schema->resultset('Artist')
      ->create({ artistid => 999, name => 'mtfnpy' });

    ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT');

    SKIP: {
      skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
        if $storage_type =~ /NoBindVars/i;

      is $txn_used, 0, 'no txn on insert with IDENTITY_INSERT';
    }
  }

# test insert_bulk using populate.
  lives_ok {
    $schema->resultset('Artist')->populate([
      {
        name => 'bulk artist 1',
        charfield => 'foo',
      },
      {
        name => 'bulk artist 2',
        charfield => 'foo',
      },
      {
        name => 'bulk artist 3',
        charfield => 'foo',
      },
    ]);
  } 'insert_bulk via populate';

  my $bulk_rs = $schema->resultset('Artist')->search({
    name => { -like => 'bulk artist %' }
  });

  is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';

  is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
    'column set correctly via insert_bulk');

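  # collect the distinct identity values the bulk insert handed out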
  my %bulk_ids;
  @bulk_ids{map $_->artistid, $bulk_rs->all} = ();

  is ((scalar keys %bulk_ids), 3,
    'identities generated correctly in insert_bulk');

  $bulk_rs->delete;

# test invalid insert_bulk (missing required column)
#
# There should be a rollback, reconnect and the next valid insert_bulk should
# succeed.
  throws_ok {
    $schema->resultset('Artist')->populate([
      {
        charfield => 'foo',
      }
    ]);
  } qr/no value or default|does not allow null/i,
# The second pattern is the error from fallback to regular array insert on
# incompatible charset.
  'insert_bulk with missing required column throws error';

# now test insert_bulk with IDENTITY_INSERT
  lives_ok {
    $schema->resultset('Artist')->populate([
      {
        artistid => 2001,
        name => 'bulk artist 1',
        charfield => 'foo',
      },
      {
        artistid => 2002,
        name => 'bulk artist 2',
        charfield => 'foo',
      },
      {
        artistid => 2003,
        name => 'bulk artist 3',
        charfield => 'foo',
      },
    ]);
  } 'insert_bulk with IDENTITY_INSERT via populate';

  is $bulk_rs->count, 3,
    'correct number inserted via insert_bulk with IDENTITY_INSERT';

  is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
    'column set correctly via insert_bulk with IDENTITY_INSERT');

  $bulk_rs->delete;

# test correlated subquery
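# (the table holds 14 artists at this point; assuming identities were assigned
# sequentially from 1, the rows with artistid > 3 are ids 4..13 plus the
# explicit 999, i.e. 11 rows)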
  my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
    ->get_column('artistid')
    ->as_query;
  my $subq_rs = $schema->resultset('Artist')->search({
    artistid => { -in => $subq }
  });
  is $subq_rs->count, 11, 'correlated subquery';

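# TEXT/IMAGE (blob/clob) round-trips, explicit-PK blob inserts and blob updates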
# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
  SKIP: {
    skip 'TEXT/IMAGE support does not work with FreeTDS', 13
      if $schema->storage->using_freetds;

    my $dbh = $schema->storage->_dbh;
    {
      local $SIG{__WARN__} = sub {};
      eval { $dbh->do('DROP TABLE bindtype_test') };

      $dbh->do(qq[
        CREATE TABLE bindtype_test
        (
          id INT IDENTITY PRIMARY KEY,
          bytea INT NULL,
          blob IMAGE NULL,
          clob TEXT NULL
        )
      ],{ RaiseError => 1, PrintError => 0 });
    }

    my %binstr = ( 'small' => join('', map { chr($_) } ( 1 .. 127 )) );
    $binstr{'large'} = $binstr{'small'} x 1024;

    my $maxloblen = length $binstr{'large'};

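    # raise the LOB size limit so large values come back whole, not truncated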
    if (not $schema->storage->using_freetds) {
      $dbh->{'LongReadLen'} = $maxloblen * 2;
    } else {
      $dbh->do("set textsize ".($maxloblen * 2));
    }

    my $rs = $schema->resultset('BindType');
    my $last_id;

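    # round-trip small and large binary strings through both the IMAGE (blob)
    # and TEXT (clob) columns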
    foreach my $type (qw(blob clob)) {
      foreach my $size (qw(small large)) {
        no warnings 'uninitialized';

        my $created = eval { $rs->create( { $type => $binstr{$size} } ) };
        ok(!$@, "inserted $size $type without dying");
        diag $@ if $@;

        $last_id = $created->id if $created;

        my $got = eval {
          $rs->find($last_id)->$type
        };
        diag $@ if $@;
        ok($got eq $binstr{$size}, "verified inserted $size $type");
      }
    }

    # blob insert with explicit PK
    # also a good opportunity to test IDENTITY_INSERT
    {
      local $SIG{__WARN__} = sub {};
      eval { $dbh->do('DROP TABLE bindtype_test') };

      $dbh->do(qq[
        CREATE TABLE bindtype_test
        (
          id INT IDENTITY PRIMARY KEY,
          bytea INT NULL,
          blob IMAGE NULL,
          clob TEXT NULL
        )
      ],{ RaiseError => 1, PrintError => 0 });
    }
    my $created = eval { $rs->create( { id => 1, blob => $binstr{large} } ) };
    ok(!$@, "inserted large blob without dying with manual PK");
    diag $@ if $@;

    my $got = eval {
      $rs->find(1)->blob
    };
    diag $@ if $@;
    ok($got eq $binstr{large}, "verified inserted large blob with manual PK");

    # try a blob update
    my $new_str = $binstr{large} . 'mtfnpy';

    # check redispatch to storage-specific update when auto-detected storage
    if ($storage_type eq 'DBI::Sybase') {
      DBICTest::Schema->storage_type('::DBI');
      $schema = get_schema();
    }

    eval { $rs->search({ id => 1 })->update({ blob => $new_str }) };
    ok !$@, 'updated blob successfully';
    diag $@ if $@;
    $got = eval {
      $rs->find(1)->blob
    };
    diag $@ if $@;
    ok($got eq $new_str, "verified updated blob");

    ## try multi-row blob update
    # first insert some blobs
    $rs->find(1)->delete;
    $rs->create({ blob => $binstr{large} }) for (1..3);
    $new_str = $binstr{large} . 'foo';
    $rs->update({ blob => $new_str });
    is((grep $_->blob eq $new_str, $rs->all), 3, 'multi-row blob update');
  }

# test MONEY column support
  $schema->storage->dbh_do (sub {
    my ($storage, $dbh) = @_;
    eval { $dbh->do("DROP TABLE money_test") };
    $dbh->do(<<'SQL');
CREATE TABLE money_test (
   id INT IDENTITY PRIMARY KEY,
   amount MONEY NULL
)
SQL
  });

# test insert transaction when there's an active cursor
  SKIP: {
    skip 'not testing insert with active cursor if using ::NoBindVars', 1
      if $storage_type =~ /NoBindVars/i;

    my $artist_rs = $schema->resultset('Artist');
    $artist_rs->first;
    lives_ok {
      my $row = $schema->resultset('Money')->create({ amount => 100 });
      $row->delete;
    } 'inserted a row with an active cursor';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# test insert in an outer transaction when there's an active cursor
  TODO: {
    local $TODO = 'this should work once we have eager cursors';

# clear state, or we get a deadlock on $row->delete
# XXX figure out why this happens
    $schema->storage->disconnect;

    lives_ok {
      $schema->txn_do(sub {
        my $artist_rs = $schema->resultset('Artist');
        $artist_rs->first;
        my $row = $schema->resultset('Money')->create({ amount => 100 });
        $row->delete;
      });
    } 'inserted a row with an active cursor in outer txn';
    $ping_count-- if $@; # dbh_do calls ->connected
  }

# Now test money values.
  my $rs = $schema->resultset('Money');

  my $row;
  lives_ok {
    $row = $rs->create({ amount => 100 });
  } 'inserted a money value';

  is eval { $rs->find($row->id)->amount }, 100, 'money value round-trip';

  lives_ok {
    $row->update({ amount => 200 });
  } 'updated a money value';

  is eval { $rs->find($row->id)->amount },
    200, 'updated money value round-trip';

  lives_ok {
    $row->update({ amount => undef });
  } 'updated a money value to NULL';

  my $null_amount = eval { $rs->find($row->id)->amount };
  ok(
    (!defined($null_amount) && (not $@)),
    'updated money value to NULL round-trip'
  );
  diag $@ if $@;
}

is $ping_count, 0, 'no pings';

# clean up our mess
END {
  if (my $dbh = eval { $schema->storage->_dbh }) {
    eval { $dbh->do("DROP TABLE $_") }
      for qw/artist bindtype_test money_test/;
  }
}