057db5ce 1package DBIx::Class::Storage::DBI::Sybase::ASE;
2
3use strict;
4use warnings;
5
6use base qw/
7 DBIx::Class::Storage::DBI::Sybase
8 DBIx::Class::Storage::DBI::AutoCast
9/;
10use mro 'c3';
70c28808 11use DBIx::Class::Carp;
6298a324 12use Scalar::Util 'blessed';
13use List::Util 'first';
057db5ce 14use Sub::Name();
6298a324 15use Data::Dumper::Concise 'Dumper';
ed7ab0f4 16use Try::Tiny;
fd323bf1 17use namespace::clean;
057db5ce 18
6a247f33 19__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
2b8cc2f2 20__PACKAGE__->sql_quote_char ([qw/[ ]/]);
6f7a118e 21__PACKAGE__->datetime_parser_type('DateTime::Format::Sybase');
6a247f33 22
057db5ce 23__PACKAGE__->mk_group_accessors('simple' =>
24 qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
25 _bulk_storage _is_bulk_storage _began_bulk_work
26 _bulk_disabled_due_to_coderef_connect_info_warned
27 _identity_method/
28);
29
30my @also_proxy_to_extra_storages = qw/
31 connect_call_set_auto_cast auto_cast connect_call_blob_setup
32 connect_call_datetime_setup
33
34 disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
35 auto_savepoint unsafe cursor_class debug debugobj schema
36/;
37
38=head1 NAME
39
40DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
41DBIx::Class
42
43=head1 SYNOPSIS
44
45This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
46
47=head1 DESCRIPTION
48
49If your version of Sybase does not support placeholders, then your storage will
50be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
51You can also enable that driver explicitly, see the documentation for more
52details.
53
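One way to enable it explicitly is to set the storage class before connecting.
This is only a sketch; C<My::Schema> below is a hypothetical schema class:

  # force the NoBindVars variant before connecting (hypothetical schema class)
  My::Schema->storage_type('::DBI::Sybase::ASE::NoBindVars');
  my $schema = My::Schema->connect($dsn, $user, $pass);
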
54With this driver there is unfortunately no way to get the C<last_insert_id>
55without doing a C<SELECT MAX(col)>. This is done safely in a transaction
56(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
57
8384a713 58A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
057db5ce 59
60 on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
61
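For example, a full connect using that setting might look like the sketch below
(the schema class, DSN and credentials are placeholders):

  My::Schema->connect(
    'dbi:Sybase:server=MYSYBASE;database=mydb',   # hypothetical DSN
    $user,
    $pass,
    {
      LongReadLen     => 1024 * 1024,   # also issued as SET TEXTSIZE on connect
      on_connect_call => [
        ['datetime_setup'],
        ['blob_setup', log_on_update => 0],
      ],
    },
  );
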
62=head1 METHODS
63
64=cut
65
66sub _rebless {
67 my $self = shift;
68
69 my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
70
71 if ($self->using_freetds) {
72 carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
73
74You are using FreeTDS with Sybase.
75
76We will do our best to support this configuration, but please consider this
77support experimental.
78
79TEXT/IMAGE columns will definitely not work.
80
81You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
82instead.
83
84See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
85
86To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
87variable.
88EOF
89
bbdda281 90 if (not $self->_use_typeless_placeholders) {
91 if ($self->_use_placeholders) {
057db5ce 92 $self->auto_cast(1);
93 }
94 else {
95 $self->ensure_class_loaded($no_bind_vars);
96 bless $self, $no_bind_vars;
97 $self->_rebless;
98 }
99 }
100 }
101
102 elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
103 # not necessarily FreeTDS, but no placeholders nevertheless
104 $self->ensure_class_loaded($no_bind_vars);
105 bless $self, $no_bind_vars;
106 $self->_rebless;
107 }
108 # this is highly unlikely, but we check just in case
bbdda281 109 elsif (not $self->_use_typeless_placeholders) {
057db5ce 110 $self->auto_cast(1);
111 }
112}
113
114sub _init {
115 my $self = shift;
116 $self->_set_max_connect(256);
117
118# create storage for insert/(update blob) transactions,
119# unless this is that storage
120 return if $self->_is_extra_storage;
121
122 my $writer_storage = (ref $self)->new;
123
124 $writer_storage->_is_extra_storage(1);
125 $writer_storage->connect_info($self->connect_info);
126 $writer_storage->auto_cast($self->auto_cast);
127
128 $self->_writer_storage($writer_storage);
129
130# create a bulk storage unless connect_info is a coderef
131 return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
132
133 my $bulk_storage = (ref $self)->new;
134
135 $bulk_storage->_is_extra_storage(1);
136 $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
137 $bulk_storage->connect_info($self->connect_info);
138
139# this is why
140 $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
141
142 $self->_bulk_storage($bulk_storage);
143}
144
145for my $method (@also_proxy_to_extra_storages) {
146 no strict 'refs';
147 no warnings 'redefine';
148
149 my $replaced = __PACKAGE__->can($method);
150
151 *{$method} = Sub::Name::subname $method => sub {
152 my $self = shift;
153 $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
154 $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
155 return $self->$replaced(@_);
156 };
157}
158
159sub disconnect {
160 my $self = shift;
161
# Even though we call $sth->finish for uses of the bulk API, there's still an
# "active statement" warning on disconnect, which we throw away here.
# This is due to the bug described in insert_bulk.
# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
166 local $SIG{__WARN__} = sub {
167 warn $_[0] unless $_[0] =~ /active statement/i;
168 } if $self->_is_bulk_storage;
169
170# so that next transaction gets a dbh
171 $self->_began_bulk_work(0) if $self->_is_bulk_storage;
172
173 $self->next::method;
174}
175
c1e5a9ac 176# This is only invoked for FreeTDS drivers by ::Storage::DBI::Sybase::FreeTDS
177sub _set_autocommit_stmt {
178 my ($self, $on) = @_;
179
180 return 'SET CHAINED ' . ($on ? 'OFF' : 'ON');
181}
182
057db5ce 183# Set up session settings for Sybase databases for the connection.
184#
185# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
186# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
187# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
188# only want when AutoCommit is off.
057db5ce 189sub _run_connection_actions {
190 my $self = shift;
191
192 if ($self->_is_bulk_storage) {
c1e5a9ac 193 # this should be cleared on every reconnect
057db5ce 194 $self->_began_bulk_work(0);
195 return;
196 }
197
c1e5a9ac 198 $self->_dbh->{syb_chained_txn} = 1
199 unless $self->using_freetds;
057db5ce 200
201 $self->next::method(@_);
202}
203
204=head2 connect_call_blob_setup
205
206Used as:
207
208 on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
209
210Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
211instead of as a hex string.
212
213Recommended.
214
215Also sets the C<log_on_update> value for blob write operations. The default is
216C<1>, but C<0> is better if your database is configured for it.
217
218See
219L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
220
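As a sketch (the C<Document> source and its C<IMAGE>-typed C<body> column are
hypothetical), blob writes then happen through the normal create/update calls:

  # in connect_info:
  #   on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]

  # a plain create() sends the IMAGE data through the blob API
  $schema->resultset('Document')->create({
    name => 'logo.png',
    body => $raw_image_bytes,
  });
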
221=cut
222
223sub connect_call_blob_setup {
224 my $self = shift;
225 my %args = @_;
226 my $dbh = $self->_dbh;
227 $dbh->{syb_binary_images} = 1;
228
229 $self->_blob_log_on_update($args{log_on_update})
230 if exists $args{log_on_update};
231}
232
057db5ce 233sub _is_lob_column {
234 my ($self, $source, $column) = @_;
235
236 return $self->_is_lob_type($source->column_info($column)->{data_type});
237}
238
239sub _prep_for_execute {
240 my $self = shift;
0e773352 241 my ($op, $ident, $args) = @_;
057db5ce 242
243 my ($sql, $bind) = $self->next::method (@_);
244
6298a324 245 my $table = blessed $ident ? $ident->from : $ident;
057db5ce 246
247 my $bind_info = $self->_resolve_column_info(
0e773352 248 $ident, [map { $_->[0]{dbic_colname} || () } @{$bind}]
057db5ce 249 );
6298a324 250 my $bound_identity_col =
251 first { $bind_info->{$_}{is_auto_increment} }
252 keys %$bind_info
057db5ce 253 ;
e366f807 254
255 my $columns_info = blessed $ident && $ident->columns_info;
256
6298a324 257 my $identity_col =
e366f807 258 $columns_info &&
259 first { $columns_info->{$_}{is_auto_increment} }
260 keys %$columns_info
057db5ce 261 ;
262
263 if (($op eq 'insert' && $bound_identity_col) ||
264 ($op eq 'update' && exists $args->[0]{$identity_col})) {
265 $sql = join ("\n",
266 $self->_set_table_identity_sql($op => $table, 'on'),
267 $sql,
268 $self->_set_table_identity_sql($op => $table, 'off'),
269 );
270 }
271
272 if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
273 (not $self->{insert_bulk})) {
274 $sql =
275 "$sql\n" .
276 $self->_fetch_identity_sql($ident, $identity_col);
277 }
278
279 return ($sql, $bind);
280}
281
282sub _set_table_identity_sql {
283 my ($self, $op, $table, $on_off) = @_;
284
285 return sprintf 'SET IDENTITY_%s %s %s',
286 uc($op), $self->sql_maker->_quote($table), uc($on_off);
287}
288
289# Stolen from SQLT, with some modifications. This is a makeshift
290# solution before a sane type-mapping library is available, thus
291# the 'our' for easy overrides.
292our %TYPE_MAPPING = (
293 number => 'numeric',
294 money => 'money',
295 varchar => 'varchar',
296 varchar2 => 'varchar',
297 timestamp => 'datetime',
298 text => 'varchar',
299 real => 'double precision',
300 comment => 'text',
301 bit => 'bit',
302 tinyint => 'smallint',
303 float => 'double precision',
304 serial => 'numeric',
305 bigserial => 'numeric',
306 boolean => 'varchar',
307 long => 'varchar',
308);
309
310sub _native_data_type {
311 my ($self, $type) = @_;
312
313 $type = lc $type;
314 $type =~ s/\s* identity//x;
315
316 return uc($TYPE_MAPPING{$type} || $type);
317}
318
319sub _fetch_identity_sql {
320 my ($self, $source, $col) = @_;
321
322 return sprintf ("SELECT MAX(%s) FROM %s",
323 map { $self->sql_maker->_quote ($_) } ($col, $source->from)
324 );
325}
326
327sub _execute {
328 my $self = shift;
329 my ($op) = @_;
330
0e773352 331 my ($rv, $sth, @bind) = $self->next::method(@_);
057db5ce 332
333 if ($op eq 'insert') {
334 $self->_identity($sth->fetchrow_array);
335 $sth->finish;
336 }
337
338 return wantarray ? ($rv, $sth, @bind) : $rv;
339}
340
341sub last_insert_id { shift->_identity }
342
343# handles TEXT/IMAGE and transaction for last_insert_id
344sub insert {
345 my $self = shift;
346 my ($source, $to_insert) = @_;
347
e366f807 348 my $columns_info = $source->columns_info;
349
6298a324 350 my $identity_col =
e366f807 351 (first { $columns_info->{$_}{is_auto_increment} }
352 keys %$columns_info )
6298a324 353 || '';
057db5ce 354
355 # check for empty insert
356 # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
6469dabf 357 # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
358 # and computed columns)
057db5ce 359 if (not %$to_insert) {
360 for my $col ($source->columns) {
361 next if $col eq $identity_col;
6469dabf 362
363 my $info = $source->column_info($col);
364
365 next if ref $info->{default_value} eq 'SCALAR'
366 || (exists $info->{data_type} && (not defined $info->{data_type}));
367
368 next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
369
057db5ce 370 $to_insert->{$col} = \'DEFAULT';
371 }
372 }
373
374 my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
375
376 # do we need the horrific SELECT MAX(COL) hack?
377 my $dumb_last_insert_id =
378 $identity_col
379 && (not exists $to_insert->{$identity_col})
380 && ($self->_identity_method||'') ne '@@IDENTITY';
381
382 my $next = $self->next::can;
383
384 # we are already in a transaction, or there are no blobs
385 # and we don't need the PK - just (try to) do it
386 if ($self->{transaction_depth}
387 || (!$blob_cols && !$dumb_last_insert_id)
388 ) {
389 return $self->_insert (
390 $next, $source, $to_insert, $blob_cols, $identity_col
391 );
392 }
393
394 # otherwise use the _writer_storage to do the insert+transaction on another
395 # connection
396 my $guard = $self->_writer_storage->txn_scope_guard;
397
398 my $updated_cols = $self->_writer_storage->_insert (
399 $next, $source, $to_insert, $blob_cols, $identity_col
400 );
401
402 $self->_identity($self->_writer_storage->_identity);
403
404 $guard->commit;
405
406 return $updated_cols;
407}
408
409sub _insert {
410 my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
411
412 my $updated_cols = $self->$next ($source, $to_insert);
413
414 my $final_row = {
415 ($identity_col ?
416 ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
417 %$to_insert,
418 %$updated_cols,
419 };
420
421 $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
422
423 return $updated_cols;
424}
425
426sub update {
427 my $self = shift;
428 my ($source, $fields, $where, @rest) = @_;
429
057db5ce 430 my $blob_cols = $self->_remove_blob_cols($source, $fields);
431
432 my $table = $source->name;
433
e366f807 434 my $columns_info = $source->columns_info;
435
6298a324 436 my $identity_col =
e366f807 437 first { $columns_info->{$_}{is_auto_increment} }
438 keys %$columns_info;
057db5ce 439
440 my $is_identity_update = $identity_col && defined $fields->{$identity_col};
441
442 return $self->next::method(@_) unless $blob_cols;
443
444# If there are any blobs in $where, Sybase will return a descriptive error
445# message.
446# XXX blobs can still be used with a LIKE query, and this should be handled.
447
448# update+blob update(s) done atomically on separate connection
449 $self = $self->_writer_storage;
450
451 my $guard = $self->txn_scope_guard;
452
# First update the blob columns to be updated to '' (taken from $fields, where
# it was originally put by _remove_blob_cols).
455 my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
456
457# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
458
459 $self->next::method($source, \%blobs_to_empty, $where, @rest);
460
461# Now update the blobs before the other columns in case the update of other
462# columns makes the search condition invalid.
463 $self->_update_blobs($source, $blob_cols, $where);
464
465 my @res;
466 if (%$fields) {
cca282b6 467 if (wantarray) {
057db5ce 468 @res = $self->next::method(@_);
469 }
cca282b6 470 elsif (defined wantarray) {
057db5ce 471 $res[0] = $self->next::method(@_);
472 }
473 else {
474 $self->next::method(@_);
475 }
476 }
477
478 $guard->commit;
479
cca282b6 480 return wantarray ? @res : $res[0];
057db5ce 481}
482
483sub insert_bulk {
484 my $self = shift;
485 my ($source, $cols, $data) = @_;
486
e366f807 487 my $columns_info = $source->columns_info;
488
6298a324 489 my $identity_col =
e366f807 490 first { $columns_info->{$_}{is_auto_increment} }
491 keys %$columns_info;
057db5ce 492
6298a324 493 my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;
057db5ce 494
495 my @source_columns = $source->columns;
496
497 my $use_bulk_api =
498 $self->_bulk_storage &&
499 $self->_get_dbh->{syb_has_blk};
500
501 if ((not $use_bulk_api)
502 &&
503 (ref($self->_dbi_connect_info->[0]) eq 'CODE')
504 &&
505 (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
506 carp <<'EOF';
507Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
508regular array inserts.
509EOF
510 $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
511 }
512
513 if (not $use_bulk_api) {
514 my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
515
516# _execute_array uses a txn anyway, but it ends too early in case we need to
517# select max(col) to get the identity for inserting blobs.
518 ($self, my $guard) = $self->{transaction_depth} == 0 ?
519 ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
520 :
521 ($self, undef);
522
523 local $self->{insert_bulk} = 1;
524
525 $self->next::method(@_);
526
527 if ($blob_cols) {
528 if ($is_identity_insert) {
529 $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
530 }
531 else {
532 my @cols_with_identities = (@$cols, $identity_col);
533
534 ## calculate identities
535 # XXX This assumes identities always increase by 1, which may or may not
536 # be true.
537 my ($last_identity) =
538 $self->_dbh->selectrow_array (
539 $self->_fetch_identity_sql($source, $identity_col)
540 );
541 my @identities = (($last_identity - @$data + 1) .. $last_identity);
542
543 my @data_with_identities = map [@$_, shift @identities], @$data;
544
545 $self->_insert_blobs_array (
546 $source, $blob_cols, \@cols_with_identities, \@data_with_identities
547 );
548 }
549 }
550
551 $guard->commit if $guard;
552
553 return;
554 }
555
556# otherwise, use the bulk API
557
558# rearrange @$data so that columns are in database order
559 my %orig_idx;
560 @orig_idx{@$cols} = 0..$#$cols;
561
562 my %new_idx;
563 @new_idx{@source_columns} = 0..$#source_columns;
564
565 my @new_data;
566 for my $datum (@$data) {
567 my $new_datum = [];
568 for my $col (@source_columns) {
569# identity data will be 'undef' if not $is_identity_insert
570# columns with defaults will also be 'undef'
571 $new_datum->[ $new_idx{$col} ] =
572 exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
573 }
574 push @new_data, $new_datum;
575 }
576
577# bcp identity index is 1-based
578 my $identity_idx = exists $new_idx{$identity_col} ?
579 $new_idx{$identity_col} + 1 : 0;
580
581## Set a client-side conversion error handler, straight from DBD::Sybase docs.
582# This ignores any data conversion errors detected by the client side libs, as
583# they are usually harmless.
584 my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
585 Sub::Name::subname insert_bulk => sub {
586 my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
587
588 return 1 if $errno == 36;
589
590 carp
591 "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
592 ($errmsg ? "\n$errmsg" : '') .
593 ($osmsg ? "\n$osmsg" : '') .
594 ($blkmsg ? "\n$blkmsg" : '');
595
596 return 0;
597 });
598
4edfce2f 599 my $exception = '';
ed7ab0f4 600 try {
057db5ce 601 my $bulk = $self->_bulk_storage;
602
603 my $guard = $bulk->txn_scope_guard;
604
605## XXX get this to work instead of our own $sth
606## will require SQLA or *Hacks changes for ordered columns
607# $bulk->next::method($source, \@source_columns, \@new_data, {
608# syb_bcp_attribs => {
609# identity_flag => $is_identity_insert,
610# identity_column => $identity_idx,
611# }
612# });
613 my $sql = 'INSERT INTO ' .
614 $bulk->sql_maker->_quote($source->name) . ' (' .
615# colname list is ignored for BCP, but does no harm
616 (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
617 ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
618
619## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
620## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
621## better yet the version above. Should be fixed in DBD::Sybase .
622 my $sth = $bulk->_get_dbh->prepare($sql,
623# 'insert', # op
624 {
625 syb_bcp_attribs => {
626 identity_flag => $is_identity_insert,
627 identity_column => $identity_idx,
628 }
629 }
630 );
631
0e773352 632 my @bind = map { [ $source_columns[$_] => $_ ] } (0 .. $#source_columns);
057db5ce 633
634 $self->_execute_array(
635 $source, $sth, \@bind, \@source_columns, \@new_data, sub {
636 $guard->commit
637 }
638 );
639
640 $bulk->_query_end($sql);
ed7ab0f4 641 } catch {
642 $exception = shift;
057db5ce 643 };
644
057db5ce 645 DBD::Sybase::set_cslib_cb($orig_cslib_cb);
646
647 if ($exception =~ /-Y option/) {
f32e99f9 648 my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
649 . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
650 ;
651 $w .= "\n$exception" if $self->debug;
652 carp $w;
057db5ce 653
057db5ce 654 $self->_bulk_storage(undef);
655 unshift @_, $self;
656 goto \&insert_bulk;
657 }
658 elsif ($exception) {
659# rollback makes the bulkLogin connection unusable
660 $self->_bulk_storage->disconnect;
661 $self->throw_exception($exception);
662 }
663}
664
665sub _dbh_execute_array {
666 my ($self, $sth, $tuple_status, $cb) = @_;
667
668 my $rv = $self->next::method($sth, $tuple_status);
669 $cb->() if $cb;
670
671 return $rv;
672}
673
674# Make sure blobs are not bound as placeholders, and return any non-empty ones
675# as a hash.
676sub _remove_blob_cols {
677 my ($self, $source, $fields) = @_;
678
679 my %blob_cols;
680
681 for my $col (keys %$fields) {
682 if ($self->_is_lob_column($source, $col)) {
683 my $blob_val = delete $fields->{$col};
684 if (not defined $blob_val) {
685 $fields->{$col} = \'NULL';
686 }
687 else {
688 $fields->{$col} = \"''";
689 $blob_cols{$col} = $blob_val unless $blob_val eq '';
690 }
691 }
692 }
693
694 return %blob_cols ? \%blob_cols : undef;
695}
696
697# same for insert_bulk
698sub _remove_blob_cols_array {
699 my ($self, $source, $cols, $data) = @_;
700
701 my @blob_cols;
702
703 for my $i (0..$#$cols) {
704 my $col = $cols->[$i];
705
706 if ($self->_is_lob_column($source, $col)) {
707 for my $j (0..$#$data) {
708 my $blob_val = delete $data->[$j][$i];
709 if (not defined $blob_val) {
710 $data->[$j][$i] = \'NULL';
711 }
712 else {
713 $data->[$j][$i] = \"''";
714 $blob_cols[$j][$i] = $blob_val
715 unless $blob_val eq '';
716 }
717 }
718 }
719 }
720
721 return @blob_cols ? \@blob_cols : undef;
722}
723
724sub _update_blobs {
725 my ($self, $source, $blob_cols, $where) = @_;
726
9780718f 727 my @primary_cols = try
728 { $source->_pri_cols }
729 catch {
730 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
731 };
057db5ce 732
733# check if we're updating a single row by PK
734 my $pk_cols_in_where = 0;
735 for my $col (@primary_cols) {
736 $pk_cols_in_where++ if defined $where->{$col};
737 }
738 my @rows;
739
740 if ($pk_cols_in_where == @primary_cols) {
741 my %row_to_update;
742 @row_to_update{@primary_cols} = @{$where}{@primary_cols};
743 @rows = \%row_to_update;
744 } else {
745 my $cursor = $self->select ($source, \@primary_cols, $where, {});
746 @rows = map {
747 my %row; @row{@primary_cols} = @$_; \%row
748 } $cursor->all;
749 }
750
751 for my $row (@rows) {
752 $self->_insert_blobs($source, $blob_cols, $row);
753 }
754}
755
756sub _insert_blobs {
757 my ($self, $source, $blob_cols, $row) = @_;
758 my $dbh = $self->_get_dbh;
759
760 my $table = $source->name;
761
762 my %row = %$row;
9780718f 763 my @primary_cols = try
764 { $source->_pri_cols }
765 catch {
766 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
767 };
057db5ce 768
769 $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
770 if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
771
772 for my $col (keys %$blob_cols) {
773 my $blob = $blob_cols->{$col};
774
775 my %where = map { ($_, $row{$_}) } @primary_cols;
776
777 my $cursor = $self->select ($source, [$col], \%where, {});
778 $cursor->next;
779 my $sth = $cursor->sth;
780
781 if (not $sth) {
057db5ce 782 $self->throw_exception(
783 "Could not find row in table '$table' for blob update:\n"
6298a324 784 . (Dumper \%where)
057db5ce 785 );
786 }
787
9780718f 788 try {
057db5ce 789 do {
790 $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
791 } while $sth->fetch;
792
793 $sth->func('ct_prepare_send') or die $sth->errstr;
794
795 my $log_on_update = $self->_blob_log_on_update;
796 $log_on_update = 1 if not defined $log_on_update;
797
798 $sth->func('CS_SET', 1, {
799 total_txtlen => length($blob),
800 log_on_update => $log_on_update
801 }, 'ct_data_info') or die $sth->errstr;
802
803 $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
804
805 $sth->func('ct_finish_send') or die $sth->errstr;
9780718f 806 }
807 catch {
057db5ce 808 if ($self->using_freetds) {
809 $self->throw_exception (
9780718f 810 "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
057db5ce 811 );
9780718f 812 }
813 else {
814 $self->throw_exception($_);
057db5ce 815 }
816 }
9780718f 817 finally {
818 $sth->finish if $sth;
819 };
057db5ce 820 }
821}
822
823sub _insert_blobs_array {
824 my ($self, $source, $blob_cols, $cols, $data) = @_;
825
826 for my $i (0..$#$data) {
827 my $datum = $data->[$i];
828
829 my %row;
830 @row{ @$cols } = @$datum;
831
832 my %blob_vals;
833 for my $j (0..$#$cols) {
834 if (exists $blob_cols->[$i][$j]) {
835 $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
836 }
837 }
838
839 $self->_insert_blobs ($source, \%blob_vals, \%row);
840 }
841}
842
843=head2 connect_call_datetime_setup
844
845Used as:
846
847 on_connect_call => 'datetime_setup'
848
8384a713 849In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
057db5ce 850
851 $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
852 $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
853
These commands are run on connection for use with
L<DBIx::Class::InflateColumn::DateTime>, via L<DateTime::Format::Sybase>,
which you will need to install.

This works for both C<DATETIME> and C<SMALLDATETIME> columns; note that
C<SMALLDATETIME> columns only have minute precision.
859
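A hypothetical Result class column that would then inflate to L<DateTime>
objects:

  __PACKAGE__->load_components('InflateColumn::DateTime');

  __PACKAGE__->add_columns(
    created_on => {
      data_type => 'datetime',   # or 'smalldatetime' (minute precision)
    },
  );
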
860=cut
861
70c28808 862sub connect_call_datetime_setup {
863 my $self = shift;
864 my $dbh = $self->_get_dbh;
057db5ce 865
70c28808 866 if ($dbh->can('syb_date_fmt')) {
867 # amazingly, this works with FreeTDS
868 $dbh->syb_date_fmt('ISO_strict');
869 }
870 else {
871 carp_once
872 'Your DBD::Sybase is too old to support '
873 .'DBIx::Class::InflateColumn::DateTime, please upgrade!';
057db5ce 874
875 $dbh->do('SET DATEFORMAT mdy');
057db5ce 876 1;
877 }
878}
879
057db5ce 880
057db5ce 881sub _dbh_begin_work {
882 my $self = shift;
883
884# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
885# TRAN once. However, we need to make sure there's a $dbh.
886 return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
887
888 $self->next::method(@_);
889
057db5ce 890 $self->_began_bulk_work(1) if $self->_is_bulk_storage;
891}
892
057db5ce 893# savepoint support using ASE syntax
894
895sub _svp_begin {
896 my ($self, $name) = @_;
897
898 $self->_get_dbh->do("SAVE TRANSACTION $name");
899}
900
901# A new SAVE TRANSACTION with the same name releases the previous one.
902sub _svp_release { 1 }
903
904sub _svp_rollback {
905 my ($self, $name) = @_;
906
907 $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
908}
909
9101;
911
912=head1 Schema::Loader Support
913
290da7d6 914As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
c1e5a9ac 915most versions of Sybase ASE.
057db5ce 916
917=head1 FreeTDS
918
This driver supports L<DBD::Sybase> compiled against FreeTDS
(L<http://www.freetds.org/>) to the best of our ability; however, it is
recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
libraries, which are a part of the Sybase ASE distribution.
923
924The Open Client FAQ is here:
925L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
926
927Sybase ASE for Linux (which comes with the Open Client libraries) may be
928downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
929
To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
931
932 perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
933
c1e5a9ac 934It is recommended to set C<tds version> for your ASE server to C<5.0> in
935C</etc/freetds/freetds.conf>.
936
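A minimal C<freetds.conf> entry along those lines (the server alias, host and
port below are placeholders):

  [MYSYBASE]
    host = sybase.example.com
    port = 5000
    tds version = 5.0
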
937Some versions or configurations of the libraries involved will not support
938placeholders, in which case the storage will be reblessed to
057db5ce 939L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
940
In some configurations, placeholders will work but will throw implicit type
conversion errors for anything that's not expecting a string. In such a case,
the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
automatically set, and you may also enable it explicitly on connection with
L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>.
The type info for the C<CAST>s is taken from the
L<DBIx::Class::ResultSource/data_type> definitions in your Result classes, and
is mapped to a Sybase type (if it isn't already) using a mapping based on
L<SQL::Translator>.
057db5ce 950
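To enable the cast behaviour explicitly instead of relying on detection, the
corresponding connect call can be listed in
L<connect_info|DBIx::Class::Storage::DBI/connect_info>:

  on_connect_call => ['set_auto_cast']
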
48580715 951In other configurations, placeholders will work just as they do with the Sybase
057db5ce 952Open Client libraries.
953
954Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
955
956=head1 INSERTS WITH PLACEHOLDERS
957
958With placeholders enabled, inserts are done in a transaction so that there are
959no concurrency issues with getting the inserted identity value using
960C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
961mode.
962
963In addition, they are done on a separate connection so that it's possible to
964have active cursors when doing an insert.
965
When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>, transactions
are unnecessary and not used, as there are no concurrency issues with C<SELECT
@@IDENTITY>, which is a session variable.
057db5ce 969
970=head1 TRANSACTIONS
971
c1e5a9ac 972Due to limitations of the TDS protocol and L<DBD::Sybase>, you cannot begin a
973transaction while there are active cursors, nor can you use multiple active
974cursors within a transaction. An active cursor is, for example, a
057db5ce 975L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
976C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
977
978For example, this will not work:
979
980 $schema->txn_do(sub {
981 my $rs = $schema->resultset('Book');
982 while (my $row = $rs->next) {
983 $schema->resultset('MetaData')->create({
984 book_id => $row->id,
985 ...
986 });
987 }
988 });
989
990This won't either:
991
992 my $first_row = $large_rs->first;
993 $schema->txn_do(sub { ... });
994
995Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
996are not affected, as they are done on an extra database handle.
997
998Some workarounds:
999
1000=over 4
1001
1002=item * use L<DBIx::Class::Storage::DBI::Replicated>
1003
1004=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
1005
=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the sketch below)
1007
1008=back
1009
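A sketch of the last workaround, reusing the result sources from the example
above: fully load the data from the cursor before entering the transaction, so
no active cursor remains inside it.

  my @books = $schema->resultset('Book')->all;   # cursor fully exhausted here

  $schema->txn_do(sub {
    $schema->resultset('MetaData')->create({ book_id => $_->id })
      for @books;
  });
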
1010=head1 MAXIMUM CONNECTIONS
1011
1012The TDS protocol makes separate connections to the server for active statements
1013in the background. By default the number of such connections is limited to 25,
1014on both the client side and the server side.
1015
1016This is a bit too low for a complex L<DBIx::Class> application, so on connection
1017the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
1018can override it to whatever setting you like in the DSN.
1019
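For example, a DSN raising the client-side limit (the server name is a
placeholder):

  dbi:Sybase:server=MYSYBASE;maxConnect=1024
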
1020See
1021L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
1022for information on changing the setting on the server side.
1023
1024=head1 DATES
1025
See L</connect_call_datetime_setup> to set up date formats
1027for L<DBIx::Class::InflateColumn::DateTime>.
1028
1029=head1 TEXT/IMAGE COLUMNS
1030
1031L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
1032C<TEXT/IMAGE> columns.
1033
Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
1035
1036 $schema->storage->dbh->do("SET TEXTSIZE $bytes");
1037
1038or
1039
1040 $schema->storage->set_textsize($bytes);
1041
1042instead.
1043
1044However, the C<LongReadLen> you pass in
8384a713 1045L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
1046equivalent C<SET TEXTSIZE> command on connection.
057db5ce 1047
8384a713 1048See L</connect_call_blob_setup> for a
1049L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
1050with C<IMAGE> columns.
057db5ce 1051
1052=head1 BULK API
1053
1054The experimental L<DBD::Sybase> Bulk API support is used for
1055L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
1056on a separate connection.
1057
1058To use this feature effectively, use a large number of rows for each
L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
1060
1061 while (my $rows = $data_source->get_100_rows()) {
1062 $rs->populate($rows);
1063 }
1064
B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
calls in your C<Result> classes B<must> list columns in database order for this
to work. Also, you may have to unset the C<LANG> environment variable before
loading your app, as C<BCP -Y> is not yet supported in DBD::Sybase.
057db5ce 1069
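A hypothetical Result class declaring its columns in database order:

  package My::Schema::Result::Track;

  use base 'DBIx::Class::Core';

  __PACKAGE__->table('track');
  # same order as the columns appear in the database table
  __PACKAGE__->add_columns(qw/ trackid albumid title position /);
  __PACKAGE__->set_primary_key('trackid');

  1;

Calling C<populate> in void context on that source will then go through the
Bulk API on the separate bulk connection.
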
1070When inserting IMAGE columns using this method, you'll need to use
1071L</connect_call_blob_setup> as well.
1072
6476fd66 1073=head1 COMPUTED COLUMNS
1074
1075If you have columns such as:
1076
1077 created_dtm AS getdate()
1078
1079represent them in your Result classes as:
1080
1081 created_dtm => {
1082 data_type => undef,
1083 default_value => \'getdate()',
1084 is_nullable => 0,
c1e5a9ac 1085 inflate_datetime => 1,
6476fd66 1086 }
1087
1088The C<data_type> must exist and must be C<undef>. Then empty inserts will work
1089on tables with such columns.
1090
1091=head1 TIMESTAMP COLUMNS
1092
1093C<timestamp> columns in Sybase ASE are not really timestamps, see:
1094L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
1095
1096They should be defined in your Result classes as:
1097
1098 ts => {
1099 data_type => 'timestamp',
1100 is_nullable => 0,
1101 inflate_datetime => 0,
1102 }
1103
The C<< inflate_datetime => 0 >> is necessary if you use
L<DBIx::Class::InflateColumn::DateTime> (as most people do) and still want to
be able to read these values.
1107
1108The values will come back as hexadecimal.
1109
057db5ce 1110=head1 TODO
1111
1112=over
1113
1114=item *
1115
1116Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
1117any active cursors, using eager cursors.
1118
1119=item *
1120
1121Real limits and limited counts using stored procedures deployed on startup.
1122
1123=item *
1124
057db5ce 1125Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
1126
1127=item *
1128
1129bulk_insert using prepare_cached (see comments.)
1130
1131=back
1132
1133=head1 AUTHOR
1134
8138f96a 1135See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
057db5ce 1136
1137=head1 LICENSE
1138
1139You may distribute this code under the same terms as Perl itself.
1140
1141=cut
1142# vim:sts=2 sw=2: