Fix assumption in Sybase::ASE blob updater: cond is not always a HASHREF
[dbsrgits/DBIx-Class.git] / lib / DBIx / Class / Storage / DBI / Sybase / ASE.pm
CommitLineData
057db5ce 1package DBIx::Class::Storage::DBI::Sybase::ASE;
2
3use strict;
4use warnings;
5
6use base qw/
7 DBIx::Class::Storage::DBI::Sybase
8 DBIx::Class::Storage::DBI::AutoCast
9/;
10use mro 'c3';
70c28808 11use DBIx::Class::Carp;
6298a324 12use Scalar::Util 'blessed';
13use List::Util 'first';
057db5ce 14use Sub::Name();
6298a324 15use Data::Dumper::Concise 'Dumper';
ed7ab0f4 16use Try::Tiny;
fd323bf1 17use namespace::clean;
057db5ce 18
6a247f33 19__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
2b8cc2f2 20__PACKAGE__->sql_quote_char ([qw/[ ]/]);
6f7a118e 21__PACKAGE__->datetime_parser_type('DateTime::Format::Sybase');
6a247f33 22
057db5ce 23__PACKAGE__->mk_group_accessors('simple' =>
24 qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
25 _bulk_storage _is_bulk_storage _began_bulk_work
26 _bulk_disabled_due_to_coderef_connect_info_warned
27 _identity_method/
28);
29
30my @also_proxy_to_extra_storages = qw/
31 connect_call_set_auto_cast auto_cast connect_call_blob_setup
32 connect_call_datetime_setup
33
34 disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
35 auto_savepoint unsafe cursor_class debug debugobj schema
36/;
37
38=head1 NAME
39
40DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
41DBIx::Class
42
43=head1 SYNOPSIS
44
45This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
46
47=head1 DESCRIPTION
48
49If your version of Sybase does not support placeholders, then your storage will
50be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
51You can also enable that driver explicitly, see the documentation for more
52details.
53
54With this driver there is unfortunately no way to get the C<last_insert_id>
55without doing a C<SELECT MAX(col)>. This is done safely in a transaction
56(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
57
8384a713 58A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
057db5ce 59
60 on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
61
62=head1 METHODS
63
64=cut
65
# Rebless this storage to the most capable subclass for the connected driver.
# FreeTDS builds get a loud warning; drivers without placeholder support are
# demoted to ::NoBindVars; partially-capable ones get auto_cast turned on.
sub _rebless {
  my $self = shift;

  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';

  if ($self->using_freetds) {
    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};

You are using FreeTDS with Sybase.

We will do our best to support this configuration, but please consider this
support experimental.

TEXT/IMAGE columns will definitely not work.

You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
instead.

See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.

To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
variable.
EOF

    if (not $self->_use_typeless_placeholders) {
      if ($self->_use_placeholders) {
        # typed placeholders only - compensate with explicit CASTs
        $self->auto_cast(1);
      }
      else {
        # no placeholders at all - fall back to literal SQL
        $self->ensure_class_loaded($no_bind_vars);
        bless $self, $no_bind_vars;
        $self->_rebless;
      }
    }
  }

  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
    # not necessarily FreeTDS, but no placeholders nevertheless
    $self->ensure_class_loaded($no_bind_vars);
    bless $self, $no_bind_vars;
    $self->_rebless;
  }
  # this is highly unlikely, but we check just in case
  elsif (not $self->_use_typeless_placeholders) {
    $self->auto_cast(1);
  }
}
113
# Post-connect initialization: raise the connection cap and set up the two
# auxiliary storages (a writer storage for insert/blob transactions, and a
# bulkLogin storage for the bulk API) unless we ARE one of those extras.
sub _init {
  my $self = shift;
  $self->_set_max_connect(256);

# create storage for insert/(update blob) transactions,
# unless this is that storage
  return if $self->_is_extra_storage;

  my $writer_storage = (ref $self)->new;

  $writer_storage->_is_extra_storage(1);
  $writer_storage->connect_info($self->connect_info);
  $writer_storage->auto_cast($self->auto_cast);

  $self->_writer_storage($writer_storage);

# create a bulk storage unless connect_info is a coderef
# (a coderef DSN cannot be amended with the bulkLogin flag below)
  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';

  my $bulk_storage = (ref $self)->new;

  $bulk_storage->_is_extra_storage(1);
  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
  $bulk_storage->connect_info($self->connect_info);

# this is why (the bulk API requires a bulkLogin=1 connection)
  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';

  $self->_bulk_storage($bulk_storage);
}
144
# Wrap each listed method so that calls on the main storage are forwarded to
# the writer and bulk storages as well, keeping their settings (connect_info,
# debug, sql_maker, etc.) in sync. $replaced captures the original coderef
# before the glob is overwritten.
for my $method (@also_proxy_to_extra_storages) {
  no strict 'refs';
  no warnings 'redefine';

  my $replaced = __PACKAGE__->can($method);

  *{$method} = Sub::Name::subname $method => sub {
    my $self = shift;
    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
    $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
    return $self->$replaced(@_);
  };
}
158
# Disconnect, with extra housekeeping for the bulkLogin storage: suppress the
# spurious "active statement" warning and reset the bulk-work flag so the next
# transaction re-issues BEGIN TRAN on a fresh handle.
sub disconnect {
  my $self = shift;

# Even though we call $sth->finish for uses off the bulk API, there's still an
# "active statement" warning on disconnect, which we throw away here.
# This is due to the bug described in insert_bulk.
# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
  local $SIG{__WARN__} = sub {
    warn $_[0] unless $_[0] =~ /active statement/i;
  } if $self->_is_bulk_storage;

# so that next transaction gets a dbh
  $self->_began_bulk_work(0) if $self->_is_bulk_storage;

  $self->next::method;
}
175
# This is only invoked for FreeTDS drivers by ::Storage::DBI::Sybase::FreeTDS.
# ASE expresses autocommit via CHAINED mode with inverted polarity:
# AutoCommit on => CHAINED OFF, AutoCommit off => CHAINED ON.
sub _set_autocommit_stmt {
  my ($self, $on) = @_;

  my $chained_mode = $on ? 'OFF' : 'ON';
  return "SET CHAINED $chained_mode";
}
182
# Set up session settings for Sybase databases for the connection.
#
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
# only want when AutoCommit is off.
sub _run_connection_actions {
  my $self = shift;

  if ($self->_is_bulk_storage) {
    # this should be cleared on every reconnect
    $self->_began_bulk_work(0);
    # bulk storages skip the normal on_connect actions entirely
    return;
  }

  $self->_dbh->{syb_chained_txn} = 1
    unless $self->using_freetds;

  $self->next::method(@_);
}
203
204=head2 connect_call_blob_setup
205
206Used as:
207
208 on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
209
210Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
211instead of as a hex string.
212
213Recommended.
214
215Also sets the C<log_on_update> value for blob write operations. The default is
216C<1>, but C<0> is better if your database is configured for it.
217
218See
219L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
220
221=cut
222
# on_connect_call hook: switch IMAGE retrieval to raw binary and optionally
# record the log_on_update preference used by later blob writes.
sub connect_call_blob_setup {
  my ($self, %args) = @_;

  # return IMAGE data as raw binary rather than a hex string
  $self->_dbh->{syb_binary_images} = 1;

  $self->_blob_log_on_update($args{log_on_update})
    if exists $args{log_on_update};
}
232
# True if $column on $source is declared with a TEXT/IMAGE (LOB) data type.
sub _is_lob_column {
  my ($self, $source, $column) = @_;

  my $declared_type = $source->column_info($column)->{data_type};
  return $self->_is_lob_type($declared_type);
}
238
# Post-process the SQL generated upstream:
#  - bracket statements that explicitly write an identity column with
#    SET IDENTITY_INSERT/IDENTITY_UPDATE ON ... OFF
#  - append the SELECT MAX(col) identity-retrieval query after plain inserts
#    (skipped during insert_bulk, which computes identities differently)
sub _prep_for_execute {
  my $self = shift;
  my ($op, $ident, $args) = @_;

  my ($sql, $bind) = $self->next::method (@_);

  # $ident may be a ResultSource object or a plain table name
  my $table = blessed $ident ? $ident->from : $ident;

  my $bind_info = $self->_resolve_column_info(
    $ident, [map { $_->[0]{dbic_colname} || () } @{$bind}]
  );
  # is an auto-increment column among the bound values?
  my $bound_identity_col =
    first { $bind_info->{$_}{is_auto_increment} }
    keys %$bind_info
  ;

  my $columns_info = blessed $ident && $ident->columns_info;

  my $identity_col =
    $columns_info &&
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info
  ;

  if (($op eq 'insert' && $bound_identity_col) ||
      ($op eq 'update' && exists $args->[0]{$identity_col})) {
    $sql = join ("\n",
      $self->_set_table_identity_sql($op => $table, 'on'),
      $sql,
      $self->_set_table_identity_sql($op => $table, 'off'),
    );
  }

  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
      (not $self->{insert_bulk})) {
    $sql =
      "$sql\n" .
      $self->_fetch_identity_sql($ident, $identity_col);
  }

  return ($sql, $bind);
}
281
# Build the "SET IDENTITY_INSERT <table> ON/OFF" (or IDENTITY_UPDATE) statement
# used to bracket SQL that explicitly supplies identity values.
sub _set_table_identity_sql {
  my ($self, $op, $table, $on_off) = @_;

  my $quoted_table = $self->sql_maker->_quote($table);
  return join ' ', 'SET IDENTITY_' . uc($op), $quoted_table, uc($on_off);
}
288
# Stolen from SQLT, with some modifications. This is a makeshift
# solution before a sane type-mapping library is available, thus
# the 'our' for easy overrides.
our %TYPE_MAPPING = (
  number    => 'numeric',
  money     => 'money',
  varchar   => 'varchar',
  varchar2  => 'varchar',
  timestamp => 'datetime',
  text      => 'varchar',
  real      => 'double precision',
  comment   => 'text',
  bit       => 'bit',
  tinyint   => 'smallint',
  float     => 'double precision',
  serial    => 'numeric',
  bigserial => 'numeric',
  boolean   => 'varchar',
  long      => 'varchar',
);

# Translate a DBIC-declared data_type into the native ASE type name, in upper
# case. A trailing 'identity' qualifier is dropped first; types without a
# mapping pass through unchanged (apart from the uppercasing).
sub _native_data_type {
  my ($self, $declared_type) = @_;

  my $normalized = lc $declared_type;
  $normalized =~ s/\s* identity//x;

  my $native = $TYPE_MAPPING{$normalized} || $normalized;
  return uc $native;
}
318
# SQL that retrieves the just-inserted identity value via the SELECT MAX(col)
# workaround (see "INSERTS WITH PLACEHOLDERS" in the POD).
sub _fetch_identity_sql {
  my ($self, $source, $col) = @_;

  my $maker = $self->sql_maker;
  return sprintf ('SELECT MAX(%s) FROM %s',
    $maker->_quote($col), $maker->_quote($source->from)
  );
}
326
# Execute the statement; for inserts, the SELECT MAX() appended by
# _prep_for_execute produces an extra result row holding the new identity,
# which is captured here into _identity for last_insert_id.
sub _execute {
  my $self = shift;
  my ($op) = @_;

  my ($rv, $sth, @bind) = $self->next::method(@_);

  if ($op eq 'insert') {
    $self->_identity($sth->fetchrow_array);
    $sth->finish;
  }

  return wantarray ? ($rv, $sth, @bind) : $rv;
}
340
341sub last_insert_id { shift->_identity }
342
# handles TEXT/IMAGE and transaction for last_insert_id
#
# Blob columns are stripped out before the actual INSERT and written
# afterwards via the ct_* API; when the SELECT MAX() identity hack is needed
# outside a transaction, the whole operation runs on the separate
# _writer_storage connection inside a txn_scope_guard.
sub insert {
  my $self = shift;
  my ($source, $to_insert) = @_;

  my $columns_info = $source->columns_info;

  my $identity_col =
    (first { $columns_info->{$_}{is_auto_increment} }
      keys %$columns_info )
    || '';

  # check for empty insert
  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
  # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
  # and computed columns)
  if (not %$to_insert) {
    for my $col ($source->columns) {
      next if $col eq $identity_col;

      my $info = $source->column_info($col);

      # computed columns are declared with data_type => undef (see POD)
      next if ref $info->{default_value} eq 'SCALAR'
        || (exists $info->{data_type} && (not defined $info->{data_type}));

      next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;

      $to_insert->{$col} = \'DEFAULT';
    }
  }

  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);

  # do we need the horrific SELECT MAX(COL) hack?
  my $dumb_last_insert_id =
    $identity_col
    && (not exists $to_insert->{$identity_col})
    && ($self->_identity_method||'') ne '@@IDENTITY';

  my $next = $self->next::can;

  # we are already in a transaction, or there are no blobs
  # and we don't need the PK - just (try to) do it
  if ($self->{transaction_depth}
        || (!$blob_cols && !$dumb_last_insert_id)
  ) {
    return $self->_insert (
      $next, $source, $to_insert, $blob_cols, $identity_col
    );
  }

  # otherwise use the _writer_storage to do the insert+transaction on another
  # connection
  my $guard = $self->_writer_storage->txn_scope_guard;

  my $updated_cols = $self->_writer_storage->_insert (
    $next, $source, $to_insert, $blob_cols, $identity_col
  );

  # propagate the identity captured on the writer connection back to us
  $self->_identity($self->_writer_storage->_identity);

  $guard->commit;

  return $updated_cols;
}
408
# Common insert path: run the actual INSERT via $next, then write any blob
# values using the complete row (including the freshly fetched identity) as
# the lookup key for _insert_blobs.
sub _insert {
  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;

  my $updated_cols = $self->$next ($source, $to_insert);

  my $final_row = {
    ($identity_col ?
      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
    %$to_insert,
    %$updated_cols,
  };

  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;

  return $updated_cols;
}
425
# Update with TEXT/IMAGE support: blob columns are first reset to '' (blobs
# cannot be bound as placeholders), then written through the ct_* API, then
# the remaining columns are updated - all atomically on the writer storage.
sub update {
  my $self = shift;
  my ($source, $fields, $where, @rest) = @_;

  my $blob_cols = $self->_remove_blob_cols($source, $fields);

  my $table = $source->name;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info;

  my $is_identity_update = $identity_col && defined $fields->{$identity_col};

  # no blobs involved - the stock update is sufficient
  return $self->next::method(@_) unless $blob_cols;

# If there are any blobs in $where, Sybase will return a descriptive error
# message.
# XXX blobs can still be used with a LIKE query, and this should be handled.

# update+blob update(s) done atomically on separate connection
  $self = $self->_writer_storage;

  my $guard = $self->txn_scope_guard;

# First update the blob columns to be updated to '' (taken from $fields, where
# it is originally put by _remove_blob_cols .)
  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;

# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.

  $self->next::method($source, \%blobs_to_empty, $where, @rest);

# Now update the blobs before the other columns in case the update of other
# columns makes the search condition invalid.
  $self->_update_blobs($source, $blob_cols, $where);

  my @res;
  if (%$fields) {
    # preserve the caller's calling context for the final column update
    if (wantarray) {
      @res = $self->next::method(@_);
    }
    elsif (defined wantarray) {
      $res[0] = $self->next::method(@_);
    }
    else {
      $self->next::method(@_);
    }
  }

  $guard->commit;

  return wantarray ? @res : $res[0];
}
482
# Multi-row insert. Uses the DBD::Sybase bulk (BCP) API on the dedicated
# bulkLogin connection when available; otherwise falls back to regular array
# inserts on the writer storage. Blob columns are stripped beforehand and
# written row-by-row afterwards. On charset incompatibility (-Y option error)
# the bulk storage is dropped and the whole call restarts via goto.
sub insert_bulk {
  my $self = shift;
  my ($source, $cols, $data) = @_;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info;

  my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;

  my @source_columns = $source->columns;

  my $use_bulk_api =
    $self->_bulk_storage &&
    $self->_get_dbh->{syb_has_blk};

  if ((not $use_bulk_api)
        &&
      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
        &&
      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
    carp <<'EOF';
Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
regular array inserts.
EOF
    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
  }

  if (not $use_bulk_api) {
    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);

# _execute_array uses a txn anyway, but it ends too early in case we need to
# select max(col) to get the identity for inserting blobs.
    ($self, my $guard) = $self->{transaction_depth} == 0 ?
      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
      :
      ($self, undef);

    # tells _prep_for_execute not to append the identity-fetch SELECT
    local $self->{insert_bulk} = 1;

    $self->next::method(@_);

    if ($blob_cols) {
      if ($is_identity_insert) {
        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
      }
      else {
        my @cols_with_identities = (@$cols, $identity_col);

        ## calculate identities
        # XXX This assumes identities always increase by 1, which may or may not
        # be true.
        my ($last_identity) =
          $self->_dbh->selectrow_array (
            $self->_fetch_identity_sql($source, $identity_col)
          );
        my @identities = (($last_identity - @$data + 1) .. $last_identity);

        my @data_with_identities = map [@$_, shift @identities], @$data;

        $self->_insert_blobs_array (
          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
        );
      }
    }

    $guard->commit if $guard;

    return;
  }

# otherwise, use the bulk API

# rearrange @$data so that columns are in database order
  my %orig_idx;
  @orig_idx{@$cols} = 0..$#$cols;

  my %new_idx;
  @new_idx{@source_columns} = 0..$#source_columns;

  my @new_data;
  for my $datum (@$data) {
    my $new_datum = [];
    for my $col (@source_columns) {
# identity data will be 'undef' if not $is_identity_insert
# columns with defaults will also be 'undef'
      $new_datum->[ $new_idx{$col} ] =
        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
    }
    push @new_data, $new_datum;
  }

# bcp identity index is 1-based
  my $identity_idx = exists $new_idx{$identity_col} ?
    $new_idx{$identity_col} + 1 : 0;

## Set a client-side conversion error handler, straight from DBD::Sybase docs.
# This ignores any data conversion errors detected by the client side libs, as
# they are usually harmless.
  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
    Sub::Name::subname insert_bulk => sub {
      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;

      return 1 if $errno == 36;

      carp
        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
        ($errmsg ? "\n$errmsg" : '') .
        ($osmsg ? "\n$osmsg" : '') .
        ($blkmsg ? "\n$blkmsg" : '');

      return 0;
  });

  my $exception = '';
  try {
    my $bulk = $self->_bulk_storage;

    my $guard = $bulk->txn_scope_guard;

## XXX get this to work instead of our own $sth
## will require SQLA or *Hacks changes for ordered columns
#    $bulk->next::method($source, \@source_columns, \@new_data, {
#      syb_bcp_attribs => {
#        identity_flag => $is_identity_insert,
#        identity_column => $identity_idx,
#      }
#    });
    my $sql = 'INSERT INTO ' .
      $bulk->sql_maker->_quote($source->name) . ' (' .
# colname list is ignored for BCP, but does no harm
      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
      ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';

## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
## better yet the version above. Should be fixed in DBD::Sybase .
    my $sth = $bulk->_get_dbh->prepare($sql,
#      'insert', # op
      {
        syb_bcp_attribs => {
          identity_flag => $is_identity_insert,
          identity_column => $identity_idx,
        }
      }
    );

    my @bind = map { [ $source_columns[$_] => $_ ] } (0 .. $#source_columns);

    $self->_execute_array(
      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
        $guard->commit
      }
    );

    $bulk->_query_end($sql);
  } catch {
    $exception = shift;
  };

  # always restore the original cslib handler, error or not
  DBD::Sybase::set_cslib_cb($orig_cslib_cb);

  if ($exception =~ /-Y option/) {
    my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
          . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
    ;
    $w .= "\n$exception" if $self->debug;
    carp $w;

    $self->_bulk_storage(undef);
    unshift @_, $self;
    goto \&insert_bulk;
  }
  elsif ($exception) {
# rollback makes the bulkLogin connection unusable
    $self->_bulk_storage->disconnect;
    $self->throw_exception($exception);
  }
}
664
# Delegate the array execute upstream, then invoke the optional callback
# (used by insert_bulk to commit the bulk transaction at the right moment).
sub _dbh_execute_array {
  my ($self, $sth, $tuple_status, $cb) = @_;

  my $row_count = $self->next::method($sth, $tuple_status);

  $cb->() if $cb;

  return $row_count;
}
673
# Make sure blobs are not bound as placeholders, and return any non-empty ones
# as a hash. Defined blob values are replaced in $fields with the literal ''
# (the real content is written later via the ct_* API); undef becomes NULL.
sub _remove_blob_cols {
  my ($self, $source, $fields) = @_;

  my %lobs;

  for my $name (keys %$fields) {
    next unless $self->_is_lob_column($source, $name);

    my $value = delete $fields->{$name};

    if (defined $value) {
      $fields->{$name} = \"''";
      $lobs{$name} = $value unless $value eq '';
    }
    else {
      $fields->{$name} = \'NULL';
    }
  }

  return keys %lobs ? \%lobs : undef;
}
696
# same for insert_bulk - operates on the column list / row array pair and
# returns a 2-D array of non-empty blob values indexed [row][column], or undef
# when no blobs were present.
sub _remove_blob_cols_array {
  my ($self, $source, $cols, $data) = @_;

  my @lob_grid;

  for my $col_idx (0 .. $#$cols) {
    next unless $self->_is_lob_column($source, $cols->[$col_idx]);

    for my $row_idx (0 .. $#$data) {
      my $value = delete $data->[$row_idx][$col_idx];

      if (defined $value) {
        $data->[$row_idx][$col_idx] = \"''";
        $lob_grid[$row_idx][$col_idx] = $value
          unless $value eq '';
      }
      else {
        $data->[$row_idx][$col_idx] = \'NULL';
      }
    }
  }

  return @lob_grid ? \@lob_grid : undef;
}
723
# Write TEXT/IMAGE values for every row matched by $where. Note that $where
# is NOT always a hashref of primary-key values - it can be an arbitrary
# condition, so only take the fast path when it is a HASH that pins down
# every primary key; otherwise resolve the condition to PKs with a select.
sub _update_blobs {
  my ($self, $source, $blob_cols, $where) = @_;

  my @primary_cols = try
    { $source->_pri_cols }
  catch {
    $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
  };

  my @pks_to_update;
  if (
    ref $where eq 'HASH'
      and
    @primary_cols == grep { defined $where->{$_} } @primary_cols
  ) {
    # fast path: the condition itself identifies the row(s) by full PK
    my %row_to_update;
    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
    @pks_to_update = \%row_to_update;
  }
  else {
    # general case: fetch the PKs of all affected rows first
    my $cursor = $self->select ($source, \@primary_cols, $where, {});
    @pks_to_update = map {
      my %row; @row{@primary_cols} = @$_; \%row
    } $cursor->all;
  }

  for my $ident (@pks_to_update) {
    $self->_insert_blobs($source, $blob_cols, $ident);
  }
}
754
# Write blob values for a single row identified by its primary key values in
# $row, using the DBD::Sybase ct_* streaming API (select the row, then
# ct_data_info / ct_prepare_send / ct_send_data / ct_finish_send in order).
sub _insert_blobs {
  my ($self, $source, $blob_cols, $row) = @_;
  my $dbh = $self->_get_dbh;

  my $table = $source->name;

  my %row = %$row;
  my @primary_cols = try
    { $source->_pri_cols }
  catch {
    $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
  };

  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);

  for my $col (keys %$blob_cols) {
    my $blob = $blob_cols->{$col};

    my %where = map { ($_, $row{$_}) } @primary_cols;

    # position a cursor on the target row; its $sth drives the ct_* calls
    my $cursor = $self->select ($source, [$col], \%where, {});
    $cursor->next;
    my $sth = $cursor->sth;

    if (not $sth) {
      $self->throw_exception(
          "Could not find row in table '$table' for blob update:\n"
        . (Dumper \%where)
      );
    }

    try {
      # fetch the text pointer for the column before sending data
      do {
        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
      } while $sth->fetch;

      $sth->func('ct_prepare_send') or die $sth->errstr;

      # log_on_update defaults to 1 unless configured via blob_setup
      my $log_on_update = $self->_blob_log_on_update;
      $log_on_update = 1 if not defined $log_on_update;

      $sth->func('CS_SET', 1, {
        total_txtlen => length($blob),
        log_on_update => $log_on_update
      }, 'ct_data_info') or die $sth->errstr;

      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;

      $sth->func('ct_finish_send') or die $sth->errstr;
    }
    catch {
      if ($self->using_freetds) {
        $self->throw_exception (
          "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
        );
      }
      else {
        $self->throw_exception($_);
      }
    }
    finally {
      $sth->finish if $sth;
    };
  }
}
821
# Write blob values for every row inserted via insert_bulk. $blob_cols is the
# 2-D [row][column] array built by _remove_blob_cols_array; each row's blobs
# are keyed back to their column names and handed to _insert_blobs.
sub _insert_blobs_array {
  my ($self, $source, $blob_cols, $cols, $data) = @_;

  for my $row_idx (0 .. $#$data) {
    my %row;
    @row{ @$cols } = @{ $data->[$row_idx] };

    my %blobs_for_row;
    for my $col_idx (0 .. $#$cols) {
      if (exists $blob_cols->[$row_idx][$col_idx]) {
        $blobs_for_row{ $cols->[$col_idx] } = $blob_cols->[$row_idx][$col_idx];
      }
    }

    $self->_insert_blobs ($source, \%blobs_for_row, \%row);
  }
}
841
842=head2 connect_call_datetime_setup
843
844Used as:
845
846 on_connect_call => 'datetime_setup'
847
8384a713 848In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
057db5ce 849
850 $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
851 $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
852
853On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
854L<DateTime::Format::Sybase>, which you will need to install.
855
This works for both C<DATETIME> and C<SMALLDATETIME> columns; note that
C<SMALLDATETIME> columns only have minute precision.
858
859=cut
860
# on_connect_call hook: configure the session's date format so DateTime
# inflation round-trips (see connect_call_datetime_setup in the POD).
sub connect_call_datetime_setup {
  my $self = shift;
  my $dbh  = $self->_get_dbh;

  if (not $dbh->can('syb_date_fmt')) {
    carp_once
      'Your DBD::Sybase is too old to support '
      .'DBIx::Class::InflateColumn::DateTime, please upgrade!';

    $dbh->do('SET DATEFORMAT mdy');
    return 1;
  }

  # amazingly, this works with FreeTDS
  $dbh->syb_date_fmt('ISO_strict');
}
878
057db5ce 879
# Begin a transaction, special-casing the bulkLogin storage which is
# permanently inside a transaction and may only issue BEGIN TRAN once.
sub _exec_txn_begin {
  my $self = shift;

# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
# TRAN once. However, we need to make sure there's a $dbh.
  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;

  $self->next::method(@_);

  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
}
891
057db5ce 892# savepoint support using ASE syntax
893
# Create a savepoint - ASE spells this SAVE TRANSACTION <name>.
sub _exec_svp_begin {
  my ($self, $savepoint) = @_;

  $self->_dbh->do(sprintf 'SAVE TRANSACTION %s', $savepoint);
}
899
# A new SAVE TRANSACTION with the same name releases the previous one,
# so releasing a savepoint is a no-op on ASE.
sub _exec_svp_release { return 1 }
057db5ce 902
# Roll back to a named savepoint using ASE's ROLLBACK TRANSACTION <name>.
sub _exec_svp_rollback {
  my ($self, $savepoint) = @_;

  $self->_dbh->do(sprintf 'ROLLBACK TRANSACTION %s', $savepoint);
}
908
9091;
910
911=head1 Schema::Loader Support
912
290da7d6 913As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
c1e5a9ac 914most versions of Sybase ASE.
057db5ce 915
916=head1 FreeTDS
917
918This driver supports L<DBD::Sybase> compiled against FreeTDS
919(L<http://www.freetds.org/>) to the best of our ability, however it is
920recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
921libraries. They are a part of the Sybase ASE distribution:
922
923The Open Client FAQ is here:
924L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
925
926Sybase ASE for Linux (which comes with the Open Client libraries) may be
927downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
928
929To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
930
931 perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
932
c1e5a9ac 933It is recommended to set C<tds version> for your ASE server to C<5.0> in
934C</etc/freetds/freetds.conf>.
935
936Some versions or configurations of the libraries involved will not support
937placeholders, in which case the storage will be reblessed to
057db5ce 938L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
939
940In some configurations, placeholders will work but will throw implicit type
941conversion errors for anything that's not expecting a string. In such a case,
942the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
943automatically set, which you may enable on connection with
c1e5a9ac 944L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>.
945The type info for the C<CAST>s is taken from the
946L<DBIx::Class::ResultSource/data_type> definitions in your Result classes, and
947are mapped to a Sybase type (if it isn't already) using a mapping based on
948L<SQL::Translator>.
057db5ce 949
48580715 950In other configurations, placeholders will work just as they do with the Sybase
057db5ce 951Open Client libraries.
952
953Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
954
955=head1 INSERTS WITH PLACEHOLDERS
956
957With placeholders enabled, inserts are done in a transaction so that there are
958no concurrency issues with getting the inserted identity value using
959C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
960mode.
961
962In addition, they are done on a separate connection so that it's possible to
963have active cursors when doing an insert.
964
965When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
c1e5a9ac 966are unnecessary and not used, as there are no concurrency issues with C<SELECT
967@@IDENTITY> which is a session variable.
057db5ce 968
969=head1 TRANSACTIONS
970
c1e5a9ac 971Due to limitations of the TDS protocol and L<DBD::Sybase>, you cannot begin a
972transaction while there are active cursors, nor can you use multiple active
973cursors within a transaction. An active cursor is, for example, a
057db5ce 974L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
975C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
976
977For example, this will not work:
978
979 $schema->txn_do(sub {
980 my $rs = $schema->resultset('Book');
981 while (my $row = $rs->next) {
982 $schema->resultset('MetaData')->create({
983 book_id => $row->id,
984 ...
985 });
986 }
987 });
988
989This won't either:
990
991 my $first_row = $large_rs->first;
992 $schema->txn_do(sub { ... });
993
994Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
995are not affected, as they are done on an extra database handle.
996
997Some workarounds:
998
999=over 4
1000
1001=item * use L<DBIx::Class::Storage::DBI::Replicated>
1002
1003=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
1004
1005=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
1006
1007=back
1008
1009=head1 MAXIMUM CONNECTIONS
1010
1011The TDS protocol makes separate connections to the server for active statements
1012in the background. By default the number of such connections is limited to 25,
1013on both the client side and the server side.
1014
1015This is a bit too low for a complex L<DBIx::Class> application, so on connection
1016the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
1017can override it to whatever setting you like in the DSN.
1018
1019See
1020L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
1021for information on changing the setting on the server side.
1022
1023=head1 DATES
1024
1025See L</connect_call_datetime_setup> to setup date formats
1026for L<DBIx::Class::InflateColumn::DateTime>.
1027
1028=head1 TEXT/IMAGE COLUMNS
1029
1030L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
1031C<TEXT/IMAGE> columns.
1032
Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
1034
1035 $schema->storage->dbh->do("SET TEXTSIZE $bytes");
1036
1037or
1038
1039 $schema->storage->set_textsize($bytes);
1040
1041instead.
1042
1043However, the C<LongReadLen> you pass in
8384a713 1044L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
1045equivalent C<SET TEXTSIZE> command on connection.
057db5ce 1046
8384a713 1047See L</connect_call_blob_setup> for a
1048L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
1049with C<IMAGE> columns.
057db5ce 1050
1051=head1 BULK API
1052
1053The experimental L<DBD::Sybase> Bulk API support is used for
1054L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
1055on a separate connection.
1056
1057To use this feature effectively, use a large number of rows for each
1058L<populate|DBIx::Class::ResultSet/populate> call, eg.:
1059
1060 while (my $rows = $data_source->get_100_rows()) {
1061 $rs->populate($rows);
1062 }
1063
1064B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
1065calls in your C<Result> classes B<must> list columns in database order for this
1066to work. Also, you may have to unset the C<LANG> environment variable before
c1e5a9ac 1067loading your app, as C<BCP -Y> is not yet supported in DBD::Sybase .
057db5ce 1068
1069When inserting IMAGE columns using this method, you'll need to use
1070L</connect_call_blob_setup> as well.
1071
6476fd66 1072=head1 COMPUTED COLUMNS
1073
1074If you have columns such as:
1075
1076 created_dtm AS getdate()
1077
1078represent them in your Result classes as:
1079
1080 created_dtm => {
1081 data_type => undef,
1082 default_value => \'getdate()',
1083 is_nullable => 0,
c1e5a9ac 1084 inflate_datetime => 1,
6476fd66 1085 }
1086
1087The C<data_type> must exist and must be C<undef>. Then empty inserts will work
1088on tables with such columns.
1089
1090=head1 TIMESTAMP COLUMNS
1091
1092C<timestamp> columns in Sybase ASE are not really timestamps, see:
1093L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
1094
1095They should be defined in your Result classes as:
1096
1097 ts => {
1098 data_type => 'timestamp',
1099 is_nullable => 0,
1100 inflate_datetime => 0,
1101 }
1102
1103The C<<inflate_datetime => 0>> is necessary if you use
1104L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
1105be able to read these values.
1106
1107The values will come back as hexadecimal.
1108
057db5ce 1109=head1 TODO
1110
1111=over
1112
1113=item *
1114
1115Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
1116any active cursors, using eager cursors.
1117
1118=item *
1119
1120Real limits and limited counts using stored procedures deployed on startup.
1121
1122=item *
1123
057db5ce 1124Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
1125
1126=item *
1127
1128bulk_insert using prepare_cached (see comments.)
1129
1130=back
1131
1132=head1 AUTHOR
1133
8138f96a 1134See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
057db5ce 1135
1136=head1 LICENSE
1137
1138You may distribute this code under the same terms as Perl itself.
1139
1140=cut
1141# vim:sts=2 sw=2: