057db5ce 1package DBIx::Class::Storage::DBI::Sybase::ASE;
2
3use strict;
4use warnings;
5
6use base qw/
fabbd5cc 7 DBIx::Class::Storage::DBI::Sybase
8 DBIx::Class::Storage::DBI::AutoCast
9 DBIx::Class::Storage::DBI::IdentityInsert
057db5ce 10/;
11use mro 'c3';
70c28808 12use DBIx::Class::Carp;
fabbd5cc 13use Scalar::Util qw/blessed weaken/;
6298a324 14use List::Util 'first';
057db5ce 15use Sub::Name();
6298a324 16use Data::Dumper::Concise 'Dumper';
ed7ab0f4 17use Try::Tiny;
fabbd5cc 18use Context::Preserve 'preserve_context';
fd323bf1 19use namespace::clean;
057db5ce 20
6a247f33 21__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
2b8cc2f2 22__PACKAGE__->sql_quote_char ([qw/[ ]/]);
c6b7885f 23__PACKAGE__->datetime_parser_type(
24 'DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format'
25);
6a247f33 26
057db5ce 27__PACKAGE__->mk_group_accessors('simple' =>
fabbd5cc 28 qw/_identity _identity_method _blob_log_on_update _parent_storage
29 _writer_storage _is_writer_storage
057db5ce 30 _bulk_storage _is_bulk_storage _began_bulk_work
fabbd5cc 31 /
057db5ce 32);
33
deabd575 34
057db5ce 35my @also_proxy_to_extra_storages = qw/
36 connect_call_set_auto_cast auto_cast connect_call_blob_setup
37 connect_call_datetime_setup
38
39 disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
40 auto_savepoint unsafe cursor_class debug debugobj schema
41/;
42
43=head1 NAME
44
45DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
46DBIx::Class
47
48=head1 SYNOPSIS
49
50This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
51
52=head1 DESCRIPTION
53
54If your version of Sybase does not support placeholders, then your storage will
55be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
56You can also enable that driver explicitly; see that module's documentation for
57details.
58
59With this driver there is unfortunately no way to get the C<last_insert_id>
60without doing a C<SELECT MAX(col)>. This is done safely in a transaction
61(locking the table). See L</INSERTS WITH PLACEHOLDERS>.
62
8384a713 63A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
057db5ce 64
65 on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
66
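In a complete (and purely hypothetical) connection the above goes into the
attribute hash passed to C<connect>, e.g.:

  my $schema = My::Schema->connect(
    'dbi:Sybase:server=SYBASE;database=mydb',   # hypothetical DSN
    $user, $pass,
    {
      AutoCommit      => 1,
      on_connect_call => [
        ['datetime_setup'],
        ['blob_setup', log_on_update => 0],
      ],
    },
  );
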
67=head1 METHODS
68
69=cut
70
71sub _rebless {
72 my $self = shift;
73
74 my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
75
76 if ($self->using_freetds) {
70171cd7 77 carp_once <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
057db5ce 78
79You are using FreeTDS with Sybase.
80
81We will do our best to support this configuration, but please consider this
82support experimental.
83
84TEXT/IMAGE columns will definitely not work.
85
86You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
87instead.
88
89See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
90
91To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
92variable.
93EOF
94
bbdda281 95 if (not $self->_use_typeless_placeholders) {
96 if ($self->_use_placeholders) {
057db5ce 97 $self->auto_cast(1);
98 }
99 else {
100 $self->ensure_class_loaded($no_bind_vars);
101 bless $self, $no_bind_vars;
102 $self->_rebless;
103 }
104 }
105 }
106
107 elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
108 # not necessarily FreeTDS, but no placeholders nevertheless
109 $self->ensure_class_loaded($no_bind_vars);
110 bless $self, $no_bind_vars;
111 $self->_rebless;
112 }
113 # this is highly unlikely, but we check just in case
bbdda281 114 elsif (not $self->_use_typeless_placeholders) {
057db5ce 115 $self->auto_cast(1);
116 }
117}
118
119sub _init {
120 my $self = shift;
121 $self->_set_max_connect(256);
122
123# create storage for insert/(update blob) transactions,
124# unless this is that storage
fabbd5cc 125 return if $self->_parent_storage;
057db5ce 126
127 my $writer_storage = (ref $self)->new;
128
fabbd5cc 129 $writer_storage->_is_writer_storage(1); # just info
057db5ce 130 $writer_storage->connect_info($self->connect_info);
131 $writer_storage->auto_cast($self->auto_cast);
132
fabbd5cc 133 weaken ($writer_storage->{_parent_storage} = $self);
057db5ce 134 $self->_writer_storage($writer_storage);
135
136# create a bulk storage unless connect_info is a coderef
137 return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
138
139 my $bulk_storage = (ref $self)->new;
140
057db5ce 141 $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
142 $bulk_storage->connect_info($self->connect_info);
143
144# this is why a coderef connect_info won't do - we must append to the DSN string
145 $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
146
fabbd5cc 147 weaken ($bulk_storage->{_parent_storage} = $self);
057db5ce 148 $self->_bulk_storage($bulk_storage);
149}
150
151for my $method (@also_proxy_to_extra_storages) {
152 no strict 'refs';
153 no warnings 'redefine';
154
155 my $replaced = __PACKAGE__->can($method);
156
157 *{$method} = Sub::Name::subname $method => sub {
158 my $self = shift;
159 $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
160 $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
161 return $self->$replaced(@_);
162 };
163}
164
165sub disconnect {
166 my $self = shift;
167
168# Even though we call $sth->finish for uses of the bulk API, there's still an
169# "active statement" warning on disconnect, which we throw away here.
170# This is due to the bug described in insert_bulk.
171# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
172 local $SIG{__WARN__} = sub {
173 warn $_[0] unless $_[0] =~ /active statement/i;
174 } if $self->_is_bulk_storage;
175
176# so that next transaction gets a dbh
177 $self->_began_bulk_work(0) if $self->_is_bulk_storage;
178
179 $self->next::method;
180}
181
c1e5a9ac 182# This is only invoked for FreeTDS drivers by ::Storage::DBI::Sybase::FreeTDS
183sub _set_autocommit_stmt {
184 my ($self, $on) = @_;
185
186 return 'SET CHAINED ' . ($on ? 'OFF' : 'ON');
187}
188
057db5ce 189# Set up session settings for Sybase databases for the connection.
190#
191# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
192# DBD::Sybase (since we don't know how DBD::Sybase was compiled). If, however,
193# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
194# only want when AutoCommit is off.
057db5ce 195sub _run_connection_actions {
196 my $self = shift;
197
198 if ($self->_is_bulk_storage) {
c1e5a9ac 199 # this should be cleared on every reconnect
057db5ce 200 $self->_began_bulk_work(0);
201 return;
202 }
203
c1e5a9ac 204 $self->_dbh->{syb_chained_txn} = 1
205 unless $self->using_freetds;
057db5ce 206
207 $self->next::method(@_);
208}
209
210=head2 connect_call_blob_setup
211
212Used as:
213
214 on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
215
216Sets C<< $dbh->{syb_binary_images} = 1 >> so that C<IMAGE> data is returned as
217raw binary instead of as a hex string.
218
219Recommended.
220
221Also sets the C<log_on_update> value for blob write operations. The default is
222C<1>, but C<0> is better if your database is configured for it.
223
224See
225L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
226
227=cut
228
229sub connect_call_blob_setup {
230 my $self = shift;
231 my %args = @_;
232 my $dbh = $self->_dbh;
233 $dbh->{syb_binary_images} = 1;
234
235 $self->_blob_log_on_update($args{log_on_update})
236 if exists $args{log_on_update};
237}
238
057db5ce 239sub _is_lob_column {
240 my ($self, $source, $column) = @_;
241
242 return $self->_is_lob_type($source->column_info($column)->{data_type});
243}
244
245sub _prep_for_execute {
246 my $self = shift;
fabbd5cc 247 my ($op, $ident) = @_;
248
249 #
250### This is commented out because all tests pass. However I am leaving it
251### here as it may prove necessary (can't think through all combinations)
252### BTW it doesn't currently work exactly - need better sensitivity to
253 # currently set value
254 #
255 # inherit these from the parent for the duration of _prep_for_execute
256 # Don't know how to make a localizing loop with if's, otherwise I would
257 #local $self->{_autoinc_supplied_for_op}
258 # = $self->_parent_storage->_autoinc_supplied_for_op
259 #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
260 #local $self->{_perform_autoinc_retrieval}
261 # = $self->_parent_storage->_perform_autoinc_retrieval
262 #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
057db5ce 263
264 my ($sql, $bind) = $self->next::method (@_);
265
fabbd5cc 266 if (my $identity_col = $self->_perform_autoinc_retrieval) {
267 $sql .= "\n" . $self->_fetch_identity_sql($ident, $identity_col)
057db5ce 268 }
269
270 return ($sql, $bind);
271}
272
fabbd5cc 273sub _fetch_identity_sql {
274 my ($self, $source, $col) = @_;
057db5ce 275
fabbd5cc 276 return sprintf ("SELECT MAX(%s) FROM %s",
277 map { $self->sql_maker->_quote ($_) } ($col, $source->from)
278 );
057db5ce 279}
280
281# Stolen from SQLT, with some modifications. This is a makeshift
282# solution before a sane type-mapping library is available, thus
283# the 'our' for easy overrides.
284our %TYPE_MAPPING = (
285 number => 'numeric',
286 money => 'money',
287 varchar => 'varchar',
288 varchar2 => 'varchar',
289 timestamp => 'datetime',
290 text => 'varchar',
291 real => 'double precision',
292 comment => 'text',
293 bit => 'bit',
294 tinyint => 'smallint',
295 float => 'double precision',
296 serial => 'numeric',
297 bigserial => 'numeric',
298 boolean => 'varchar',
299 long => 'varchar',
300);
301
302sub _native_data_type {
303 my ($self, $type) = @_;
304
305 $type = lc $type;
306 $type =~ s/\s* identity//x;
307
308 return uc($TYPE_MAPPING{$type} || $type);
309}
310
057db5ce 311
312sub _execute {
313 my $self = shift;
314 my ($op) = @_;
315
0e773352 316 my ($rv, $sth, @bind) = $self->next::method(@_);
057db5ce 317
fabbd5cc 318 $self->_identity( ($sth->fetchall_arrayref)[0][0] )
319 if $self->_perform_autoinc_retrieval;
057db5ce 320
321 return wantarray ? ($rv, $sth, @bind) : $rv;
322}
323
324sub last_insert_id { shift->_identity }
325
326# handles TEXT/IMAGE and transaction for last_insert_id
327sub insert {
328 my $self = shift;
329 my ($source, $to_insert) = @_;
330
e366f807 331 my $columns_info = $source->columns_info;
332
6298a324 333 my $identity_col =
e366f807 334 (first { $columns_info->{$_}{is_auto_increment} }
335 keys %$columns_info )
6298a324 336 || '';
057db5ce 337
fabbd5cc 338 # FIXME - this is duplication from DBI.pm. When refactored towards
339 # the LobWriter this can be folded back where it belongs.
340 local $self->{_autoinc_supplied_for_op} = exists $to_insert->{$identity_col}
341 ? 1
342 : 0
343 ;
344 local $self->{_perform_autoinc_retrieval} =
345 ($identity_col and ! exists $to_insert->{$identity_col})
346 ? $identity_col
347 : undef
348 ;
349
057db5ce 350 # check for empty insert
351 # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
6469dabf 352 # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
353 # and computed columns)
057db5ce 354 if (not %$to_insert) {
355 for my $col ($source->columns) {
356 next if $col eq $identity_col;
6469dabf 357
358 my $info = $source->column_info($col);
359
360 next if ref $info->{default_value} eq 'SCALAR'
361 || (exists $info->{data_type} && (not defined $info->{data_type}));
362
363 next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
364
057db5ce 365 $to_insert->{$col} = \'DEFAULT';
366 }
367 }
368
369 my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
370
371 # do we need the horrific SELECT MAX(COL) hack?
fabbd5cc 372 my $need_dumb_last_insert_id = (
373 $self->_perform_autoinc_retrieval
374 &&
375 ($self->_identity_method||'') ne '@@IDENTITY'
376 );
057db5ce 377
378 my $next = $self->next::can;
379
380 # we are already in a transaction, or there are no blobs
381 # and we don't need the PK - just (try to) do it
382 if ($self->{transaction_depth}
fabbd5cc 383 || (!$blob_cols && !$need_dumb_last_insert_id)
057db5ce 384 ) {
385 return $self->_insert (
386 $next, $source, $to_insert, $blob_cols, $identity_col
387 );
388 }
389
390 # otherwise use the _writer_storage to do the insert+transaction on another
391 # connection
392 my $guard = $self->_writer_storage->txn_scope_guard;
393
394 my $updated_cols = $self->_writer_storage->_insert (
395 $next, $source, $to_insert, $blob_cols, $identity_col
396 );
397
398 $self->_identity($self->_writer_storage->_identity);
399
400 $guard->commit;
401
402 return $updated_cols;
403}
404
405sub _insert {
406 my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
407
408 my $updated_cols = $self->$next ($source, $to_insert);
409
410 my $final_row = {
411 ($identity_col ?
412 ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
413 %$to_insert,
414 %$updated_cols,
415 };
416
417 $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
418
419 return $updated_cols;
420}
421
422sub update {
423 my $self = shift;
424 my ($source, $fields, $where, @rest) = @_;
425
fabbd5cc 426 #
427 # When *updating* identities, ASE requires SET IDENTITY_UPDATE called
428 #
429 if (my $blob_cols = $self->_remove_blob_cols($source, $fields)) {
057db5ce 430
fabbd5cc 431 # If there are any blobs in $where, Sybase will return a descriptive error
432 # message.
433 # XXX blobs can still be used with a LIKE query, and this should be handled.
057db5ce 434
fabbd5cc 435 # update+blob update(s) done atomically on separate connection
436 $self = $self->_writer_storage;
057db5ce 437
fabbd5cc 438 my $guard = $self->txn_scope_guard;
057db5ce 439
fabbd5cc 440 # First update the blob columns to be updated to '' (taken from $fields, where
441 # it is originally put by _remove_blob_cols .)
442 my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
057db5ce 443
fabbd5cc 444 # We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
445 $self->next::method($source, \%blobs_to_empty, $where, @rest);
057db5ce 446
fabbd5cc 447 # Now update the blobs before the other columns in case the update of other
448 # columns makes the search condition invalid.
449 my $rv = $self->_update_blobs($source, $blob_cols, $where);
057db5ce 450
fabbd5cc 451 if (keys %$fields) {
057db5ce 452
fabbd5cc 453 # Now set the identity update flags for the actual update
454 local $self->{_autoinc_supplied_for_op} = (first
455 { $_->{is_auto_increment} }
456 values %{ $source->columns_info([ keys %$fields ]) }
457 ) ? 1 : 0;
057db5ce 458
fabbd5cc 459 my $next = $self->next::can;
460 my $args = \@_;
461 return preserve_context {
462 $self->$next(@$args);
463 } after => sub { $guard->commit };
057db5ce 464 }
465 else {
fabbd5cc 466 $guard->commit;
467 return $rv;
057db5ce 468 }
469 }
fabbd5cc 470 else {
471 # Set the identity update flags for the actual update
472 local $self->{_autoinc_supplied_for_op} = (first
473 { $_->{is_auto_increment} }
474 values %{ $source->columns_info([ keys %$fields ]) }
475 ) ? 1 : 0;
476
477 return $self->next::method(@_);
478 }
057db5ce 479}
480
481sub insert_bulk {
482 my $self = shift;
483 my ($source, $cols, $data) = @_;
484
e366f807 485 my $columns_info = $source->columns_info;
486
6298a324 487 my $identity_col =
e366f807 488 first { $columns_info->{$_}{is_auto_increment} }
489 keys %$columns_info;
057db5ce 490
fabbd5cc 491 # FIXME - this is duplication from DBI.pm. When refactored towards
492 # the LobWriter this can be folded back where it belongs.
493 local $self->{_autoinc_supplied_for_op} =
494 (first { $_ eq $identity_col } @$cols)
495 ? 1
496 : 0
497 ;
057db5ce 498
057db5ce 499 my $use_bulk_api =
500 $self->_bulk_storage &&
501 $self->_get_dbh->{syb_has_blk};
502
6d5679b2 503 if (! $use_bulk_api and ref($self->_dbi_connect_info->[0]) eq 'CODE') {
504 carp_unique( join ' ',
505 'Bulk API support disabled due to use of a CODEREF connect_info.',
506 'Reverting to regular array inserts.',
507 );
057db5ce 508 }
509
510 if (not $use_bulk_api) {
511 my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
512
52cef7e3 513# next::method uses a txn anyway, but it ends too early in case we need to
057db5ce 514# select max(col) to get the identity for inserting blobs.
515 ($self, my $guard) = $self->{transaction_depth} == 0 ?
516 ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
517 :
518 ($self, undef);
519
057db5ce 520 $self->next::method(@_);
521
522 if ($blob_cols) {
fabbd5cc 523 if ($self->_autoinc_supplied_for_op) {
057db5ce 524 $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
525 }
526 else {
527 my @cols_with_identities = (@$cols, $identity_col);
528
529 ## calculate identities
530 # XXX This assumes identities always increase by 1, which may or may not
531 # be true.
532 my ($last_identity) =
533 $self->_dbh->selectrow_array (
534 $self->_fetch_identity_sql($source, $identity_col)
535 );
536 my @identities = (($last_identity - @$data + 1) .. $last_identity);
537
538 my @data_with_identities = map [@$_, shift @identities], @$data;
539
540 $self->_insert_blobs_array (
541 $source, $blob_cols, \@cols_with_identities, \@data_with_identities
542 );
543 }
544 }
545
546 $guard->commit if $guard;
547
548 return;
549 }
550
551# otherwise, use the bulk API
552
553# rearrange @$data so that columns are in database order
6d5679b2 554# and so we submit a full column list
555 my %orig_order = map { $cols->[$_] => $_ } 0..$#$cols;
057db5ce 556
6d5679b2 557 my @source_columns = $source->columns;
558
559 # bcp identity index is 1-based
560 my $identity_idx = first { $source_columns[$_] eq $identity_col } (0..$#source_columns);
561 $identity_idx = defined $identity_idx ? $identity_idx + 1 : 0;
057db5ce 562
563 my @new_data;
6d5679b2 564 for my $slice_idx (0..$#$data) {
565 push @new_data, [map {
fabbd5cc 566 # identity data will be 'undef' if not _autoinc_supplied_for_op()
6d5679b2 567 # columns with defaults will also be 'undef'
568 exists $orig_order{$_}
569 ? $data->[$slice_idx][$orig_order{$_}]
570 : undef
571 } @source_columns];
057db5ce 572 }
573
6d5679b2 574 my $proto_bind = $self->_resolve_bindattrs(
575 $source,
576 [map {
577 [ { dbic_colname => $source_columns[$_], _bind_data_slice_idx => $_ }
578 => $new_data[0][$_] ]
579 } (0 ..$#source_columns) ],
580 $columns_info
581 );
057db5ce 582
583## Set a client-side conversion error handler, straight from DBD::Sybase docs.
584# This ignores any data conversion errors detected by the client side libs, as
585# they are usually harmless.
586 my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
587 Sub::Name::subname insert_bulk => sub {
588 my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
589
590 return 1 if $errno == 36;
591
592 carp
593 "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
594 ($errmsg ? "\n$errmsg" : '') .
595 ($osmsg ? "\n$osmsg" : '') .
596 ($blkmsg ? "\n$blkmsg" : '');
597
598 return 0;
599 });
600
4edfce2f 601 my $exception = '';
ed7ab0f4 602 try {
057db5ce 603 my $bulk = $self->_bulk_storage;
604
605 my $guard = $bulk->txn_scope_guard;
606
6d5679b2 607## FIXME - once this is done - address the FIXME on finish() below
057db5ce 608## XXX get this to work instead of our own $sth
609## will require SQLA or *Hacks changes for ordered columns
610# $bulk->next::method($source, \@source_columns, \@new_data, {
611# syb_bcp_attribs => {
fabbd5cc 612# identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
057db5ce 613# identity_column => $identity_idx,
614# }
615# });
616 my $sql = 'INSERT INTO ' .
617 $bulk->sql_maker->_quote($source->name) . ' (' .
618# colname list is ignored for BCP, but does no harm
619 (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
620 ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
621
622## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
623## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
624## better yet the version above. Should be fixed in DBD::Sybase .
625 my $sth = $bulk->_get_dbh->prepare($sql,
626# 'insert', # op
627 {
628 syb_bcp_attribs => {
fabbd5cc 629 identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
057db5ce 630 identity_column => $identity_idx,
631 }
632 }
633 );
634
6d5679b2 635 {
636 # FIXME the $sth->finish in _execute_array does a rollback for some
637 # reason. Disable it temporarily until we fix the SQLMaker thing above
638 no warnings 'redefine';
639 no strict 'refs';
640 local *{ref($sth).'::finish'} = sub {};
057db5ce 641
52cef7e3 642 $self->_dbh_execute_for_fetch(
6d5679b2 643 $source, $sth, $proto_bind, \@source_columns, \@new_data
644 );
645 }
646
647 $guard->commit;
057db5ce 648
649 $bulk->_query_end($sql);
ed7ab0f4 650 } catch {
651 $exception = shift;
057db5ce 652 };
653
057db5ce 654 DBD::Sybase::set_cslib_cb($orig_cslib_cb);
655
656 if ($exception =~ /-Y option/) {
f32e99f9 657 my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
658 . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
659 ;
660 $w .= "\n$exception" if $self->debug;
661 carp $w;
057db5ce 662
057db5ce 663 $self->_bulk_storage(undef);
664 unshift @_, $self;
665 goto \&insert_bulk;
666 }
667 elsif ($exception) {
668# rollback makes the bulkLogin connection unusable
669 $self->_bulk_storage->disconnect;
670 $self->throw_exception($exception);
671 }
672}
673
057db5ce 674# Make sure blobs are not bound as placeholders, and return any non-empty ones
675# as a hash.
676sub _remove_blob_cols {
677 my ($self, $source, $fields) = @_;
678
679 my %blob_cols;
680
681 for my $col (keys %$fields) {
682 if ($self->_is_lob_column($source, $col)) {
683 my $blob_val = delete $fields->{$col};
684 if (not defined $blob_val) {
685 $fields->{$col} = \'NULL';
686 }
687 else {
688 $fields->{$col} = \"''";
689 $blob_cols{$col} = $blob_val unless $blob_val eq '';
690 }
691 }
692 }
693
694 return %blob_cols ? \%blob_cols : undef;
695}
696
697# same for insert_bulk
698sub _remove_blob_cols_array {
699 my ($self, $source, $cols, $data) = @_;
700
701 my @blob_cols;
702
703 for my $i (0..$#$cols) {
704 my $col = $cols->[$i];
705
706 if ($self->_is_lob_column($source, $col)) {
707 for my $j (0..$#$data) {
708 my $blob_val = delete $data->[$j][$i];
709 if (not defined $blob_val) {
710 $data->[$j][$i] = \'NULL';
711 }
712 else {
713 $data->[$j][$i] = \"''";
714 $blob_cols[$j][$i] = $blob_val
715 unless $blob_val eq '';
716 }
717 }
718 }
719 }
720
721 return @blob_cols ? \@blob_cols : undef;
722}
723
724sub _update_blobs {
725 my ($self, $source, $blob_cols, $where) = @_;
726
9780718f 727 my @primary_cols = try
728 { $source->_pri_cols }
729 catch {
730 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
731 };
057db5ce 732
564986d6 733 my @pks_to_update;
734 if (
735 ref $where eq 'HASH'
736 and
737 @primary_cols == grep { defined $where->{$_} } @primary_cols
738 ) {
057db5ce 739 my %row_to_update;
740 @row_to_update{@primary_cols} = @{$where}{@primary_cols};
564986d6 741 @pks_to_update = \%row_to_update;
742 }
743 else {
057db5ce 744 my $cursor = $self->select ($source, \@primary_cols, $where, {});
564986d6 745 @pks_to_update = map {
057db5ce 746 my %row; @row{@primary_cols} = @$_; \%row
747 } $cursor->all;
748 }
749
564986d6 750 for my $ident (@pks_to_update) {
751 $self->_insert_blobs($source, $blob_cols, $ident);
057db5ce 752 }
753}
754
755sub _insert_blobs {
756 my ($self, $source, $blob_cols, $row) = @_;
757 my $dbh = $self->_get_dbh;
758
759 my $table = $source->name;
760
761 my %row = %$row;
9780718f 762 my @primary_cols = try
763 { $source->_pri_cols }
764 catch {
765 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
766 };
057db5ce 767
768 $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
769 if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
770
771 for my $col (keys %$blob_cols) {
772 my $blob = $blob_cols->{$col};
773
774 my %where = map { ($_, $row{$_}) } @primary_cols;
775
776 my $cursor = $self->select ($source, [$col], \%where, {});
777 $cursor->next;
778 my $sth = $cursor->sth;
779
780 if (not $sth) {
057db5ce 781 $self->throw_exception(
782 "Could not find row in table '$table' for blob update:\n"
6298a324 783 . (Dumper \%where)
057db5ce 784 );
785 }
786
9780718f 787 try {
057db5ce 788 do {
789 $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
790 } while $sth->fetch;
791
792 $sth->func('ct_prepare_send') or die $sth->errstr;
793
794 my $log_on_update = $self->_blob_log_on_update;
795 $log_on_update = 1 if not defined $log_on_update;
796
797 $sth->func('CS_SET', 1, {
798 total_txtlen => length($blob),
799 log_on_update => $log_on_update
800 }, 'ct_data_info') or die $sth->errstr;
801
802 $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
803
804 $sth->func('ct_finish_send') or die $sth->errstr;
9780718f 805 }
806 catch {
057db5ce 807 if ($self->using_freetds) {
808 $self->throw_exception (
9780718f 809 "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
057db5ce 810 );
9780718f 811 }
812 else {
813 $self->throw_exception($_);
057db5ce 814 }
815 }
9780718f 816 finally {
817 $sth->finish if $sth;
818 };
057db5ce 819 }
820}
821
822sub _insert_blobs_array {
823 my ($self, $source, $blob_cols, $cols, $data) = @_;
824
825 for my $i (0..$#$data) {
826 my $datum = $data->[$i];
827
828 my %row;
829 @row{ @$cols } = @$datum;
830
831 my %blob_vals;
832 for my $j (0..$#$cols) {
833 if (exists $blob_cols->[$i][$j]) {
834 $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
835 }
836 }
837
838 $self->_insert_blobs ($source, \%blob_vals, \%row);
839 }
840}
841
842=head2 connect_call_datetime_setup
843
844Used as:
845
846 on_connect_call => 'datetime_setup'
847
8384a713 848In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
057db5ce 849
850 $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
851 $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
852
c1e5a9ac 853This works for both C<DATETIME> and C<SMALLDATETIME> columns; note that
057db5ce 854C<SMALLDATETIME> columns only have minute precision.
855
856=cut
857
70c28808 858sub connect_call_datetime_setup {
859 my $self = shift;
860 my $dbh = $self->_get_dbh;
057db5ce 861
70c28808 862 if ($dbh->can('syb_date_fmt')) {
863 # amazingly, this works with FreeTDS
864 $dbh->syb_date_fmt('ISO_strict');
865 }
866 else {
867 carp_once
868 'Your DBD::Sybase is too old to support '
869 .'DBIx::Class::InflateColumn::DateTime, please upgrade!';
057db5ce 870
c6b7885f 871 # FIXME - in retrospect this is a rather bad US-centric choice
872 # of format. Not changing as a bugwards compat, though in reality
873 # the only piece that sees the results of $dt object formatting
874 # (as opposed to parsing) is the database itself, so theoretically
875 # changing both this SET command and the formatter definition of
876 # ::S::D::Sybase::ASE::DateTime::Format below should be safe and
877 # transparent
878
057db5ce 879 $dbh->do('SET DATEFORMAT mdy');
057db5ce 880 }
881}
882
057db5ce 883
90d7422f 884sub _exec_txn_begin {
057db5ce 885 my $self = shift;
886
887# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
888# TRAN once. However, we need to make sure there's a $dbh.
889 return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
890
891 $self->next::method(@_);
892
057db5ce 893 $self->_began_bulk_work(1) if $self->_is_bulk_storage;
894}
895
057db5ce 896# savepoint support using ASE syntax
897
90d7422f 898sub _exec_svp_begin {
057db5ce 899 my ($self, $name) = @_;
900
90d7422f 901 $self->_dbh->do("SAVE TRANSACTION $name");
057db5ce 902}
903
904# A new SAVE TRANSACTION with the same name releases the previous one.
90d7422f 905sub _exec_svp_release { 1 }
057db5ce 906
90d7422f 907sub _exec_svp_rollback {
057db5ce 908 my ($self, $name) = @_;
909
90d7422f 910 $self->_dbh->do("ROLLBACK TRANSACTION $name");
057db5ce 911}
912
deabd575 913package # hide from PAUSE
914 DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format;
915
916my $datetime_parse_format = '%Y-%m-%dT%H:%M:%S.%3NZ';
917my $datetime_format_format = '%m/%d/%Y %H:%M:%S.%3N';
918
919my ($datetime_parser, $datetime_formatter);
920
921sub parse_datetime {
922 shift;
923 require DateTime::Format::Strptime;
924 $datetime_parser ||= DateTime::Format::Strptime->new(
925 pattern => $datetime_parse_format,
926 on_error => 'croak',
927 );
928 return $datetime_parser->parse_datetime(shift);
929}
930
931sub format_datetime {
932 shift;
933 require DateTime::Format::Strptime;
934 $datetime_formatter ||= DateTime::Format::Strptime->new(
935 pattern => $datetime_format_format,
936 on_error => 'croak',
937 );
938 return $datetime_formatter->format_datetime(shift);
939}
940
057db5ce 9411;
942
943=head1 Schema::Loader Support
944
290da7d6 945As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
c1e5a9ac 946most versions of Sybase ASE.
057db5ce 947
948=head1 FreeTDS
949
950This driver supports L<DBD::Sybase> compiled against FreeTDS
951(L<http://www.freetds.org/>) to the best of our ability; however, it is
952recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
953libraries, which are part of the Sybase ASE distribution.
954
955The Open Client FAQ is here:
956L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
957
958Sybase ASE for Linux (which comes with the Open Client libraries) may be
959downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
960
961To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
962
963 perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
964
c1e5a9ac 965It is recommended to set C<tds version> for your ASE server to C<5.0> in
966C</etc/freetds/freetds.conf>.
967
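For example, a minimal C</etc/freetds/freetds.conf> sketch (adapt the section
your connection actually uses):

  [global]
      tds version = 5.0
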
968Some versions or configurations of the libraries involved will not support
969placeholders, in which case the storage will be reblessed to
057db5ce 970L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
971
972In some configurations, placeholders will work but will throw implicit type
973conversion errors for anything that's not expecting a string. In such a case,
974the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
975automatically set; you may also enable it explicitly on connection with
c1e5a9ac 976L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>.
977The type info for the C<CAST>s is taken from the
978L<DBIx::Class::ResultSource/data_type> definitions in your Result classes, and
979is mapped to a Sybase type (if it isn't one already) using a mapping based on
980L<SQL::Translator>.
057db5ce 981
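To enable the cast behaviour explicitly rather than relying on detection, the
call referenced above can be added to your
L<connect_info|DBIx::Class::Storage::DBI/connect_info>:

  on_connect_call => ['set_auto_cast']
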
48580715 982In other configurations, placeholders will work just as they do with the Sybase
057db5ce 983Open Client libraries.
984
985Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
986
987=head1 INSERTS WITH PLACEHOLDERS
988
989With placeholders enabled, inserts are done in a transaction so that there are
990no concurrency issues with getting the inserted identity value using
991C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
992mode.
993
994In addition, they are done on a separate connection so that it's possible to
995have active cursors when doing an insert.
996
997When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>, transactions
c1e5a9ac 998are unnecessary and not used, as there are no concurrency issues with C<SELECT
999@@IDENTITY>, which is a session variable.
057db5ce 1000
1001=head1 TRANSACTIONS
1002
c1e5a9ac 1003Due to limitations of the TDS protocol and L<DBD::Sybase>, you cannot begin a
1004transaction while there are active cursors, nor can you use multiple active
1005cursors within a transaction. An active cursor is, for example, a
057db5ce 1006L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
1007C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
1008
1009For example, this will not work:
1010
1011 $schema->txn_do(sub {
1012 my $rs = $schema->resultset('Book');
1013 while (my $row = $rs->next) {
1014 $schema->resultset('MetaData')->create({
1015 book_id => $row->id,
1016 ...
1017 });
1018 }
1019 });
1020
1021This won't either:
1022
1023 my $first_row = $large_rs->first;
1024 $schema->txn_do(sub { ... });
1025
1026Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
1027are not affected, as they are done on an extra database handle.
1028
1029Some workarounds:
1030
1031=over 4
1032
1033=item * use L<DBIx::Class::Storage::DBI::Replicated>
1034
1035=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
1036
1037=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the sketch after this list)
1038
1039=back
1040
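A minimal sketch of that last workaround - exhaust the cursor before starting
the transaction (reusing the resultset names from the example above):

  my @books = $schema->resultset('Book')->all;   # no cursor remains active

  $schema->txn_do(sub {
    for my $book (@books) {
      $schema->resultset('MetaData')->create({
        book_id => $book->id,
        # ...
      });
    }
  });
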
1041=head1 MAXIMUM CONNECTIONS
1042
1043The TDS protocol makes separate connections to the server for active statements
1044in the background. By default the number of such connections is limited to 25,
1045on both the client side and the server side.
1046
1047This is a bit too low for a complex L<DBIx::Class> application, so on connection
1048the client-side setting is set to C<256> (see L<DBD::Sybase/maxConnect>). You
1049can override it to whatever setting you like in the DSN.
1050
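For example, with a hypothetical server name:

  dbi:Sybase:server=SYBASE;maxConnect=512
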
1051See
1052L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
1053for information on changing the setting on the server side.
1054
1055=head1 DATES
1056
1057See L</connect_call_datetime_setup> to set up date formats
1058for L<DBIx::Class::InflateColumn::DateTime>.
1059
1060=head1 TEXT/IMAGE COLUMNS
1061
1062L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
1063C<TEXT/IMAGE> columns.
1064
1065Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
1066
1067 $schema->storage->dbh->do("SET TEXTSIZE $bytes");
1068
1069or
1070
1071 $schema->storage->set_textsize($bytes);
1072
1073instead.
1074
1075However, the C<LongReadLen> you pass in
8384a713 1076L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
1077equivalent C<SET TEXTSIZE> command on connection.
057db5ce 1078
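For example (the schema class and value are illustrative only):

  my $schema = My::Schema->connect(
    $dsn, $user, $pass, { LongReadLen => 1024 * 1024 }
  );
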
8384a713 1079See L</connect_call_blob_setup> for a
1080L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
1081with C<IMAGE> columns.
057db5ce 1082
1083=head1 BULK API
1084
1085The experimental L<DBD::Sybase> Bulk API support is used for
1086L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
1087on a separate connection.
1088
1089To use this feature effectively, use a large number of rows for each
1090L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
1091
1092 while (my $rows = $data_source->get_100_rows()) {
1093 $rs->populate($rows);
1094 }
1095
1096B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
1097calls in your C<Result> classes B<must> list columns in database order for this
1098to work. Also, you may have to unset the C<LANG> environment variable before
c1e5a9ac 1099loading your app, as C<BCP -Y> is not yet supported in DBD::Sybase.
057db5ce 1100
1101When inserting IMAGE columns using this method, you'll need to use
1102L</connect_call_blob_setup> as well.
1103
6476fd66 1104=head1 COMPUTED COLUMNS
1105
1106If you have columns such as:
1107
1108 created_dtm AS getdate()
1109
1110represent them in your Result classes as:
1111
1112 created_dtm => {
1113 data_type => undef,
1114 default_value => \'getdate()',
1115 is_nullable => 0,
c1e5a9ac 1116 inflate_datetime => 1,
6476fd66 1117 }
1118
1119The C<data_type> must exist and must be C<undef>. Then empty inserts will work
1120on tables with such columns.
1121
1122=head1 TIMESTAMP COLUMNS
1123
1124C<timestamp> columns in Sybase ASE are not really timestamps, see:
1125L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
1126
1127They should be defined in your Result classes as:
1128
1129 ts => {
1130 data_type => 'timestamp',
1131 is_nullable => 0,
1132 inflate_datetime => 0,
1133 }
1134
1135The C<< inflate_datetime => 0 >> is necessary if you use
1136L<DBIx::Class::InflateColumn::DateTime> (as most people do) and still want to
1137be able to read these values.
1138
1139The values will come back as hexadecimal.
1140
057db5ce 1141=head1 TODO
1142
1143=over
1144
1145=item *
1146
1147Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
1148any active cursors, using eager cursors.
1149
1150=item *
1151
1152Real limits and limited counts using stored procedures deployed on startup.
1153
1154=item *
1155
057db5ce 1156Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
1157
1158=item *
1159
1160bulk_insert using prepare_cached (see comments.)
1161
1162=back
1163
1164=head1 AUTHOR
1165
8138f96a 1166See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
057db5ce 1167
1168=head1 LICENSE
1169
1170You may distribute this code under the same terms as Perl itself.
1171
1172=cut
1173# vim:sts=2 sw=2: