Ditch Carp::Clan for our own thing
[dbsrgits/DBIx-Class.git] / lib / DBIx / Class / Storage / DBI / Sybase / ASE.pm
CommitLineData
057db5ce 1package DBIx::Class::Storage::DBI::Sybase::ASE;
2
3use strict;
4use warnings;
5
6use base qw/
7 DBIx::Class::Storage::DBI::Sybase
8 DBIx::Class::Storage::DBI::AutoCast
9/;
10use mro 'c3';
70c28808 11use DBIx::Class::Carp;
6298a324 12use Scalar::Util 'blessed';
13use List::Util 'first';
057db5ce 14use Sub::Name();
6298a324 15use Data::Dumper::Concise 'Dumper';
ed7ab0f4 16use Try::Tiny;
fd323bf1 17use namespace::clean;
057db5ce 18
# Storage-level configuration for Sybase ASE:
# - limits are emulated via SET ROWCOUNT or a generic subquery
# - identifiers are quoted with square brackets
# - datetime inflation uses DateTime::Format::Sybase (see connect_call_datetime_setup)
__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
__PACKAGE__->sql_quote_char ([qw/[ ]/]);
__PACKAGE__->datetime_parser_type('DateTime::Format::Sybase');

# Simple accessors for internal state:
#   _identity            - identity value captured after the last insert
#   _blob_log_on_update  - log_on_update flag for blob writes (see connect_call_blob_setup)
#   _writer_storage      - separate connection used for insert/blob-update transactions
#   _is_extra_storage    - true on the writer/bulk child storages themselves
#   _bulk_storage        - separate bulkLogin=1 connection for the Bulk API
#   _is_bulk_storage     - true on the bulk child storage
#   _began_bulk_work     - bulk connection has an open BEGIN TRAN
#   _identity_method     - e.g. '@@IDENTITY' when SELECT MAX() is not needed
__PACKAGE__->mk_group_accessors('simple' =>
    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
       _bulk_storage _is_bulk_storage _began_bulk_work
       _bulk_disabled_due_to_coderef_connect_info_warned
       _identity_method/
);

# Methods that must be mirrored onto the writer and bulk child storages so
# that they stay configured identically to the main storage (see the proxy
# loop below).
my @also_proxy_to_extra_storages = qw/
  connect_call_set_auto_cast auto_cast connect_call_blob_setup
  connect_call_datetime_setup

  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
  auto_savepoint unsafe cursor_class debug debugobj schema
/;
37
38=head1 NAME
39
40DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
41DBIx::Class
42
43=head1 SYNOPSIS
44
45This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
46
47=head1 DESCRIPTION
48
49If your version of Sybase does not support placeholders, then your storage will
50be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
51You can also enable that driver explicitly, see the documentation for more
52details.
53
54With this driver there is unfortunately no way to get the C<last_insert_id>
55without doing a C<SELECT MAX(col)>. This is done safely in a transaction
56(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
57
8384a713 58A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
057db5ce 59
60 on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
61
62=head1 METHODS
63
64=cut
65
# Inspect the driver/placeholder capabilities of the freshly-made connection
# and adapt: either enable auto_cast (typed placeholders need explicit CASTs)
# or rebless into the ::NoBindVars subclass (no placeholder support at all).
sub _rebless {
  my $self = shift;

  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';

  if ($self->using_freetds) {
    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};

You are using FreeTDS with Sybase.

We will do our best to support this configuration, but please consider this
support experimental.

TEXT/IMAGE columns will definitely not work.

You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
instead.

See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.

To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
variable.
EOF

    if (not $self->_use_typeless_placeholders) {
      if ($self->_use_placeholders) {
        # Placeholders work but are strongly typed - wrap binds in CASTs.
        $self->auto_cast(1);
      }
      else {
        # No placeholder support - switch to the literal-SQL subclass and
        # let it run its own _rebless.
        $self->ensure_class_loaded($no_bind_vars);
        bless $self, $no_bind_vars;
        $self->_rebless;
      }
    }
  }

  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
    # not necessarily FreeTDS, but no placeholders nevertheless
    $self->ensure_class_loaded($no_bind_vars);
    bless $self, $no_bind_vars;
    $self->_rebless;
  }
  # this is highly unlikely, but we check just in case
  elsif (not $self->_use_typeless_placeholders) {
    $self->auto_cast(1);
  }
}
113
# Post-connect initialization: raise the client connection limit and set up
# the two auxiliary storages (writer, bulk) unless we ARE one of them.
sub _init {
  my $self = shift;
  $self->_set_max_connect(256);

# create storage for insert/(update blob) transactions,
# unless this is that storage
  return if $self->_is_extra_storage;

  my $writer_storage = (ref $self)->new;

  $writer_storage->_is_extra_storage(1);
  $writer_storage->connect_info($self->connect_info);
  $writer_storage->auto_cast($self->auto_cast);

  $self->_writer_storage($writer_storage);

# create a bulk storage unless connect_info is a coderef
# (a coderef DSN cannot have ';bulkLogin=1' appended to it - see below)
  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';

  my $bulk_storage = (ref $self)->new;

  $bulk_storage->_is_extra_storage(1);
  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
  $bulk_storage->connect_info($self->connect_info);

# this is why (the Bulk API requires a bulkLogin-enabled connection)
  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';

  $self->_bulk_storage($bulk_storage);
}
144
# Wrap each method listed in @also_proxy_to_extra_storages so that calling it
# on the main storage also invokes the ORIGINAL implementation on the writer
# and bulk child storages (when they exist), keeping all three configured
# identically. The wrapper is installed into this package's symbol table and
# named via Sub::Name for sane stack traces.
for my $method (@also_proxy_to_extra_storages) {
  no strict 'refs';
  no warnings 'redefine';

  # Capture the pre-wrap code ref; the wrapper below calls this directly so
  # the mirroring does not recurse through itself.
  my $replaced = __PACKAGE__->can($method);

  *{$method} = Sub::Name::subname $method => sub {
    my $self = shift;
    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
    $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
    # The main storage's own result is what the caller sees.
    return $self->$replaced(@_);
  };
}
158
# Disconnect, with special handling for the bulk storage: suppress the bogus
# "active statement" warning and reset the began-bulk-work flag so the next
# transaction on the bulk connection issues a fresh BEGIN TRAN.
sub disconnect {
  my $self = shift;

# Even though we call $sth->finish for uses off the bulk API, there's still an
# "active statement" warning on disconnect, which we throw away here.
# This is due to the bug described in insert_bulk.
# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
  local $SIG{__WARN__} = sub {
    warn $_[0] unless $_[0] =~ /active statement/i;
  } if $self->_is_bulk_storage;

# so that next transaction gets a dbh
  $self->_began_bulk_work(0) if $self->_is_bulk_storage;

  $self->next::method;
}
175
# Set up session settings for Sybase databases for the connection.
#
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
# only want when AutoCommit is off.
#
# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
sub _run_connection_actions {
  my $self = shift;

  if ($self->_is_bulk_storage) {
# this should be cleared on every reconnect
    $self->_began_bulk_work(0);
    # Bulk connections get no further session setup (and no on_connect hooks).
    return;
  }

  if (not $self->using_freetds) {
    # Native OpenClient: let the DBD manage transactions via chained mode.
    $self->_dbh->{syb_chained_txn} = 1;
  } else {
    # based on LongReadLen in connect_info
    $self->set_textsize;

    if ($self->_dbh_autocommit) {
      $self->_dbh->do('SET CHAINED OFF');
    } else {
      $self->_dbh->do('SET CHAINED ON');
    }
  }

  $self->next::method(@_);
}
208
209=head2 connect_call_blob_setup
210
211Used as:
212
213 on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
214
215Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
216instead of as a hex string.
217
218Recommended.
219
220Also sets the C<log_on_update> value for blob write operations. The default is
221C<1>, but C<0> is better if your database is configured for it.
222
223See
224L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
225
226=cut
227
# Configure the session for TEXT/IMAGE handling: return IMAGE data as raw
# binary, and optionally record the log_on_update preference used later by
# _insert_blobs. Intended for use via on_connect_call.
sub connect_call_blob_setup {
  my ($self, %opts) = @_;

  # Return IMAGE columns as raw binary instead of hex strings.
  $self->_dbh->{syb_binary_images} = 1;

  if (exists $opts{log_on_update}) {
    $self->_blob_log_on_update($opts{log_on_update});
  }
}
237
# True if the named column of $source is declared with a LOB (TEXT/IMAGE)
# data type.
sub _is_lob_column {
  my ($self, $source, $col) = @_;

  my $declared_type = $source->column_info($col)->{data_type};
  return $self->_is_lob_type($declared_type);
}
243
# Post-process the SQL generated upstream:
# - wrap inserts/updates that touch an identity column in
#   SET IDENTITY_INSERT/UPDATE ON ... OFF
# - for plain inserts into identity tables, append the SELECT MAX(col)
#   statement whose result _execute harvests as last_insert_id
sub _prep_for_execute {
  my $self = shift;
  my ($op, $extra_bind, $ident, $args) = @_;

  my ($sql, $bind) = $self->next::method (@_);

  # $ident may be a ResultSource or a plain table name.
  my $table = blessed $ident ? $ident->from : $ident;

  my $bind_info = $self->_resolve_column_info(
    $ident, [map $_->[0], @{$bind}]
  );
  # Is an auto-increment column among the bound values?
  my $bound_identity_col =
    first { $bind_info->{$_}{is_auto_increment} }
    keys %$bind_info
  ;

  my $columns_info = blessed $ident && $ident->columns_info;

  # The source's identity column, regardless of whether it is bound.
  my $identity_col =
    $columns_info &&
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info
  ;

  if (($op eq 'insert' && $bound_identity_col) ||
      ($op eq 'update' && exists $args->[0]{$identity_col})) {
    # Explicit identity value supplied - ASE requires IDENTITY_INSERT/UPDATE
    # to be toggled around the statement.
    $sql = join ("\n",
      $self->_set_table_identity_sql($op => $table, 'on'),
      $sql,
      $self->_set_table_identity_sql($op => $table, 'off'),
    );
  }

  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
      (not $self->{insert_bulk})) {
    # Database assigns the identity - fetch it in the same batch
    # (skipped during insert_bulk, which computes identities differently).
    $sql =
      "$sql\n" .
      $self->_fetch_identity_sql($ident, $identity_col);
  }

  return ($sql, $bind);
}
286
# Build a "SET IDENTITY_INSERT <table> ON/OFF" (or IDENTITY_UPDATE) statement
# for the given operation.
sub _set_table_identity_sql {
  my ($self, $op, $table, $on_off) = @_;

  my $quoted_table = $self->sql_maker->_quote($table);

  return sprintf 'SET IDENTITY_%s %s %s', uc($op), $quoted_table, uc($on_off);
}
293
# Stolen from SQLT, with some modifications. This is a makeshift
# solution before a sane type-mapping library is available, thus
# the 'our' for easy overrides.
# Maps foreign/generic data_type names (lowercased, sans IDENTITY qualifier)
# to native Sybase ASE types; consumed by _native_data_type when generating
# CAST expressions for auto_cast.
our %TYPE_MAPPING = (
  number    => 'numeric',
  money     => 'money',
  varchar   => 'varchar',
  varchar2  => 'varchar',
  timestamp => 'datetime',
  text      => 'varchar',
  real      => 'double precision',
  comment   => 'text',
  bit       => 'bit',
  tinyint   => 'smallint',
  float     => 'double precision',
  serial    => 'numeric',
  bigserial => 'numeric',
  boolean   => 'varchar',
  long      => 'varchar',
);
314
# Translate a (possibly foreign) data_type into the native uppercased Sybase
# type via %TYPE_MAPPING, passing unknown types through unchanged. Any
# "identity" qualifier is stripped first.
sub _native_data_type {
  my ($self, $type) = @_;

  my $normalized = lc $type;
  $normalized =~ s/\s* identity//x;

  my $native = $TYPE_MAPPING{$normalized} || $normalized;
  return uc $native;
}
323
# SQL to retrieve the most recently assigned identity value for $col of
# $source via SELECT MAX() - see "INSERTS WITH PLACEHOLDERS" in the POD for
# why this is done inside a transaction.
sub _fetch_identity_sql {
  my ($self, $source, $col) = @_;

  my $maker = $self->sql_maker;

  return sprintf ("SELECT MAX(%s) FROM %s",
    $maker->_quote($col), $maker->_quote($source->from)
  );
}
331
# Run the statement; for inserts, the SQL produced by _prep_for_execute has a
# trailing SELECT MAX(col) whose single-row result is harvested here into
# _identity for later retrieval via last_insert_id.
sub _execute {
  my $self = shift;
  my ($op) = @_;

  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);

  if ($op eq 'insert') {
    $self->_identity($sth->fetchrow_array);
    $sth->finish;
  }

  return wantarray ? ($rv, $sth, @bind) : $rv;
}
345
346sub last_insert_id { shift->_identity }
347
348# handles TEXT/IMAGE and transaction for last_insert_id
349sub insert {
350 my $self = shift;
351 my ($source, $to_insert) = @_;
352
e366f807 353 my $columns_info = $source->columns_info;
354
6298a324 355 my $identity_col =
e366f807 356 (first { $columns_info->{$_}{is_auto_increment} }
357 keys %$columns_info )
6298a324 358 || '';
057db5ce 359
360 # check for empty insert
361 # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
6469dabf 362 # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
363 # and computed columns)
057db5ce 364 if (not %$to_insert) {
365 for my $col ($source->columns) {
366 next if $col eq $identity_col;
6469dabf 367
368 my $info = $source->column_info($col);
369
370 next if ref $info->{default_value} eq 'SCALAR'
371 || (exists $info->{data_type} && (not defined $info->{data_type}));
372
373 next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
374
057db5ce 375 $to_insert->{$col} = \'DEFAULT';
376 }
377 }
378
379 my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
380
381 # do we need the horrific SELECT MAX(COL) hack?
382 my $dumb_last_insert_id =
383 $identity_col
384 && (not exists $to_insert->{$identity_col})
385 && ($self->_identity_method||'') ne '@@IDENTITY';
386
387 my $next = $self->next::can;
388
389 # we are already in a transaction, or there are no blobs
390 # and we don't need the PK - just (try to) do it
391 if ($self->{transaction_depth}
392 || (!$blob_cols && !$dumb_last_insert_id)
393 ) {
394 return $self->_insert (
395 $next, $source, $to_insert, $blob_cols, $identity_col
396 );
397 }
398
399 # otherwise use the _writer_storage to do the insert+transaction on another
400 # connection
401 my $guard = $self->_writer_storage->txn_scope_guard;
402
403 my $updated_cols = $self->_writer_storage->_insert (
404 $next, $source, $to_insert, $blob_cols, $identity_col
405 );
406
407 $self->_identity($self->_writer_storage->_identity);
408
409 $guard->commit;
410
411 return $updated_cols;
412}
413
# Perform the actual insert via the superclass method $next, then write any
# blob values via the ct_* API using the complete row (including the freshly
# assigned identity) as the lookup key.
sub _insert {
  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;

  my $updated_cols = $self->$next ($source, $to_insert);

  # Assemble the full row image used to locate the record for blob writes.
  my $final_row = {
    ($identity_col ?
      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
    %$to_insert,
    %$updated_cols,
  };

  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;

  return $updated_cols;
}
430
# Update, handling TEXT/IMAGE columns: blobs cannot be set via placeholders,
# so they are first emptied, then written via the ct_* API, then the
# remaining columns are updated - all atomically on the writer connection.
sub update {
  my $self = shift;
  my ($source, $fields, $where, @rest) = @_;

  my $blob_cols = $self->_remove_blob_cols($source, $fields);

  my $table = $source->name;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
      keys %$columns_info;

  my $is_identity_update = $identity_col && defined $fields->{$identity_col};

  # No blobs involved - the plain superclass update suffices.
  return $self->next::method(@_) unless $blob_cols;

# If there are any blobs in $where, Sybase will return a descriptive error
# message.
# XXX blobs can still be used with a LIKE query, and this should be handled.

# update+blob update(s) done atomically on separate connection
  $self = $self->_writer_storage;

  my $guard = $self->txn_scope_guard;

# First update the blob columns to be updated to '' (taken from $fields, where
# it is originally put by _remove_blob_cols .)
  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;

# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.

  $self->next::method($source, \%blobs_to_empty, $where, @rest);

# Now update the blobs before the other columns in case the update of other
# columns makes the search condition invalid.
  $self->_update_blobs($source, $blob_cols, $where);

  # Run the remaining (non-blob) update, preserving the caller's context
  # (list / scalar / void) for the final return.
  my @res;
  if (%$fields) {
    if (wantarray) {
      @res    = $self->next::method(@_);
    }
    elsif (defined wantarray) {
      $res[0] = $self->next::method(@_);
    }
    else {
      $self->next::method(@_);
    }
  }

  $guard->commit;

  return wantarray ? @res : $res[0];
}
487
# Multi-row insert. Uses the DBD::Sybase Bulk API over the dedicated
# bulkLogin connection when available; otherwise falls back to regular array
# inserts (optionally through _writer_storage when blobs force a wider
# transaction). On character-set failure of the bulk path, disables bulk and
# retries itself via goto.
sub insert_bulk {
  my $self = shift;
  my ($source, $cols, $data) = @_;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
      keys %$columns_info;

  # Is the caller explicitly supplying identity values?
  my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;

  my @source_columns = $source->columns;

  my $use_bulk_api =
    $self->_bulk_storage &&
    $self->_get_dbh->{syb_has_blk};

  if ((not $use_bulk_api)
        &&
      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
        &&
      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
    carp <<'EOF';
Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
regular array inserts.
EOF
    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
  }

  # --- fallback path: regular array inserts ---
  if (not $use_bulk_api) {
    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);

# _execute_array uses a txn anyway, but it ends too early in case we need to
# select max(col) to get the identity for inserting blobs.
    ($self, my $guard) = $self->{transaction_depth} == 0 ?
      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
      :
      ($self, undef);

    # Tells _prep_for_execute not to append the identity-fetch SELECT.
    local $self->{insert_bulk} = 1;

    $self->next::method(@_);

    if ($blob_cols) {
      if ($is_identity_insert) {
        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
      }
      else {
        my @cols_with_identities = (@$cols, $identity_col);

        ## calculate identities
        # XXX This assumes identities always increase by 1, which may or may not
        # be true.
        my ($last_identity) =
          $self->_dbh->selectrow_array (
            $self->_fetch_identity_sql($source, $identity_col)
          );
        my @identities = (($last_identity - @$data + 1) .. $last_identity);

        my @data_with_identities = map [@$_, shift @identities], @$data;

        $self->_insert_blobs_array (
          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
        );
      }
    }

    $guard->commit if $guard;

    return;
  }

# otherwise, use the bulk API

# rearrange @$data so that columns are in database order
# (BCP binds positionally to the physical column layout)
  my %orig_idx;
  @orig_idx{@$cols} = 0..$#$cols;

  my %new_idx;
  @new_idx{@source_columns} = 0..$#source_columns;

  my @new_data;
  for my $datum (@$data) {
    my $new_datum = [];
    for my $col (@source_columns) {
# identity data will be 'undef' if not $is_identity_insert
# columns with defaults will also be 'undef'
      $new_datum->[ $new_idx{$col} ] =
        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
    }
    push @new_data, $new_datum;
  }

# bcp identity index is 1-based
  my $identity_idx = exists $new_idx{$identity_col} ?
    $new_idx{$identity_col} + 1 : 0;

## Set a client-side conversion error handler, straight from DBD::Sybase docs.
# This ignores any data conversion errors detected by the client side libs, as
# they are usually harmless.
  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
    Sub::Name::subname insert_bulk => sub {
      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;

      # errno 36 is the harmless conversion notice we deliberately swallow
      return 1 if $errno == 36;

      carp
        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
        ($errmsg ? "\n$errmsg" : '') .
        ($osmsg ? "\n$osmsg" : '') .
        ($blkmsg ? "\n$blkmsg" : '');

      return 0;
  });

  my $exception = '';
  try {
    my $bulk = $self->_bulk_storage;

    my $guard = $bulk->txn_scope_guard;

## XXX get this to work instead of our own $sth
## will require SQLA or *Hacks changes for ordered columns
#    $bulk->next::method($source, \@source_columns, \@new_data, {
#      syb_bcp_attribs => {
#        identity_flag => $is_identity_insert,
#        identity_column => $identity_idx,
#      }
#    });
    my $sql = 'INSERT INTO ' .
      $bulk->sql_maker->_quote($source->name) . ' (' .
# colname list is ignored for BCP, but does no harm
      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
      ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';

## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
## better yet the version above. Should be fixed in DBD::Sybase .
    my $sth = $bulk->_get_dbh->prepare($sql,
#      'insert', # op
      {
        syb_bcp_attribs => {
          identity_flag => $is_identity_insert,
          identity_column => $identity_idx,
        }
      }
    );

    # Positional bind descriptors matching the rearranged column order.
    my @bind = do {
      my $idx = 0;
      map [ $_, $idx++ ], @source_columns;
    };

    $self->_execute_array(
      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
        $guard->commit
      }
    );

    $bulk->_query_end($sql);
  } catch {
    $exception = shift;
  };

  # Always restore the original cslib callback, success or failure.
  DBD::Sybase::set_cslib_cb($orig_cslib_cb);

  if ($exception =~ /-Y option/) {
    # Character-set mismatch: permanently disable bulk for this storage and
    # re-dispatch to ourselves, which will now take the fallback path.
    my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
          . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
    ;
    $w .= "\n$exception" if $self->debug;
    carp $w;

    $self->_bulk_storage(undef);
    unshift @_, $self;
    goto \&insert_bulk;
  }
  elsif ($exception) {
# rollback makes the bulkLogin connection unusable
    $self->_bulk_storage->disconnect;
    $self->throw_exception($exception);
  }
}
672
# Execute the array insert via the superclass, then run the optional
# callback (insert_bulk passes one that commits its transaction guard).
sub _dbh_execute_array {
  my ($self, $sth, $tuple_status, $cb) = @_;

  my $result = $self->next::method($sth, $tuple_status);

  $cb->() if $cb;

  return $result;
}
681
# Make sure blobs are not bound as placeholders: strip TEXT/IMAGE values out
# of $fields (replacing them in place with literal NULL or '' so the
# row is still created/updated), and return the non-empty ones keyed by
# column name - or undef when there is nothing to write separately.
sub _remove_blob_cols {
  my ($self, $source, $fields) = @_;

  my %extracted;

  for my $col (keys %$fields) {
    next unless $self->_is_lob_column($source, $col);

    my $value = delete $fields->{$col};

    if (not defined $value) {
      $fields->{$col} = \'NULL';
    }
    else {
      # Placeholder literal; the real data goes in via the ct_* API later.
      $fields->{$col} = \"''";
      $extracted{$col} = $value unless $value eq '';
    }
  }

  return %extracted ? \%extracted : undef;
}
704
# same for insert_bulk: strip TEXT/IMAGE values out of the row arrays,
# substituting literal NULL / '' in place, and return the extracted non-empty
# values as a sparse AoA indexed [row][col] - or undef when none were found.
sub _remove_blob_cols_array {
  my ($self, $source, $cols, $data) = @_;

  my @extracted;

  for my $col_idx (0 .. $#$cols) {
    next unless $self->_is_lob_column($source, $cols->[$col_idx]);

    for my $row_idx (0 .. $#$data) {
      my $value = delete $data->[$row_idx][$col_idx];

      if (not defined $value) {
        $data->[$row_idx][$col_idx] = \'NULL';
      }
      else {
        $data->[$row_idx][$col_idx] = \"''";
        # Leave the slot nonexistent (sparse) for empty strings -
        # _insert_blobs_array checks with exists().
        $extracted[$row_idx][$col_idx] = $value
          unless $value eq '';
      }
    }
  }

  return @extracted ? \@extracted : undef;
}
731
# Write blob values for every row matched by $where. When $where pins the
# full primary key we already know the single target row; otherwise we must
# SELECT the PKs of all matching rows first, then write blobs row by row.
sub _update_blobs {
  my ($self, $source, $blob_cols, $where) = @_;

  my @primary_cols = try
    { $source->_pri_cols }
    catch {
      $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
    };

# check if we're updating a single row by PK
  my $pk_cols_in_where = 0;
  for my $col (@primary_cols) {
    $pk_cols_in_where++ if defined $where->{$col};
  }
  my @rows;

  if ($pk_cols_in_where == @primary_cols) {
    my %row_to_update;
    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
    @rows = \%row_to_update;
  } else {
    my $cursor = $self->select ($source, \@primary_cols, $where, {});
    @rows = map {
      my %row; @row{@primary_cols} = @$_; \%row
    } $cursor->all;
  }

  for my $row (@rows) {
    $self->_insert_blobs($source, $blob_cols, $row);
  }
}
763
# Write TEXT/IMAGE values into an existing row using the DBD::Sybase
# ct_data_info/ct_send_data API. The row is located by its primary key
# values taken from $row; throws if the PK is incomplete or the row cannot
# be found.
sub _insert_blobs {
  my ($self, $source, $blob_cols, $row) = @_;
  my $dbh = $self->_get_dbh;

  my $table = $source->name;

  my %row = %$row;
  my @primary_cols = try
    { $source->_pri_cols }
    catch {
      $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
    };

  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);

  for my $col (keys %$blob_cols) {
    my $blob = $blob_cols->{$col};

    my %where = map { ($_, $row{$_}) } @primary_cols;

    # Select the blob column itself so the sth carries the CS_IODESC needed
    # by ct_data_info below.
    my $cursor = $self->select ($source, [$col], \%where, {});
    $cursor->next;
    my $sth = $cursor->sth;

    if (not $sth) {
      $self->throw_exception(
          "Could not find row in table '$table' for blob update:\n"
        . (Dumper \%where)
      );
    }

    try {
      # Walk all result rows to populate the I/O descriptor...
      do {
        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
      } while $sth->fetch;

      $sth->func('ct_prepare_send') or die $sth->errstr;

      # Default to logged blob writes unless connect_call_blob_setup said
      # otherwise.
      my $log_on_update = $self->_blob_log_on_update;
      $log_on_update = 1 if not defined $log_on_update;

      $sth->func('CS_SET', 1, {
        total_txtlen => length($blob),
        log_on_update => $log_on_update
      }, 'ct_data_info') or die $sth->errstr;

      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;

      $sth->func('ct_finish_send') or die $sth->errstr;
    }
    catch {
      if ($self->using_freetds) {
        $self->throw_exception (
          "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
        );
      }
      else {
        $self->throw_exception($_);
      }
    }
    finally {
      $sth->finish if $sth;
    };
  }
}
830
# Bulk counterpart of _insert_blobs: for each data row, pick the blob values
# recorded by _remove_blob_cols_array (sparse [row][col] structure - checked
# with exists) and write them via _insert_blobs using the full row as key.
sub _insert_blobs_array {
  my ($self, $source, $blob_cols, $cols, $data) = @_;

  for my $i (0..$#$data) {
    my $datum = $data->[$i];

    my %row;
    @row{ @$cols } = @$datum;

    my %blob_vals;
    for my $j (0..$#$cols) {
      if (exists $blob_cols->[$i][$j]) {
        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
      }
    }

    $self->_insert_blobs ($source, \%blob_vals, \%row);
  }
}
850
851=head2 connect_call_datetime_setup
852
853Used as:
854
855 on_connect_call => 'datetime_setup'
856
8384a713 857In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
057db5ce 858
859 $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
860 $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
861
862On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
863L<DateTime::Format::Sybase>, which you will need to install.
864
865This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
866C<SMALLDATETIME> columns only have minute precision.
867
868=cut
869
70c28808 870sub connect_call_datetime_setup {
871 my $self = shift;
872 my $dbh = $self->_get_dbh;
057db5ce 873
70c28808 874 if ($dbh->can('syb_date_fmt')) {
875 # amazingly, this works with FreeTDS
876 $dbh->syb_date_fmt('ISO_strict');
877 }
878 else {
879 carp_once
880 'Your DBD::Sybase is too old to support '
881 .'DBIx::Class::InflateColumn::DateTime, please upgrade!';
057db5ce 882
883 $dbh->do('SET DATEFORMAT mdy');
057db5ce 884 1;
885 }
886}
887
057db5ce 888
# ->begin_work and such have no effect with FreeTDS but we run them anyway to
# let the DBD keep any state it needs to.
#
# If they ever do start working, the extra statements will do no harm (because
# Sybase supports nested transactions.)

sub _dbh_begin_work {
  my $self = shift;

# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
# TRAN once. However, we need to make sure there's a $dbh.
  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;

  $self->next::method(@_);

  # FreeTDS ignores the DBD-level begin_work, so issue BEGIN TRAN explicitly.
  if ($self->using_freetds) {
    $self->_get_dbh->do('BEGIN TRAN');
  }

  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
}
910
# Commit, issuing an explicit COMMIT first under FreeTDS where the DBD-level
# commit has no effect (harmless elsewhere - see the note above begin_work).
sub _dbh_commit {
  my $self = shift;

  $self->_dbh->do('COMMIT') if $self->using_freetds;

  return $self->next::method(@_);
}
918
# Rollback, issuing an explicit ROLLBACK first under FreeTDS where the
# DBD-level rollback has no effect.
sub _dbh_rollback {
  my $self = shift;

  $self->_dbh->do('ROLLBACK') if $self->using_freetds;

  return $self->next::method(@_);
}
926
927# savepoint support using ASE syntax
928
# Create a savepoint using ASE's SAVE TRANSACTION syntax. $name is generated
# internally by the savepoint machinery, not user input.
sub _svp_begin {
  my ($self, $name) = @_;

  my $dbh = $self->_get_dbh;
  $dbh->do("SAVE TRANSACTION $name");
}
934
# A new SAVE TRANSACTION with the same name releases the previous one, so ASE
# needs no explicit release - just report success.
sub _svp_release { return 1 }
937
# Roll back to a previously created savepoint using ASE syntax.
sub _svp_rollback {
  my ($self, $name) = @_;

  my $dbh = $self->_get_dbh;
  $dbh->do("ROLLBACK TRANSACTION $name");
}
943
9441;
945
946=head1 Schema::Loader Support
947
290da7d6 948As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
949most (if not all) versions of Sybase ASE.
057db5ce 950
951=head1 FreeTDS
952
953This driver supports L<DBD::Sybase> compiled against FreeTDS
954(L<http://www.freetds.org/>) to the best of our ability, however it is
955recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
956libraries. They are a part of the Sybase ASE distribution:
957
958The Open Client FAQ is here:
959L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
960
961Sybase ASE for Linux (which comes with the Open Client libraries) may be
962downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
963
964To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
965
966 perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
967
968Some versions of the libraries involved will not support placeholders, in which
969case the storage will be reblessed to
970L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
971
972In some configurations, placeholders will work but will throw implicit type
973conversion errors for anything that's not expecting a string. In such a case,
974the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
975automatically set, which you may enable on connection with
976L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
977for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
978definitions in your Result classes, and are mapped to a Sybase type (if it isn't
979already) using a mapping based on L<SQL::Translator>.
980
48580715 981In other configurations, placeholders will work just as they do with the Sybase
057db5ce 982Open Client libraries.
983
984Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
985
986=head1 INSERTS WITH PLACEHOLDERS
987
988With placeholders enabled, inserts are done in a transaction so that there are
989no concurrency issues with getting the inserted identity value using
990C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
991mode.
992
993In addition, they are done on a separate connection so that it's possible to
994have active cursors when doing an insert.
995
996When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
997are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
998it's a session variable.
999
1000=head1 TRANSACTIONS
1001
48580715 1002Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
1003begin a transaction while there are active cursors, nor can you use multiple
057db5ce 1004active cursors within a transaction. An active cursor is, for example, a
1005L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
1006C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
1007
1008For example, this will not work:
1009
1010 $schema->txn_do(sub {
1011 my $rs = $schema->resultset('Book');
1012 while (my $row = $rs->next) {
1013 $schema->resultset('MetaData')->create({
1014 book_id => $row->id,
1015 ...
1016 });
1017 }
1018 });
1019
1020This won't either:
1021
1022 my $first_row = $large_rs->first;
1023 $schema->txn_do(sub { ... });
1024
1025Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
1026are not affected, as they are done on an extra database handle.
1027
1028Some workarounds:
1029
1030=over 4
1031
1032=item * use L<DBIx::Class::Storage::DBI::Replicated>
1033
1034=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
1035
1036=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
1037
1038=back
1039
1040=head1 MAXIMUM CONNECTIONS
1041
1042The TDS protocol makes separate connections to the server for active statements
1043in the background. By default the number of such connections is limited to 25,
1044on both the client side and the server side.
1045
1046This is a bit too low for a complex L<DBIx::Class> application, so on connection
1047the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
1048can override it to whatever setting you like in the DSN.
1049
1050See
1051L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
1052for information on changing the setting on the server side.
1053
1054=head1 DATES
1055
1056See L</connect_call_datetime_setup> to setup date formats
1057for L<DBIx::Class::InflateColumn::DateTime>.
1058
1059=head1 TEXT/IMAGE COLUMNS
1060
1061L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
1062C<TEXT/IMAGE> columns.
1063
Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
1065
1066 $schema->storage->dbh->do("SET TEXTSIZE $bytes");
1067
1068or
1069
1070 $schema->storage->set_textsize($bytes);
1071
1072instead.
1073
1074However, the C<LongReadLen> you pass in
8384a713 1075L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
1076equivalent C<SET TEXTSIZE> command on connection.
057db5ce 1077
8384a713 1078See L</connect_call_blob_setup> for a
1079L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
1080with C<IMAGE> columns.
057db5ce 1081
1082=head1 BULK API
1083
1084The experimental L<DBD::Sybase> Bulk API support is used for
1085L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
1086on a separate connection.
1087
1088To use this feature effectively, use a large number of rows for each
1089L<populate|DBIx::Class::ResultSet/populate> call, eg.:
1090
1091 while (my $rows = $data_source->get_100_rows()) {
1092 $rs->populate($rows);
1093 }
1094
1095B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
1096calls in your C<Result> classes B<must> list columns in database order for this
1097to work. Also, you may have to unset the C<LANG> environment variable before
1098loading your app, if it doesn't match the character set of your database.
1099
1100When inserting IMAGE columns using this method, you'll need to use
1101L</connect_call_blob_setup> as well.
1102
6476fd66 1103=head1 COMPUTED COLUMNS
1104
1105If you have columns such as:
1106
1107 created_dtm AS getdate()
1108
1109represent them in your Result classes as:
1110
1111 created_dtm => {
1112 data_type => undef,
1113 default_value => \'getdate()',
1114 is_nullable => 0,
1115 }
1116
1117The C<data_type> must exist and must be C<undef>. Then empty inserts will work
1118on tables with such columns.
1119
1120=head1 TIMESTAMP COLUMNS
1121
1122C<timestamp> columns in Sybase ASE are not really timestamps, see:
1123L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
1124
1125They should be defined in your Result classes as:
1126
1127 ts => {
1128 data_type => 'timestamp',
1129 is_nullable => 0,
1130 inflate_datetime => 0,
1131 }
1132
The C<< inflate_datetime => 0 >> is necessary if you use
1134L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
1135be able to read these values.
1136
1137The values will come back as hexadecimal.
1138
057db5ce 1139=head1 TODO
1140
1141=over
1142
1143=item *
1144
1145Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
1146any active cursors, using eager cursors.
1147
1148=item *
1149
1150Real limits and limited counts using stored procedures deployed on startup.
1151
1152=item *
1153
d5dedbd6 1154Adaptive Server Anywhere (ASA) support
057db5ce 1155
1156=item *
1157
1158Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
1159
1160=item *
1161
1162bulk_insert using prepare_cached (see comments.)
1163
1164=back
1165
1166=head1 AUTHOR
1167
8138f96a 1168See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
057db5ce 1169
1170=head1 LICENSE
1171
1172You may distribute this code under the same terms as Perl itself.
1173
1174=cut
1175# vim:sts=2 sw=2: