package DBIx::Class::Storage::DBI::Sybase::ASE;

use strict;
use warnings;

use base qw/
  DBIx::Class::Storage::DBI::Sybase
  DBIx::Class::Storage::DBI::AutoCast
/;
use mro 'c3';
use Carp::Clan qw/^DBIx::Class/;
use Scalar::Util 'blessed';
use List::Util 'first';
use Sub::Name();
use Data::Dumper::Concise 'Dumper';
use Try::Tiny;
use namespace::clean;

__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');

__PACKAGE__->mk_group_accessors('simple' =>
    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
       _bulk_storage _is_bulk_storage _began_bulk_work
       _bulk_disabled_due_to_coderef_connect_info_warned
       _identity_method/
);

my @also_proxy_to_extra_storages = qw/
  connect_call_set_auto_cast auto_cast connect_call_blob_setup
  connect_call_datetime_setup

  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
  auto_savepoint unsafe cursor_class debug debugobj schema
/;

=head1 NAME

DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
DBIx::Class

=head1 SYNOPSIS

This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.

=head1 DESCRIPTION

If your version of Sybase does not support placeholders, then your storage will
be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
You can also enable that driver explicitly, see the documentation for more
details.

With this driver there is unfortunately no way to get the C<last_insert_id>
without doing a C<SELECT MAX(col)>. This is done safely in a transaction
(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.

A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:

  on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]

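
For example, assuming a hypothetical C<MyApp::Schema> class and substituting
your own DSN and credentials, a complete connect might look like:

  my $schema = MyApp::Schema->connect(
    'dbi:Sybase:server=SYBASE',      # placeholder DSN
    $user,
    $password,
    {
      AutoCommit      => 1,
      on_connect_call => [
        ['datetime_setup'],
        ['blob_setup', log_on_update => 0],
      ],
    }
  );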

=head1 METHODS

=cut

sub _rebless {
  my $self = shift;

  my $no_bind_vars = __PACKAGE__ . '::NoBindVars';

  if ($self->using_freetds) {
    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};

You are using FreeTDS with Sybase.

We will do our best to support this configuration, but please consider this
support experimental.

TEXT/IMAGE columns will definitely not work.

You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
instead.

See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.

To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
variable.
EOF

    if (not $self->_use_typeless_placeholders) {
      if ($self->_use_placeholders) {
        $self->auto_cast(1);
      }
      else {
        $self->ensure_class_loaded($no_bind_vars);
        bless $self, $no_bind_vars;
        $self->_rebless;
      }
    }
  }

  elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
    # not necessarily FreeTDS, but no placeholders nevertheless
    $self->ensure_class_loaded($no_bind_vars);
    bless $self, $no_bind_vars;
    $self->_rebless;
  }
  # this is highly unlikely, but we check just in case
  elsif (not $self->_use_typeless_placeholders) {
    $self->auto_cast(1);
  }
}

sub _init {
  my $self = shift;
  $self->_set_max_connect(256);

# create storage for insert/(update blob) transactions,
# unless this is that storage
  return if $self->_is_extra_storage;

  my $writer_storage = (ref $self)->new;

  $writer_storage->_is_extra_storage(1);
  $writer_storage->connect_info($self->connect_info);
  $writer_storage->auto_cast($self->auto_cast);

  $self->_writer_storage($writer_storage);

# create a bulk storage unless connect_info is a coderef
  return if ref($self->_dbi_connect_info->[0]) eq 'CODE';

  my $bulk_storage = (ref $self)->new;

  $bulk_storage->_is_extra_storage(1);
  $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
  $bulk_storage->connect_info($self->connect_info);

# this is why
  $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';

  $self->_bulk_storage($bulk_storage);
}

for my $method (@also_proxy_to_extra_storages) {
  no strict 'refs';
  no warnings 'redefine';

  my $replaced = __PACKAGE__->can($method);

  *{$method} = Sub::Name::subname $method => sub {
    my $self = shift;
    $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
    $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
    return $self->$replaced(@_);
  };
}

sub disconnect {
  my $self = shift;

# Even though we call $sth->finish for uses of the bulk API, there's still an
# "active statement" warning on disconnect, which we throw away here.
# This is due to the bug described in insert_bulk.
# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
  local $SIG{__WARN__} = sub {
    warn $_[0] unless $_[0] =~ /active statement/i;
  } if $self->_is_bulk_storage;

# so that next transaction gets a dbh
  $self->_began_bulk_work(0) if $self->_is_bulk_storage;

  $self->next::method;
}

# Set up session settings for Sybase databases for the connection.
#
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
# only want when AutoCommit is off.
#
# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
sub _run_connection_actions {
  my $self = shift;

  if ($self->_is_bulk_storage) {
# this should be cleared on every reconnect
    $self->_began_bulk_work(0);
    return;
  }

  if (not $self->using_freetds) {
    $self->_dbh->{syb_chained_txn} = 1;
  } else {
    # based on LongReadLen in connect_info
    $self->set_textsize;

    if ($self->_dbh_autocommit) {
      $self->_dbh->do('SET CHAINED OFF');
    } else {
      $self->_dbh->do('SET CHAINED ON');
    }
  }

  $self->next::method(@_);
}

=head2 connect_call_blob_setup

Used as:

  on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]

Does C<< $dbh->{syb_binary_images} = 1; >> to return C<IMAGE> data as raw binary
instead of as a hex string.

Recommended.

Also sets the C<log_on_update> value for blob write operations. The default is
C<1>, but C<0> is better if your database is configured for it.

See
L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.

=cut

sub connect_call_blob_setup {
  my $self = shift;
  my %args = @_;
  my $dbh = $self->_dbh;
  $dbh->{syb_binary_images} = 1;

  $self->_blob_log_on_update($args{log_on_update})
    if exists $args{log_on_update};
}

sub _is_lob_type {
  my $self = shift;
  my $type = shift;
  $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
}

sub _is_lob_column {
  my ($self, $source, $column) = @_;

  return $self->_is_lob_type($source->column_info($column)->{data_type});
}

sub _prep_for_execute {
  my $self = shift;
  my ($op, $extra_bind, $ident, $args) = @_;

  my ($sql, $bind) = $self->next::method (@_);

  my $table = blessed $ident ? $ident->from : $ident;

  my $bind_info = $self->_resolve_column_info(
    $ident, [map $_->[0], @{$bind}]
  );
  my $bound_identity_col =
    first { $bind_info->{$_}{is_auto_increment} }
    keys %$bind_info
  ;

  my $columns_info = blessed $ident && $ident->columns_info;

  my $identity_col =
    $columns_info &&
    first { $columns_info->{$_}{is_auto_increment} }
      keys %$columns_info
  ;

  if (($op eq 'insert' && $bound_identity_col) ||
      ($op eq 'update' && exists $args->[0]{$identity_col})) {
    $sql = join ("\n",
      $self->_set_table_identity_sql($op => $table, 'on'),
      $sql,
      $self->_set_table_identity_sql($op => $table, 'off'),
    );
  }

  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
      (not $self->{insert_bulk})) {
    $sql =
      "$sql\n" .
      $self->_fetch_identity_sql($ident, $identity_col);
  }

  return ($sql, $bind);
}

sub _set_table_identity_sql {
  my ($self, $op, $table, $on_off) = @_;

  return sprintf 'SET IDENTITY_%s %s %s',
    uc($op), $self->sql_maker->_quote($table), uc($on_off);
}

# Stolen from SQLT, with some modifications. This is a makeshift
# solution before a sane type-mapping library is available, thus
# the 'our' for easy overrides.
our %TYPE_MAPPING = (
  number    => 'numeric',
  money     => 'money',
  varchar   => 'varchar',
  varchar2  => 'varchar',
  timestamp => 'datetime',
  text      => 'varchar',
  real      => 'double precision',
  comment   => 'text',
  bit       => 'bit',
  tinyint   => 'smallint',
  float     => 'double precision',
  serial    => 'numeric',
  bigserial => 'numeric',
  boolean   => 'varchar',
  long      => 'varchar',
);

sub _native_data_type {
  my ($self, $type) = @_;

  $type = lc $type;
  $type =~ s/\s* identity//x;

  return uc($TYPE_MAPPING{$type} || $type);
}

sub _fetch_identity_sql {
  my ($self, $source, $col) = @_;

  return sprintf ("SELECT MAX(%s) FROM %s",
    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
  );
}

sub _execute {
  my $self = shift;
  my ($op) = @_;

  my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);

  if ($op eq 'insert') {
    $self->_identity($sth->fetchrow_array);
    $sth->finish;
  }

  return wantarray ? ($rv, $sth, @bind) : $rv;
}

sub last_insert_id { shift->_identity }

# handles TEXT/IMAGE and transaction for last_insert_id
sub insert {
  my $self = shift;
  my ($source, $to_insert) = @_;

  my $columns_info = $source->columns_info;

  my $identity_col =
    (first { $columns_info->{$_}{is_auto_increment} }
      keys %$columns_info )
    || '';

  # check for empty insert
  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
  # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
  # and computed columns)
  if (not %$to_insert) {
    for my $col ($source->columns) {
      next if $col eq $identity_col;

      my $info = $source->column_info($col);

      next if ref $info->{default_value} eq 'SCALAR'
        || (exists $info->{data_type} && (not defined $info->{data_type}));

      next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;

      $to_insert->{$col} = \'DEFAULT';
    }
  }

  my $blob_cols = $self->_remove_blob_cols($source, $to_insert);

  # do we need the horrific SELECT MAX(COL) hack?
  my $dumb_last_insert_id =
       $identity_col
    && (not exists $to_insert->{$identity_col})
    && ($self->_identity_method||'') ne '@@IDENTITY';

  my $next = $self->next::can;

  # we are already in a transaction, or there are no blobs
  # and we don't need the PK - just (try to) do it
  if ($self->{transaction_depth}
        || (!$blob_cols && !$dumb_last_insert_id)
  ) {
    return $self->_insert (
      $next, $source, $to_insert, $blob_cols, $identity_col
    );
  }

  # otherwise use the _writer_storage to do the insert+transaction on another
  # connection
  my $guard = $self->_writer_storage->txn_scope_guard;

  my $updated_cols = $self->_writer_storage->_insert (
    $next, $source, $to_insert, $blob_cols, $identity_col
  );

  $self->_identity($self->_writer_storage->_identity);

  $guard->commit;

  return $updated_cols;
}

sub _insert {
  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;

  my $updated_cols = $self->$next ($source, $to_insert);

  my $final_row = {
    ($identity_col ?
      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
    %$to_insert,
    %$updated_cols,
  };

  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;

  return $updated_cols;
}

sub update {
  my $self = shift;
  my ($source, $fields, $where, @rest) = @_;

  my $wantarray = wantarray;

  my $blob_cols = $self->_remove_blob_cols($source, $fields);

  my $table = $source->name;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info;

  my $is_identity_update = $identity_col && defined $fields->{$identity_col};

  return $self->next::method(@_) unless $blob_cols;

# If there are any blobs in $where, Sybase will return a descriptive error
# message.
# XXX blobs can still be used with a LIKE query, and this should be handled.

# update+blob update(s) done atomically on separate connection
  $self = $self->_writer_storage;

  my $guard = $self->txn_scope_guard;

# First update the blob columns to be updated to '' (taken from $fields, where
# it is originally put by _remove_blob_cols.)
  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;

# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.

  $self->next::method($source, \%blobs_to_empty, $where, @rest);

# Now update the blobs before the other columns in case the update of other
# columns makes the search condition invalid.
  $self->_update_blobs($source, $blob_cols, $where);

  my @res;
  if (%$fields) {
    if ($wantarray) {
      @res = $self->next::method(@_);
    }
    elsif (defined $wantarray) {
      $res[0] = $self->next::method(@_);
    }
    else {
      $self->next::method(@_);
    }
  }

  $guard->commit;

  return $wantarray ? @res : $res[0];
}

sub insert_bulk {
  my $self = shift;
  my ($source, $cols, $data) = @_;

  my $columns_info = $source->columns_info;

  my $identity_col =
    first { $columns_info->{$_}{is_auto_increment} }
    keys %$columns_info;

  my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;

  my @source_columns = $source->columns;

  my $use_bulk_api =
    $self->_bulk_storage &&
    $self->_get_dbh->{syb_has_blk};

  if ((not $use_bulk_api)
        &&
      (ref($self->_dbi_connect_info->[0]) eq 'CODE')
        &&
      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
    carp <<'EOF';
Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
regular array inserts.
EOF
    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
  }

  if (not $use_bulk_api) {
    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);

# _execute_array uses a txn anyway, but it ends too early in case we need to
# select max(col) to get the identity for inserting blobs.
    ($self, my $guard) = $self->{transaction_depth} == 0 ?
      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
      :
      ($self, undef);

    local $self->{insert_bulk} = 1;

    $self->next::method(@_);

    if ($blob_cols) {
      if ($is_identity_insert) {
        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
      }
      else {
        my @cols_with_identities = (@$cols, $identity_col);

        ## calculate identities
        # XXX This assumes identities always increase by 1, which may or may not
        # be true.
        my ($last_identity) =
          $self->_dbh->selectrow_array (
            $self->_fetch_identity_sql($source, $identity_col)
          );
        my @identities = (($last_identity - @$data + 1) .. $last_identity);

        my @data_with_identities = map [@$_, shift @identities], @$data;

        $self->_insert_blobs_array (
          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
        );
      }
    }

    $guard->commit if $guard;

    return;
  }

# otherwise, use the bulk API

# rearrange @$data so that columns are in database order
  my %orig_idx;
  @orig_idx{@$cols} = 0..$#$cols;

  my %new_idx;
  @new_idx{@source_columns} = 0..$#source_columns;

  my @new_data;
  for my $datum (@$data) {
    my $new_datum = [];
    for my $col (@source_columns) {
# identity data will be 'undef' if not $is_identity_insert
# columns with defaults will also be 'undef'
      $new_datum->[ $new_idx{$col} ] =
        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
    }
    push @new_data, $new_datum;
  }

# bcp identity index is 1-based
  my $identity_idx = exists $new_idx{$identity_col} ?
    $new_idx{$identity_col} + 1 : 0;

## Set a client-side conversion error handler, straight from DBD::Sybase docs.
# This ignores any data conversion errors detected by the client side libs, as
# they are usually harmless.
  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
    Sub::Name::subname insert_bulk => sub {
      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;

      return 1 if $errno == 36;

      carp
        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
        ($errmsg ? "\n$errmsg" : '') .
        ($osmsg ? "\n$osmsg" : '') .
        ($blkmsg ? "\n$blkmsg" : '');

      return 0;
  });

  my $exception = '';
  try {
    my $bulk = $self->_bulk_storage;

    my $guard = $bulk->txn_scope_guard;

## XXX get this to work instead of our own $sth
## will require SQLA or *Hacks changes for ordered columns
#    $bulk->next::method($source, \@source_columns, \@new_data, {
#      syb_bcp_attribs => {
#        identity_flag   => $is_identity_insert,
#        identity_column => $identity_idx,
#      }
#    });
    my $sql = 'INSERT INTO ' .
      $bulk->sql_maker->_quote($source->name) . ' (' .
# colname list is ignored for BCP, but does no harm
      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
      ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';

## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
## better yet the version above. Should be fixed in DBD::Sybase .
    my $sth = $bulk->_get_dbh->prepare($sql,
#      'insert', # op
      {
        syb_bcp_attribs => {
          identity_flag   => $is_identity_insert,
          identity_column => $identity_idx,
        }
      }
    );

    my @bind = do {
      my $idx = 0;
      map [ $_, $idx++ ], @source_columns;
    };

    $self->_execute_array(
      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
        $guard->commit
      }
    );

    $bulk->_query_end($sql);
  } catch {
    $exception = shift;
  };

  DBD::Sybase::set_cslib_cb($orig_cslib_cb);

  if ($exception =~ /-Y option/) {
    my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
          . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
    ;
    $w .= "\n$exception" if $self->debug;
    carp $w;

    $self->_bulk_storage(undef);
    unshift @_, $self;
    goto \&insert_bulk;
  }
  elsif ($exception) {
# rollback makes the bulkLogin connection unusable
    $self->_bulk_storage->disconnect;
    $self->throw_exception($exception);
  }
}

sub _dbh_execute_array {
  my ($self, $sth, $tuple_status, $cb) = @_;

  my $rv = $self->next::method($sth, $tuple_status);
  $cb->() if $cb;

  return $rv;
}

# Make sure blobs are not bound as placeholders, and return any non-empty ones
# as a hash.
sub _remove_blob_cols {
  my ($self, $source, $fields) = @_;

  my %blob_cols;

  for my $col (keys %$fields) {
    if ($self->_is_lob_column($source, $col)) {
      my $blob_val = delete $fields->{$col};
      if (not defined $blob_val) {
        $fields->{$col} = \'NULL';
      }
      else {
        $fields->{$col} = \"''";
        $blob_cols{$col} = $blob_val unless $blob_val eq '';
      }
    }
  }

  return %blob_cols ? \%blob_cols : undef;
}

# same for insert_bulk
sub _remove_blob_cols_array {
  my ($self, $source, $cols, $data) = @_;

  my @blob_cols;

  for my $i (0..$#$cols) {
    my $col = $cols->[$i];

    if ($self->_is_lob_column($source, $col)) {
      for my $j (0..$#$data) {
        my $blob_val = delete $data->[$j][$i];
        if (not defined $blob_val) {
          $data->[$j][$i] = \'NULL';
        }
        else {
          $data->[$j][$i] = \"''";
          $blob_cols[$j][$i] = $blob_val
            unless $blob_val eq '';
        }
      }
    }
  }

  return @blob_cols ? \@blob_cols : undef;
}

sub _update_blobs {
  my ($self, $source, $blob_cols, $where) = @_;

  my @primary_cols = try
    { $source->_pri_cols }
    catch {
      $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
    };

# check if we're updating a single row by PK
  my $pk_cols_in_where = 0;
  for my $col (@primary_cols) {
    $pk_cols_in_where++ if defined $where->{$col};
  }
  my @rows;

  if ($pk_cols_in_where == @primary_cols) {
    my %row_to_update;
    @row_to_update{@primary_cols} = @{$where}{@primary_cols};
    @rows = \%row_to_update;
  } else {
    my $cursor = $self->select ($source, \@primary_cols, $where, {});
    @rows = map {
      my %row; @row{@primary_cols} = @$_; \%row
    } $cursor->all;
  }

  for my $row (@rows) {
    $self->_insert_blobs($source, $blob_cols, $row);
  }
}

sub _insert_blobs {
  my ($self, $source, $blob_cols, $row) = @_;
  my $dbh = $self->_get_dbh;

  my $table = $source->name;

  my %row = %$row;
  my @primary_cols = try
    { $source->_pri_cols }
    catch {
      $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
    };

  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);

  for my $col (keys %$blob_cols) {
    my $blob = $blob_cols->{$col};

    my %where = map { ($_, $row{$_}) } @primary_cols;

    my $cursor = $self->select ($source, [$col], \%where, {});
    $cursor->next;
    my $sth = $cursor->sth;

    if (not $sth) {
      $self->throw_exception(
          "Could not find row in table '$table' for blob update:\n"
        . (Dumper \%where)
      );
    }

    try {
      do {
        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
      } while $sth->fetch;

      $sth->func('ct_prepare_send') or die $sth->errstr;

      my $log_on_update = $self->_blob_log_on_update;
      $log_on_update    = 1 if not defined $log_on_update;

      $sth->func('CS_SET', 1, {
        total_txtlen => length($blob),
        log_on_update => $log_on_update
      }, 'ct_data_info') or die $sth->errstr;

      $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;

      $sth->func('ct_finish_send') or die $sth->errstr;
    }
    catch {
      if ($self->using_freetds) {
        $self->throw_exception (
          "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
        );
      }
      else {
        $self->throw_exception($_);
      }
    }
    finally {
      $sth->finish if $sth;
    };
  }
}

sub _insert_blobs_array {
  my ($self, $source, $blob_cols, $cols, $data) = @_;

  for my $i (0..$#$data) {
    my $datum = $data->[$i];

    my %row;
    @row{ @$cols } = @$datum;

    my %blob_vals;
    for my $j (0..$#$cols) {
      if (exists $blob_cols->[$i][$j]) {
        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
      }
    }

    $self->_insert_blobs ($source, \%blob_vals, \%row);
  }
}

=head2 connect_call_datetime_setup

Used as:

  on_connect_call => 'datetime_setup'

In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:

  $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
  $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080

On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
L<DateTime::Format::Sybase>, which you will need to install.

This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
C<SMALLDATETIME> columns only have minute precision.

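
As a purely illustrative sketch (the column name here is made up), a
C<datetime> column declared in a Result class is then inflated to L<DateTime>
objects automatically:

  __PACKAGE__->load_components('InflateColumn::DateTime');

  __PACKAGE__->add_columns(
    created_on => {
      data_type   => 'datetime',
      is_nullable => 1,
    },
  );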

=cut

{
  my $old_dbd_warned = 0;

  sub connect_call_datetime_setup {
    my $self = shift;
    my $dbh = $self->_get_dbh;

    if ($dbh->can('syb_date_fmt')) {
      # amazingly, this works with FreeTDS
      $dbh->syb_date_fmt('ISO_strict');
    } elsif (not $old_dbd_warned) {
      carp "Your DBD::Sybase is too old to support ".
        "DBIx::Class::InflateColumn::DateTime, please upgrade!";
      $old_dbd_warned = 1;
    }

    $dbh->do('SET DATEFORMAT mdy');

    1;
  }
}

sub datetime_parser_type { "DateTime::Format::Sybase" }

# ->begin_work and such have no effect with FreeTDS but we run them anyway to
# let the DBD keep any state it needs to.
#
# If they ever do start working, the extra statements will do no harm (because
# Sybase supports nested transactions.)

sub _dbh_begin_work {
  my $self = shift;

# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
# TRAN once. However, we need to make sure there's a $dbh.
  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;

  $self->next::method(@_);

  if ($self->using_freetds) {
    $self->_get_dbh->do('BEGIN TRAN');
  }

  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
}

sub _dbh_commit {
  my $self = shift;
  if ($self->using_freetds) {
    $self->_dbh->do('COMMIT');
  }
  return $self->next::method(@_);
}

sub _dbh_rollback {
  my $self = shift;
  if ($self->using_freetds) {
    $self->_dbh->do('ROLLBACK');
  }
  return $self->next::method(@_);
}

# savepoint support using ASE syntax

sub _svp_begin {
  my ($self, $name) = @_;

  $self->_get_dbh->do("SAVE TRANSACTION $name");
}

# A new SAVE TRANSACTION with the same name releases the previous one.
sub _svp_release { 1 }

sub _svp_rollback {
  my ($self, $name) = @_;

  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
}

1;

=head1 Schema::Loader Support

As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
most (if not all) versions of Sybase ASE.

=head1 FreeTDS

This driver supports L<DBD::Sybase> compiled against FreeTDS
(L<http://www.freetds.org/>) to the best of our ability; however, it is
recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
libraries, which are part of the Sybase ASE distribution.

The Open Client FAQ is here:
L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.

Sybase ASE for Linux (which comes with the Open Client libraries) may be
downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.

To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:

  perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'

Some versions of the libraries involved will not support placeholders, in which
case the storage will be reblessed to
L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.

In some configurations, placeholders will work but will throw implicit type
conversion errors for anything that's not expecting a string. In such a case,
the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
automatically set, which you may also enable on connection with
L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
definitions in your Result classes, and is mapped to a Sybase type (if it isn't
one already) using a mapping based on L<SQL::Translator>.


In other configurations, placeholders will work just as they do with the Sybase
Open Client libraries.

Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.

=head1 INSERTS WITH PLACEHOLDERS

With placeholders enabled, inserts are done in a transaction so that there are
no concurrency issues with getting the inserted identity value using
C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
mode.

In addition, they are done on a separate connection so that it's possible to
have active cursors when doing an insert.

When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
it's a session variable.

=head1 TRANSACTIONS

Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
begin a transaction while there are active cursors, nor can you use multiple
active cursors within a transaction. An active cursor is, for example, a
L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.

For example, this will not work:

  $schema->txn_do(sub {
    my $rs = $schema->resultset('Book');
    while (my $row = $rs->next) {
      $schema->resultset('MetaData')->create({
        book_id => $row->id,
        ...
      });
    }
  });

This won't either:

  my $first_row = $large_rs->first;
  $schema->txn_do(sub { ... });

Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
are not affected, as they are done on an extra database handle.

Some workarounds:

=over 4

=item * use L<DBIx::Class::Storage::DBI::Replicated>

=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>

=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
(see the sketch below)

=back

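
As a sketch of the last workaround, reusing the C<Book> and C<MetaData>
classes from the example above, exhausting the cursor with C<all> before
starting the transaction avoids the problem:

  my @books = $schema->resultset('Book')->all; # no cursor left open

  $schema->txn_do(sub {
    for my $book (@books) {
      $schema->resultset('MetaData')->create({
        book_id => $book->id,
      });
    }
  });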

=head1 MAXIMUM CONNECTIONS

The TDS protocol makes separate connections to the server for active statements
in the background. By default the number of such connections is limited to 25,
on both the client side and the server side.

This is a bit too low for a complex L<DBIx::Class> application, so on connection
the client side setting is set to C<256> (see L<DBD::Sybase/maxConnect>.) You
can override it to whatever setting you like in the DSN.

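
For example (the server name and value below are placeholders), the option can
be appended directly to the DSN:

  dbi:Sybase:server=SYBASE;maxConnect=1024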

See
L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
for information on changing the setting on the server side.

=head1 DATES

See L</connect_call_datetime_setup> to set up date formats
for L<DBIx::Class::InflateColumn::DateTime>.

=head1 TEXT/IMAGE COLUMNS

L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
C<TEXT/IMAGE> columns.

Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:

  $schema->storage->dbh->do("SET TEXTSIZE $bytes");

or

  $schema->storage->set_textsize($bytes);

instead.

However, the C<LongReadLen> you pass in
L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
equivalent C<SET TEXTSIZE> command on connection.

See L</connect_call_blob_setup> for a
L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
with C<IMAGE> columns.

=head1 BULK API

The experimental L<DBD::Sybase> Bulk API support is used for
L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
on a separate connection.

To use this feature effectively, use a large number of rows for each
L<populate|DBIx::Class::ResultSet/populate> call, e.g.:

  while (my $rows = $data_source->get_100_rows()) {
    $rs->populate($rows);
  }

B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
calls in your C<Result> classes B<must> list columns in database order for this
to work. Also, you may have to unset the C<LANG> environment variable before
loading your app, if it doesn't match the character set of your database.

When inserting IMAGE columns using this method, you'll need to use
L</connect_call_blob_setup> as well.

=head1 COMPUTED COLUMNS

If you have columns such as:

  created_dtm AS getdate()

represent them in your Result classes as:

  created_dtm => {
    data_type => undef,
    default_value => \'getdate()',
    is_nullable => 0,
  }

The C<data_type> must exist and must be C<undef>. Then empty inserts will work
on tables with such columns.

=head1 TIMESTAMP COLUMNS

C<timestamp> columns in Sybase ASE are not really timestamps, see:
L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.

They should be defined in your Result classes as:

  ts => {
    data_type => 'timestamp',
    is_nullable => 0,
    inflate_datetime => 0,
  }

The C<< inflate_datetime => 0 >> is necessary if you use
L<DBIx::Class::InflateColumn::DateTime>, and most people do, and still want to
be able to read these values.

The values will come back as hexadecimal.

=head1 TODO

=over

=item *

Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
any active cursors, using eager cursors.

=item *

Real limits and limited counts using stored procedures deployed on startup.

=item *

Adaptive Server Anywhere (ASA) support

=item *

Blob update with a LIKE query on a blob, without invalidating the WHERE condition.

=item *

bulk_insert using prepare_cached (see comments.)

=back

=head1 AUTHOR

See L<DBIx::Class/CONTRIBUTORS>.

=head1 LICENSE

You may distribute this code under the same terms as Perl itself.

=cut
# vim:sts=2 sw=2: