merge and improve _is_lob_type from Sybase::ASE into Storage::DBI
[dbsrgits/DBIx-Class.git] / lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
057db5ce 1package DBIx::Class::Storage::DBI::Sybase::ASE;
2
3use strict;
4use warnings;
5
6use base qw/
7 DBIx::Class::Storage::DBI::Sybase
8 DBIx::Class::Storage::DBI::AutoCast
9/;
10use mro 'c3';
11use Carp::Clan qw/^DBIx::Class/;
6298a324 12use Scalar::Util 'blessed';
13use List::Util 'first';
057db5ce 14use Sub::Name();
6298a324 15use Data::Dumper::Concise 'Dumper';
ed7ab0f4 16use Try::Tiny;
fd323bf1 17use namespace::clean;
057db5ce 18
6a247f33 19__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
20
057db5ce 21__PACKAGE__->mk_group_accessors('simple' =>
22 qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
23 _bulk_storage _is_bulk_storage _began_bulk_work
24 _bulk_disabled_due_to_coderef_connect_info_warned
25 _identity_method/
26);
27
28my @also_proxy_to_extra_storages = qw/
29 connect_call_set_auto_cast auto_cast connect_call_blob_setup
30 connect_call_datetime_setup
31
32 disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
33 auto_savepoint unsafe cursor_class debug debugobj schema
34/;
35
36=head1 NAME
37
38DBIx::Class::Storage::DBI::Sybase::ASE - Sybase ASE SQL Server support for
39DBIx::Class
40
41=head1 SYNOPSIS
42
43This subclass supports L<DBD::Sybase> for real (non-Microsoft) Sybase databases.
44
45=head1 DESCRIPTION
46
47If your version of Sybase does not support placeholders, then your storage will
48be reblessed to L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
49You can also enable that driver explicitly; see the documentation for more
50details.
51
52With this driver there is unfortunately no way to get the C<last_insert_id>
53without doing a C<SELECT MAX(col)>. This is done safely in a transaction
54(which locks the table). See L</INSERTS WITH PLACEHOLDERS>.
55
8384a713 56A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
057db5ce 57
58 on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
59
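For example, a complete connection might look like this (a minimal sketch; the
DSN, credentials and schema class name are placeholders):

  my $schema = My::Schema->connect(
    'dbi:Sybase:server=prod_ase;database=mydb',
    $user,
    $pass,
    {
      AutoCommit      => 1,
      on_connect_call => [
        ['datetime_setup'],
        ['blob_setup', log_on_update => 0],
      ],
    },
  );
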
60=head1 METHODS
61
62=cut
63
64sub _rebless {
65 my $self = shift;
66
67 my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
68
69 if ($self->using_freetds) {
70 carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
71
72You are using FreeTDS with Sybase.
73
74We will do our best to support this configuration, but please consider this
75support experimental.
76
77TEXT/IMAGE columns will definitely not work.
78
79You are encouraged to recompile DBD::Sybase with the Sybase Open Client libraries
80instead.
81
82See perldoc DBIx::Class::Storage::DBI::Sybase::ASE for more details.
83
84To turn off this warning set the DBIC_SYBASE_FREETDS_NOWARN environment
85variable.
86EOF
87
bbdda281 88 if (not $self->_use_typeless_placeholders) {
89 if ($self->_use_placeholders) {
057db5ce 90 $self->auto_cast(1);
91 }
92 else {
93 $self->ensure_class_loaded($no_bind_vars);
94 bless $self, $no_bind_vars;
95 $self->_rebless;
96 }
97 }
98 }
99
100 elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
101 # not necessarily FreeTDS, but no placeholders nevertheless
102 $self->ensure_class_loaded($no_bind_vars);
103 bless $self, $no_bind_vars;
104 $self->_rebless;
105 }
106 # this is highly unlikely, but we check just in case
bbdda281 107 elsif (not $self->_use_typeless_placeholders) {
057db5ce 108 $self->auto_cast(1);
109 }
110}
111
112sub _init {
113 my $self = shift;
114 $self->_set_max_connect(256);
115
116# create storage for insert/(update blob) transactions,
117# unless this is that storage
118 return if $self->_is_extra_storage;
119
120 my $writer_storage = (ref $self)->new;
121
122 $writer_storage->_is_extra_storage(1);
123 $writer_storage->connect_info($self->connect_info);
124 $writer_storage->auto_cast($self->auto_cast);
125
126 $self->_writer_storage($writer_storage);
127
128# create a bulk storage unless connect_info is a coderef
129 return if ref($self->_dbi_connect_info->[0]) eq 'CODE';
130
131 my $bulk_storage = (ref $self)->new;
132
133 $bulk_storage->_is_extra_storage(1);
134 $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
135 $bulk_storage->connect_info($self->connect_info);
136
137# this is why
138 $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
139
140 $self->_bulk_storage($bulk_storage);
141}
142
143for my $method (@also_proxy_to_extra_storages) {
144 no strict 'refs';
145 no warnings 'redefine';
146
147 my $replaced = __PACKAGE__->can($method);
148
149 *{$method} = Sub::Name::subname $method => sub {
150 my $self = shift;
151 $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
152 $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
153 return $self->$replaced(@_);
154 };
155}
156
157sub disconnect {
158 my $self = shift;
159
160# Even though we call $sth->finish for uses of the bulk API, there's still an
161# "active statement" warning on disconnect, which we throw away here.
162# This is due to the bug described in insert_bulk.
163# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
164 local $SIG{__WARN__} = sub {
165 warn $_[0] unless $_[0] =~ /active statement/i;
166 } if $self->_is_bulk_storage;
167
168# so that next transaction gets a dbh
169 $self->_began_bulk_work(0) if $self->_is_bulk_storage;
170
171 $self->next::method;
172}
173
174# Set up session settings for Sybase databases for the connection.
175#
176# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
177# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
178# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
179# only want when AutoCommit is off.
180#
181# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
182sub _run_connection_actions {
183 my $self = shift;
184
185 if ($self->_is_bulk_storage) {
186# this should be cleared on every reconnect
187 $self->_began_bulk_work(0);
188 return;
189 }
190
191 if (not $self->using_freetds) {
192 $self->_dbh->{syb_chained_txn} = 1;
193 } else {
194 # based on LongReadLen in connect_info
195 $self->set_textsize;
196
197 if ($self->_dbh_autocommit) {
198 $self->_dbh->do('SET CHAINED OFF');
199 } else {
200 $self->_dbh->do('SET CHAINED ON');
201 }
202 }
203
204 $self->next::method(@_);
205}
206
207=head2 connect_call_blob_setup
208
209Used as:
210
211 on_connect_call => [ [ 'blob_setup', log_on_update => 0 ] ]
212
213Sets C<< $dbh->{syb_binary_images} = 1 >> so that C<IMAGE> data is returned as raw
214binary instead of as a hex string.
215
216Recommended.
217
218Also sets the C<log_on_update> value for blob write operations. The default is
219C<1>, but C<0> is better if your database is configured for it.
220
221See
222L<DBD::Sybase/Handling_IMAGE/TEXT_data_with_syb_ct_get_data()/syb_ct_send_data()>.
223
224=cut
225
226sub connect_call_blob_setup {
227 my $self = shift;
228 my %args = @_;
229 my $dbh = $self->_dbh;
230 $dbh->{syb_binary_images} = 1;
231
232 $self->_blob_log_on_update($args{log_on_update})
233 if exists $args{log_on_update};
234}
235
057db5ce 236sub _is_lob_column {
237 my ($self, $source, $column) = @_;
238
239 return $self->_is_lob_type($source->column_info($column)->{data_type});
240}
241
242sub _prep_for_execute {
243 my $self = shift;
244 my ($op, $extra_bind, $ident, $args) = @_;
245
246 my ($sql, $bind) = $self->next::method (@_);
247
6298a324 248 my $table = blessed $ident ? $ident->from : $ident;
057db5ce 249
250 my $bind_info = $self->_resolve_column_info(
251 $ident, [map $_->[0], @{$bind}]
252 );
6298a324 253 my $bound_identity_col =
254 first { $bind_info->{$_}{is_auto_increment} }
255 keys %$bind_info
057db5ce 256 ;
e366f807 257
258 my $columns_info = blessed $ident && $ident->columns_info;
259
6298a324 260 my $identity_col =
e366f807 261 $columns_info &&
262 first { $columns_info->{$_}{is_auto_increment} }
263 keys %$columns_info
057db5ce 264 ;
265
266 if (($op eq 'insert' && $bound_identity_col) ||
267 ($op eq 'update' && exists $args->[0]{$identity_col})) {
268 $sql = join ("\n",
269 $self->_set_table_identity_sql($op => $table, 'on'),
270 $sql,
271 $self->_set_table_identity_sql($op => $table, 'off'),
272 );
273 }
274
275 if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
276 (not $self->{insert_bulk})) {
277 $sql =
278 "$sql\n" .
279 $self->_fetch_identity_sql($ident, $identity_col);
280 }
281
282 return ($sql, $bind);
283}
284
285sub _set_table_identity_sql {
286 my ($self, $op, $table, $on_off) = @_;
287
288 return sprintf 'SET IDENTITY_%s %s %s',
289 uc($op), $self->sql_maker->_quote($table), uc($on_off);
290}
291
292# Stolen from SQLT, with some modifications. This is a makeshift
293# solution before a sane type-mapping library is available, thus
294# the 'our' for easy overrides.
295our %TYPE_MAPPING = (
296 number => 'numeric',
297 money => 'money',
298 varchar => 'varchar',
299 varchar2 => 'varchar',
300 timestamp => 'datetime',
301 text => 'varchar',
302 real => 'double precision',
303 comment => 'text',
304 bit => 'bit',
305 tinyint => 'smallint',
306 float => 'double precision',
307 serial => 'numeric',
308 bigserial => 'numeric',
309 boolean => 'varchar',
310 long => 'varchar',
311);
312
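# A (hypothetical) application that prefers BIT over the default VARCHAR
# mapping for boolean columns could override a single entry before connecting:
#
#   $DBIx::Class::Storage::DBI::Sybase::ASE::TYPE_MAPPING{boolean} = 'bit';
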
313sub _native_data_type {
314 my ($self, $type) = @_;
315
316 $type = lc $type;
317 $type =~ s/\s* identity//x;
318
319 return uc($TYPE_MAPPING{$type} || $type);
320}
321
322sub _fetch_identity_sql {
323 my ($self, $source, $col) = @_;
324
325 return sprintf ("SELECT MAX(%s) FROM %s",
326 map { $self->sql_maker->_quote ($_) } ($col, $source->from)
327 );
328}
329
330sub _execute {
331 my $self = shift;
332 my ($op) = @_;
333
334 my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
335
336 if ($op eq 'insert') {
337 $self->_identity($sth->fetchrow_array);
338 $sth->finish;
339 }
340
341 return wantarray ? ($rv, $sth, @bind) : $rv;
342}
343
344sub last_insert_id { shift->_identity }
345
346# handles TEXT/IMAGE and transaction for last_insert_id
347sub insert {
348 my $self = shift;
349 my ($source, $to_insert) = @_;
350
e366f807 351 my $columns_info = $source->columns_info;
352
6298a324 353 my $identity_col =
e366f807 354 (first { $columns_info->{$_}{is_auto_increment} }
355 keys %$columns_info )
6298a324 356 || '';
057db5ce 357
358 # check for empty insert
359 # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
6469dabf 360 # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
361 # and computed columns)
057db5ce 362 if (not %$to_insert) {
363 for my $col ($source->columns) {
364 next if $col eq $identity_col;
6469dabf 365
366 my $info = $source->column_info($col);
367
368 next if ref $info->{default_value} eq 'SCALAR'
369 || (exists $info->{data_type} && (not defined $info->{data_type}));
370
371 next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
372
057db5ce 373 $to_insert->{$col} = \'DEFAULT';
374 }
375 }
376
377 my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
378
379 # do we need the horrific SELECT MAX(COL) hack?
380 my $dumb_last_insert_id =
381 $identity_col
382 && (not exists $to_insert->{$identity_col})
383 && ($self->_identity_method||'') ne '@@IDENTITY';
384
385 my $next = $self->next::can;
386
387 # we are already in a transaction, or there are no blobs
388 # and we don't need the PK - just (try to) do it
389 if ($self->{transaction_depth}
390 || (!$blob_cols && !$dumb_last_insert_id)
391 ) {
392 return $self->_insert (
393 $next, $source, $to_insert, $blob_cols, $identity_col
394 );
395 }
396
397 # otherwise use the _writer_storage to do the insert+transaction on another
398 # connection
399 my $guard = $self->_writer_storage->txn_scope_guard;
400
401 my $updated_cols = $self->_writer_storage->_insert (
402 $next, $source, $to_insert, $blob_cols, $identity_col
403 );
404
405 $self->_identity($self->_writer_storage->_identity);
406
407 $guard->commit;
408
409 return $updated_cols;
410}
411
412sub _insert {
413 my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
414
415 my $updated_cols = $self->$next ($source, $to_insert);
416
417 my $final_row = {
418 ($identity_col ?
419 ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
420 %$to_insert,
421 %$updated_cols,
422 };
423
424 $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
425
426 return $updated_cols;
427}
428
429sub update {
430 my $self = shift;
431 my ($source, $fields, $where, @rest) = @_;
432
057db5ce 433 my $blob_cols = $self->_remove_blob_cols($source, $fields);
434
435 my $table = $source->name;
436
e366f807 437 my $columns_info = $source->columns_info;
438
6298a324 439 my $identity_col =
e366f807 440 first { $columns_info->{$_}{is_auto_increment} }
441 keys %$columns_info;
057db5ce 442
443 my $is_identity_update = $identity_col && defined $fields->{$identity_col};
444
445 return $self->next::method(@_) unless $blob_cols;
446
447# If there are any blobs in $where, Sybase will return a descriptive error
448# message.
449# XXX blobs can still be used with a LIKE query, and this should be handled.
450
451# update+blob update(s) done atomically on separate connection
452 $self = $self->_writer_storage;
453
454 my $guard = $self->txn_scope_guard;
455
456# First update the blob columns to be updated to '' (these entries are taken from
457# $fields, where they were originally put by _remove_blob_cols).
458 my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
459
460# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
461
462 $self->next::method($source, \%blobs_to_empty, $where, @rest);
463
464# Now update the blobs before the other columns in case the update of other
465# columns makes the search condition invalid.
466 $self->_update_blobs($source, $blob_cols, $where);
467
468 my @res;
469 if (%$fields) {
cca282b6 470 if (wantarray) {
057db5ce 471 @res = $self->next::method(@_);
472 }
cca282b6 473 elsif (defined wantarray) {
057db5ce 474 $res[0] = $self->next::method(@_);
475 }
476 else {
477 $self->next::method(@_);
478 }
479 }
480
481 $guard->commit;
482
cca282b6 483 return wantarray ? @res : $res[0];
057db5ce 484}
485
486sub insert_bulk {
487 my $self = shift;
488 my ($source, $cols, $data) = @_;
489
e366f807 490 my $columns_info = $source->columns_info;
491
6298a324 492 my $identity_col =
e366f807 493 first { $columns_info->{$_}{is_auto_increment} }
494 keys %$columns_info;
057db5ce 495
6298a324 496 my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;
057db5ce 497
498 my @source_columns = $source->columns;
499
500 my $use_bulk_api =
501 $self->_bulk_storage &&
502 $self->_get_dbh->{syb_has_blk};
503
504 if ((not $use_bulk_api)
505 &&
506 (ref($self->_dbi_connect_info->[0]) eq 'CODE')
507 &&
508 (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
509 carp <<'EOF';
510Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
511regular array inserts.
512EOF
513 $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
514 }
515
516 if (not $use_bulk_api) {
517 my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
518
519# _execute_array uses a txn anyway, but it ends too early in case we need to
520# select max(col) to get the identity for inserting blobs.
521 ($self, my $guard) = $self->{transaction_depth} == 0 ?
522 ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
523 :
524 ($self, undef);
525
526 local $self->{insert_bulk} = 1;
527
528 $self->next::method(@_);
529
530 if ($blob_cols) {
531 if ($is_identity_insert) {
532 $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
533 }
534 else {
535 my @cols_with_identities = (@$cols, $identity_col);
536
537 ## calculate identities
538 # XXX This assumes identities always increase by 1, which may or may not
539 # be true.
540 my ($last_identity) =
541 $self->_dbh->selectrow_array (
542 $self->_fetch_identity_sql($source, $identity_col)
543 );
544 my @identities = (($last_identity - @$data + 1) .. $last_identity);
545
546 my @data_with_identities = map [@$_, shift @identities], @$data;
547
548 $self->_insert_blobs_array (
549 $source, $blob_cols, \@cols_with_identities, \@data_with_identities
550 );
551 }
552 }
553
554 $guard->commit if $guard;
555
556 return;
557 }
558
559# otherwise, use the bulk API
560
561# rearrange @$data so that columns are in database order
562 my %orig_idx;
563 @orig_idx{@$cols} = 0..$#$cols;
564
565 my %new_idx;
566 @new_idx{@source_columns} = 0..$#source_columns;
567
568 my @new_data;
569 for my $datum (@$data) {
570 my $new_datum = [];
571 for my $col (@source_columns) {
572# identity data will be 'undef' if not $is_identity_insert
573# columns with defaults will also be 'undef'
574 $new_datum->[ $new_idx{$col} ] =
575 exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
576 }
577 push @new_data, $new_datum;
578 }
579
580# bcp identity index is 1-based
581 my $identity_idx = exists $new_idx{$identity_col} ?
582 $new_idx{$identity_col} + 1 : 0;
583
584## Set a client-side conversion error handler, straight from DBD::Sybase docs.
585# This ignores any data conversion errors detected by the client side libs, as
586# they are usually harmless.
587 my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
588 Sub::Name::subname insert_bulk => sub {
589 my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
590
591 return 1 if $errno == 36;
592
593 carp
594 "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
595 ($errmsg ? "\n$errmsg" : '') .
596 ($osmsg ? "\n$osmsg" : '') .
597 ($blkmsg ? "\n$blkmsg" : '');
598
599 return 0;
600 });
601
4edfce2f 602 my $exception = '';
ed7ab0f4 603 try {
057db5ce 604 my $bulk = $self->_bulk_storage;
605
606 my $guard = $bulk->txn_scope_guard;
607
608## XXX get this to work instead of our own $sth
609## will require SQLA or *Hacks changes for ordered columns
610# $bulk->next::method($source, \@source_columns, \@new_data, {
611# syb_bcp_attribs => {
612# identity_flag => $is_identity_insert,
613# identity_column => $identity_idx,
614# }
615# });
616 my $sql = 'INSERT INTO ' .
617 $bulk->sql_maker->_quote($source->name) . ' (' .
618# colname list is ignored for BCP, but does no harm
619 (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
620 ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
621
622## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
623## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
624## better yet the version above. Should be fixed in DBD::Sybase.
625 my $sth = $bulk->_get_dbh->prepare($sql,
626# 'insert', # op
627 {
628 syb_bcp_attribs => {
629 identity_flag => $is_identity_insert,
630 identity_column => $identity_idx,
631 }
632 }
633 );
634
635 my @bind = do {
636 my $idx = 0;
637 map [ $_, $idx++ ], @source_columns;
638 };
639
640 $self->_execute_array(
641 $source, $sth, \@bind, \@source_columns, \@new_data, sub {
642 $guard->commit
643 }
644 );
645
646 $bulk->_query_end($sql);
ed7ab0f4 647 } catch {
648 $exception = shift;
057db5ce 649 };
650
057db5ce 651 DBD::Sybase::set_cslib_cb($orig_cslib_cb);
652
653 if ($exception =~ /-Y option/) {
f32e99f9 654 my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
655 . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
656 ;
657 $w .= "\n$exception" if $self->debug;
658 carp $w;
057db5ce 659
057db5ce 660 $self->_bulk_storage(undef);
661 unshift @_, $self;
662 goto \&insert_bulk;
663 }
664 elsif ($exception) {
665# rollback makes the bulkLogin connection unusable
666 $self->_bulk_storage->disconnect;
667 $self->throw_exception($exception);
668 }
669}
670
671sub _dbh_execute_array {
672 my ($self, $sth, $tuple_status, $cb) = @_;
673
674 my $rv = $self->next::method($sth, $tuple_status);
675 $cb->() if $cb;
676
677 return $rv;
678}
679
680# Make sure blobs are not bound as placeholders, and return any non-empty ones
681# as a hash.
682sub _remove_blob_cols {
683 my ($self, $source, $fields) = @_;
684
685 my %blob_cols;
686
687 for my $col (keys %$fields) {
688 if ($self->_is_lob_column($source, $col)) {
689 my $blob_val = delete $fields->{$col};
690 if (not defined $blob_val) {
691 $fields->{$col} = \'NULL';
692 }
693 else {
694 $fields->{$col} = \"''";
695 $blob_cols{$col} = $blob_val unless $blob_val eq '';
696 }
697 }
698 }
699
700 return %blob_cols ? \%blob_cols : undef;
701}
702
703# same for insert_bulk
704sub _remove_blob_cols_array {
705 my ($self, $source, $cols, $data) = @_;
706
707 my @blob_cols;
708
709 for my $i (0..$#$cols) {
710 my $col = $cols->[$i];
711
712 if ($self->_is_lob_column($source, $col)) {
713 for my $j (0..$#$data) {
714 my $blob_val = delete $data->[$j][$i];
715 if (not defined $blob_val) {
716 $data->[$j][$i] = \'NULL';
717 }
718 else {
719 $data->[$j][$i] = \"''";
720 $blob_cols[$j][$i] = $blob_val
721 unless $blob_val eq '';
722 }
723 }
724 }
725 }
726
727 return @blob_cols ? \@blob_cols : undef;
728}
729
730sub _update_blobs {
731 my ($self, $source, $blob_cols, $where) = @_;
732
9780718f 733 my @primary_cols = try
734 { $source->_pri_cols }
735 catch {
736 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
737 };
057db5ce 738
739# check if we're updating a single row by PK
740 my $pk_cols_in_where = 0;
741 for my $col (@primary_cols) {
742 $pk_cols_in_where++ if defined $where->{$col};
743 }
744 my @rows;
745
746 if ($pk_cols_in_where == @primary_cols) {
747 my %row_to_update;
748 @row_to_update{@primary_cols} = @{$where}{@primary_cols};
749 @rows = \%row_to_update;
750 } else {
751 my $cursor = $self->select ($source, \@primary_cols, $where, {});
752 @rows = map {
753 my %row; @row{@primary_cols} = @$_; \%row
754 } $cursor->all;
755 }
756
757 for my $row (@rows) {
758 $self->_insert_blobs($source, $blob_cols, $row);
759 }
760}
761
762sub _insert_blobs {
763 my ($self, $source, $blob_cols, $row) = @_;
764 my $dbh = $self->_get_dbh;
765
766 my $table = $source->name;
767
768 my %row = %$row;
9780718f 769 my @primary_cols = try
770 { $source->_pri_cols }
771 catch {
772 $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
773 };
057db5ce 774
775 $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
776 if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
777
778 for my $col (keys %$blob_cols) {
779 my $blob = $blob_cols->{$col};
780
781 my %where = map { ($_, $row{$_}) } @primary_cols;
782
783 my $cursor = $self->select ($source, [$col], \%where, {});
784 $cursor->next;
785 my $sth = $cursor->sth;
786
787 if (not $sth) {
057db5ce 788 $self->throw_exception(
789 "Could not find row in table '$table' for blob update:\n"
6298a324 790 . (Dumper \%where)
057db5ce 791 );
792 }
793
9780718f 794 try {
057db5ce 795 do {
796 $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
797 } while $sth->fetch;
798
799 $sth->func('ct_prepare_send') or die $sth->errstr;
800
801 my $log_on_update = $self->_blob_log_on_update;
802 $log_on_update = 1 if not defined $log_on_update;
803
804 $sth->func('CS_SET', 1, {
805 total_txtlen => length($blob),
806 log_on_update => $log_on_update
807 }, 'ct_data_info') or die $sth->errstr;
808
809 $sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
810
811 $sth->func('ct_finish_send') or die $sth->errstr;
9780718f 812 }
813 catch {
057db5ce 814 if ($self->using_freetds) {
815 $self->throw_exception (
9780718f 816 "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
057db5ce 817 );
9780718f 818 }
819 else {
820 $self->throw_exception($_);
057db5ce 821 }
822 }
9780718f 823 finally {
824 $sth->finish if $sth;
825 };
057db5ce 826 }
827}
828
829sub _insert_blobs_array {
830 my ($self, $source, $blob_cols, $cols, $data) = @_;
831
832 for my $i (0..$#$data) {
833 my $datum = $data->[$i];
834
835 my %row;
836 @row{ @$cols } = @$datum;
837
838 my %blob_vals;
839 for my $j (0..$#$cols) {
840 if (exists $blob_cols->[$i][$j]) {
841 $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
842 }
843 }
844
845 $self->_insert_blobs ($source, \%blob_vals, \%row);
846 }
847}
848
849=head2 connect_call_datetime_setup
850
851Used as:
852
853 on_connect_call => 'datetime_setup'
854
8384a713 855In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
057db5ce 856
857 $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
858 $dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
859
860These commands are run on connection for use with L<DBIx::Class::InflateColumn::DateTime>,
861via L<DateTime::Format::Sybase>, which you will need to install.
862
863This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
864C<SMALLDATETIME> columns only have minute precision.
865
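For example, with the component loaded, a column declared like this (a minimal
sketch; the Result class and column name are made up) will inflate to
L<DateTime> objects:

  # in a Result class based on DBIx::Class::Core
  __PACKAGE__->load_components('InflateColumn::DateTime');

  __PACKAGE__->add_columns(
    starts_at => { data_type => 'datetime' },
  );

  # after connecting with on_connect_call => 'datetime_setup':
  my $dt = $schema->resultset('Event')->first->starts_at;
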
866=cut
867
868{
869 my $old_dbd_warned = 0;
870
871 sub connect_call_datetime_setup {
872 my $self = shift;
873 my $dbh = $self->_get_dbh;
874
875 if ($dbh->can('syb_date_fmt')) {
876 # amazingly, this works with FreeTDS
877 $dbh->syb_date_fmt('ISO_strict');
878 } elsif (not $old_dbd_warned) {
879 carp "Your DBD::Sybase is too old to support ".
880 "DBIx::Class::InflateColumn::DateTime, please upgrade!";
881 $old_dbd_warned = 1;
882 }
883
884 $dbh->do('SET DATEFORMAT mdy');
885
886 1;
887 }
888}
889
890sub datetime_parser_type { "DateTime::Format::Sybase" }
891
892# ->begin_work and such have no effect with FreeTDS but we run them anyway to
893# let the DBD keep any state it needs to.
894#
895# If they ever do start working, the extra statements will do no harm (because
896# Sybase supports nested transactions.)
897
898sub _dbh_begin_work {
899 my $self = shift;
900
901# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
902# TRAN once. However, we need to make sure there's a $dbh.
903 return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
904
905 $self->next::method(@_);
906
907 if ($self->using_freetds) {
908 $self->_get_dbh->do('BEGIN TRAN');
909 }
910
911 $self->_began_bulk_work(1) if $self->_is_bulk_storage;
912}
913
914sub _dbh_commit {
915 my $self = shift;
916 if ($self->using_freetds) {
917 $self->_dbh->do('COMMIT');
918 }
919 return $self->next::method(@_);
920}
921
922sub _dbh_rollback {
923 my $self = shift;
924 if ($self->using_freetds) {
925 $self->_dbh->do('ROLLBACK');
926 }
927 return $self->next::method(@_);
928}
929
930# savepoint support using ASE syntax
931
932sub _svp_begin {
933 my ($self, $name) = @_;
934
935 $self->_get_dbh->do("SAVE TRANSACTION $name");
936}
937
938# A new SAVE TRANSACTION with the same name releases the previous one.
939sub _svp_release { 1 }
940
941sub _svp_rollback {
942 my ($self, $name) = @_;
943
944 $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
945}
946
9471;
948
949=head1 Schema::Loader Support
950
290da7d6 951As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
952most (if not all) versions of Sybase ASE.
057db5ce 953
954=head1 FreeTDS
955
956This driver supports L<DBD::Sybase> compiled against FreeTDS
957(L<http://www.freetds.org/>) to the best of our ability, however it is
958recommended that you recompile L<DBD::Sybase> against the Sybase Open Client
959libraries. They are a part of the Sybase ASE distribution:
960
961The Open Client FAQ is here:
962L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
963
964Sybase ASE for Linux (which comes with the Open Client libraries) may be
965downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
966
967To see if you're using FreeTDS, check C<< $schema->storage->using_freetds >>, or run:
968
969 perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
970
971Some versions of the libraries involved will not support placeholders, in which
972case the storage will be reblessed to
973L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
974
975In some configurations, placeholders will work but will throw implicit type
976conversion errors for anything that's not expecting a string. In such a case,
977the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
978set automatically; you may also enable it explicitly on connection with
979L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
980for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
981definitions in your Result classes, and is mapped to a Sybase type (if it isn't
982one already) using a mapping based on L<SQL::Translator>.
983
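If you prefer to enable it unconditionally rather than rely on detection, a
L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting along these
lines should work (a minimal sketch):

  on_connect_call => 'set_auto_cast'
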
48580715 984In other configurations, placeholders will work just as they do with the Sybase
057db5ce 985Open Client libraries.
986
987Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
988
989=head1 INSERTS WITH PLACEHOLDERS
990
991With placeholders enabled, inserts are done in a transaction so that there are
992no concurrency issues with getting the inserted identity value using
993C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
994mode.
995
996In addition, they are done on a separate connection so that it's possible to
997have active cursors when doing an insert.
998
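For example, something along these lines is expected to work in C<AutoCommit>
mode, because the insert runs on the extra connection (the resultset names are
made up):

  my $books = $schema->resultset('Book');

  while (my $book = $books->next) {            # cursor stays active
    $schema->resultset('AuditLog')->create({   # insert uses the writer connection
      book_id => $book->id,
    });
  }
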
999When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
1000are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
1001it's a session variable.
1002
1003=head1 TRANSACTIONS
1004
48580715 1005Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
1006begin a transaction while there are active cursors, nor can you use multiple
057db5ce 1007active cursors within a transaction. An active cursor is, for example, a
1008L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
1009C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
1010
1011For example, this will not work:
1012
1013 $schema->txn_do(sub {
1014 my $rs = $schema->resultset('Book');
1015 while (my $row = $rs->next) {
1016 $schema->resultset('MetaData')->create({
1017 book_id => $row->id,
1018 ...
1019 });
1020 }
1021 });
1022
1023This won't either:
1024
1025 my $first_row = $large_rs->first;
1026 $schema->txn_do(sub { ... });
1027
1028Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
1029are not affected, as they are done on an extra database handle.
1030
1031Some workarounds:
1032
1033=over 4
1034
1035=item * use L<DBIx::Class::Storage::DBI::Replicated>
1036
1037=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
1038
1039=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the example below)
1040
1041=back
1042
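For example, the last workaround applied to the case above (a sketch):

  my @books = $schema->resultset('Book')->all;   # cursor read fully, then released

  $schema->txn_do(sub {
    for my $book (@books) {
      $schema->resultset('MetaData')->create({
        book_id => $book->id,
      });
    }
  });
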
1043=head1 MAXIMUM CONNECTIONS
1044
1045The TDS protocol makes separate connections to the server for active statements
1046in the background. By default the number of such connections is limited to 25,
1047on both the client side and the server side.
1048
1049This is a bit too low for a complex L<DBIx::Class> application, so on connection
1050the client-side setting is set to C<256> (see L<DBD::Sybase/maxConnect>). You
1051can override it to whatever setting you like in the DSN.
1052
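For example (the server name is a placeholder):

  my $dsn = 'dbi:Sybase:server=prod_ase;maxConnect=1024';
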
1053See
1054L<http://infocenter.sybase.com/help/index.jsp?topic=/com.sybase.help.ase_15.0.sag1/html/sag1/sag1272.htm>
1055for information on changing the setting on the server side.
1056
1057=head1 DATES
1058
1059See L</connect_call_datetime_setup> to setup date formats
1060for L<DBIx::Class::InflateColumn::DateTime>.
1061
1062=head1 TEXT/IMAGE COLUMNS
1063
1064L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
1065C<TEXT/IMAGE> columns.
1066
1067Setting C<< $dbh->{LongReadLen} >> will also not work with FreeTDS; use either:
1068
1069 $schema->storage->dbh->do("SET TEXTSIZE $bytes");
1070
1071or
1072
1073 $schema->storage->set_textsize($bytes);
1074
1075instead.
1076
1077However, the C<LongReadLen> you pass in
8384a713 1078L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
1079equivalent C<SET TEXTSIZE> command on connection.
057db5ce 1080
8384a713 1081See L</connect_call_blob_setup> for a
1082L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
1083with C<IMAGE> columns.
057db5ce 1084
1085=head1 BULK API
1086
1087The experimental L<DBD::Sybase> Bulk API support is used for
1088L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
1089on a separate connection.
1090
1091To use this feature effectively, use a large number of rows for each
1092L<populate|DBIx::Class::ResultSet/populate> call, eg.:
1093
1094 while (my $rows = $data_source->get_100_rows()) {
1095 $rs->populate($rows);
1096 }
1097
1098B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
1099calls in your C<Result> classes B<must> list columns in database order for this
1100to work. Also, you may have to unset the C<LANG> environment variable before
1101loading your app, if it doesn't match the character set of your database.
1102
1103When inserting IMAGE columns using this method, you'll need to use
1104L</connect_call_blob_setup> as well.
1105
6476fd66 1106=head1 COMPUTED COLUMNS
1107
1108If you have columns such as:
1109
1110 created_dtm AS getdate()
1111
1112represent them in your Result classes as:
1113
1114 created_dtm => {
1115 data_type => undef,
1116 default_value => \'getdate()',
1117 is_nullable => 0,
1118 }
1119
1120The C<data_type> must exist and must be C<undef>. Then empty inserts will work
1121on tables with such columns.
1122
1123=head1 TIMESTAMP COLUMNS
1124
1125C<timestamp> columns in Sybase ASE are not really timestamps, see:
1126L<http://dba.fyicenter.com/Interview-Questions/SYBASE/The_timestamp_datatype_in_Sybase_.html>.
1127
1128They should be defined in your Result classes as:
1129
1130 ts => {
1131 data_type => 'timestamp',
1132 is_nullable => 0,
1133 inflate_datetime => 0,
1134 }
1135
1136The C<< inflate_datetime => 0 >> is necessary if you use
1137L<DBIx::Class::InflateColumn::DateTime> (as most people do) and still want to
1138be able to read these values.
1139
1140The values will come back as hexadecimal.
1141
057db5ce 1142=head1 TODO
1143
1144=over
1145
1146=item *
1147
1148Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
1149any active cursors, using eager cursors.
1150
1151=item *
1152
1153Real limits and limited counts using stored procedures deployed on startup.
1154
1155=item *
1156
d5dedbd6 1157Adaptive Server Anywhere (ASA) support
057db5ce 1158
1159=item *
1160
1161Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
1162
1163=item *
1164
1165insert_bulk using prepare_cached (see comments).
1166
1167=back
1168
1169=head1 AUTHOR
1170
1171See L<DBIx::Class/CONTRIBUTORS>.
1172
1173=head1 LICENSE
1174
1175You may distribute this code under the same terms as Perl itself.
1176
1177=cut
1178# vim:sts=2 sw=2: