X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI%2FSybase.pm;h=eeb4f01edf2a3806495f10925641a7d166d036b6;hb=6862815907d42aa2f5fcc70afd4ff2dbc9b48f79;hp=6be29436bfb803cb8c95126318e9e948a492e1a8;hpb=166c656193b56e08b472c675e92b9076aac03a53;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI/Sybase.pm b/lib/DBIx/Class/Storage/DBI/Sybase.pm
index 6be2943..eeb4f01 100644
--- a/lib/DBIx/Class/Storage/DBI/Sybase.pm
+++ b/lib/DBIx/Class/Storage/DBI/Sybase.pm
@@ -9,12 +9,25 @@ use base qw/
 /;
 use mro 'c3';
 use Carp::Clan qw/^DBIx::Class/;
-use List::Util ();
+use List::Util();
+use Sub::Name();
+use Data::Dumper::Concise();
 
 __PACKAGE__->mk_group_accessors('simple' =>
-    qw/_identity _blob_log_on_update insert_txn _extra_dbh/
+    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+       _bulk_storage _is_bulk_storage _began_bulk_work
+       _bulk_disabled_due_to_coderef_connect_info_warned
+       _identity_method/
 );
 
+my @also_proxy_to_extra_storages = qw/
+  connect_call_set_auto_cast auto_cast connect_call_blob_setup
+  connect_call_datetime_setup
+
+  disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+  auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
 =head1 NAME
 
 DBIx::Class::Storage::DBI::Sybase - Sybase support for DBIx::Class
@@ -33,11 +46,7 @@ also enable that driver explicitly, see the documentation for more details.
 
 With this driver there is unfortunately no way to get the C<last_insert_id>
 without doing a C<SELECT MAX(col)> when placeholders are enabled.
 
-When using C<DBIx::Class::Storage::DBI::Sybase::NoBindVars> transactions are
-disabled.
-
-To turn off transactions for inserts (for an application that doesn't need
-concurrency, or a loader, for example) use this setting in
-L<DBIx::Class::Storage::DBI/connect_info>,
-
-  on_connect_call => ['unsafe_insert']
-
-To manipulate this setting at runtime, use:
-
-  $schema->storage->insert_txn(0); # 1 to re-enable
-
-=cut
-
-sub connect_call_unsafe_insert {
-  my $self = shift;
-  $self->insert_txn(0);
-}
-
 sub _is_lob_type {
   my $self = shift;
   my $type = shift;
   $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
 }
 
+sub _is_lob_column {
+  my ($self, $source, $column) = @_;
+
+  return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
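# Illustrative sketch (hypothetical values, not part of the patch): _is_lob_type
# is a plain pattern match on the declared data_type, so for example:
#
#   $storage->_is_lob_type('image');          # true
#   $storage->_is_lob_type('VARBINARY(16)');  # true, matches 'binary'
#   $storage->_is_lob_type('varchar(255)');   # false
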
 sub _prep_for_execute {
   my $self = shift;
   my ($op, $extra_bind, $ident, $args) = @_;
 
   my ($sql, $bind) = $self->next::method (@_);
 
-  if ($op eq 'insert') {
-    my $table = $ident->from;
-
-    my $bind_info = $self->_resolve_column_info(
-      $ident, [map $_->[0], @{$bind}]
+  my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+
+  my $bind_info = $self->_resolve_column_info(
+    $ident, [map $_->[0], @{$bind}]
+  );
+  my $bound_identity_col = List::Util::first
+    { $bind_info->{$_}{is_auto_increment} }
+    (keys %$bind_info)
+  ;
+  my $identity_col = Scalar::Util::blessed($ident) &&
+    List::Util::first
+    { $ident->column_info($_)->{is_auto_increment} }
+    $ident->columns
+  ;
+
+  if (($op eq 'insert' && $bound_identity_col) ||
+      ($op eq 'update' && exists $args->[0]{$identity_col})) {
+    $sql = join ("\n",
+      $self->_set_table_identity_sql($op => $table, 'on'),
+      $sql,
+      $self->_set_table_identity_sql($op => $table, 'off'),
     );
-    my $identity_col = List::Util::first
-      { $bind_info->{$_}{is_auto_increment} }
-      (keys %$bind_info)
-    ;
-
-    if ($identity_col) {
-      $sql = join ("\n",
-        "SET IDENTITY_INSERT $table ON",
-        $sql,
-        "SET IDENTITY_INSERT $table OFF",
-      );
-    }
-    else {
-      $identity_col = List::Util::first
-        { $ident->column_info($_)->{is_auto_increment} }
-        $ident->columns
-      ;
-    }
+  }
 
-    if ($identity_col) {
-      $sql =
-        "$sql\n" .
-        $self->_fetch_identity_sql($ident, $identity_col);
-    }
+  if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
+      (not $self->{insert_bulk})) {
+    $sql =
+      "$sql\n" .
+      $self->_fetch_identity_sql($ident, $identity_col);
   }
 
   return ($sql, $bind);
 }
 
+sub _set_table_identity_sql {
+  my ($self, $op, $table, $on_off) = @_;
+
+  return sprintf 'SET IDENTITY_%s %s %s',
+    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+}
+
 # Stolen from SQLT, with some modifications. This is a makeshift
 # solution before a sane type-mapping library is available, thus
 # the 'our' for easy overrides.
@@ -266,7 +320,7 @@ sub _native_data_type {
   my ($self, $type) = @_;
 
   $type = lc $type;
-  $type =~ s/ identity//;
+  $type =~ s/\s* identity//x;
 
   return uc($TYPE_MAPPING{$type} || $type);
 }
@@ -274,7 +328,9 @@ sub _native_data_type {
 sub _fetch_identity_sql {
   my ($self, $source, $col) = @_;
 
-  return "SELECT MAX($col) FROM ".$source->from;
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
 }
 
 sub _execute {
@@ -293,82 +349,376 @@ sub _execute {
 
 sub last_insert_id { shift->_identity }
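# Illustrative sketch (hypothetical 'artist' table, not part of the patch):
# an INSERT that explicitly binds the identity column is now bracketed by
# _set_table_identity_sql as
#
#   SET IDENTITY_INSERT artist ON
#   INSERT INTO artist (artistid, name) VALUES (?, ?)
#   SET IDENTITY_INSERT artist OFF
#
# an UPDATE of the identity column uses SET IDENTITY_UPDATE instead, and an
# insert that does not bind the identity (outside insert_bulk) gets the
# _fetch_identity_sql statement appended:
#
#   INSERT INTO artist (name) VALUES (?)
#   SELECT MAX(artistid) FROM artist
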
 
-# override to handle TEXT/IMAGE and to do a transaction if necessary
+# handles TEXT/IMAGE and transaction for last_insert_id
 sub insert {
   my $self = shift;
   my ($source, $to_insert) = @_;
 
+  my $identity_col = (List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns) || '';
+
+  # check for empty insert
+  # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
+  # try to insert explicit 'DEFAULT's instead (except for identity)
+  if (not %$to_insert) {
+    for my $col ($source->columns) {
+      next if $col eq $identity_col;
+      $to_insert->{$col} = \'DEFAULT';
+    }
+  }
+
   my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
 
-  my $need_last_insert_id = 0;
+  # do we need the horrific SELECT MAX(COL) hack?
+  my $dumb_last_insert_id =
+    $identity_col
+    && (not exists $to_insert->{$identity_col})
+    && ($self->_identity_method||'') ne '@@IDENTITY';
+
+  my $next = $self->next::can;
+
+  # we are already in a transaction, or there are no blobs
+  # and we don't need the PK - just (try to) do it
+  if ($self->{transaction_depth}
+        || (!$blob_cols && !$dumb_last_insert_id)
+  ) {
+    return $self->_insert (
+      $next, $source, $to_insert, $blob_cols, $identity_col
+    );
+  }
 
-  my ($identity_col) =
-    map $_->[0],
-    grep $_->[1]{is_auto_increment},
-    map [ $_, $source->column_info($_) ],
-    $source->columns;
+  # otherwise use the _writer_storage to do the insert+transaction on another
+  # connection
+  my $guard = $self->_writer_storage->txn_scope_guard;
 
-  $need_last_insert_id = 1
-    if $identity_col && (not exists $to_insert->{$identity_col});
-
-  # We have to do the insert in a transaction to avoid race conditions with the
-  # SELECT MAX(COL) identity method used when placeholders are enabled.
-  my $updated_cols = do {
-    if ($need_last_insert_id && $self->insert_txn &&
-        (not $self->{transaction_depth})) {
-      local $self->{_dbh} = $self->_extra_dbh;
-      my $guard = $self->txn_scope_guard;
-      my $upd_cols = $self->next::method (@_);
-      $guard->commit;
-      $upd_cols;
-    }
-    else {
-      $self->next::method(@_);
-    }
+  my $updated_cols = $self->_writer_storage->_insert (
+    $next, $source, $to_insert, $blob_cols, $identity_col
+  );
+
+  $self->_identity($self->_writer_storage->_identity);
+
+  $guard->commit;
+
+  return $updated_cols;
+}
+
+sub _insert {
+  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+  my $updated_cols = $self->$next ($source, $to_insert);
+
+  my $final_row = {
+    ($identity_col ?
+      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
+    %$to_insert,
+    %$updated_cols,
   };
 
-  $self->_insert_blobs($source, $blob_cols, $to_insert) if %$blob_cols;
+  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
 
   return $updated_cols;
 }
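# Rough trace (hypothetical 'track' table with identity PK 'trackid' and an
# IMAGE column 'img', not part of the patch): outside of any transaction,
# insert() above runs on the separate _writer_storage connection roughly as
#
#   BEGIN TRAN
#   INSERT INTO track (title, img) VALUES (?, '')   -- blob pulled out by
#                                                   -- _remove_blob_cols
#   SELECT MAX(trackid) FROM track                  -- the 'dumb' identity fetch
#   -- _insert_blobs then writes the IMAGE data via ct_data_info/ct_send_data
#   COMMIT TRAN
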
 
 sub update {
   my $self = shift;
-  my ($source, $fields, $where) = @_;
+  my ($source, $fields, $where, @rest) = @_;
 
   my $wantarray = wantarray;
 
   my $blob_cols = $self->_remove_blob_cols($source, $fields);
 
+  my $table = $source->name;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  return $self->next::method(@_) unless $blob_cols;
+
+# If there are any blobs in $where, Sybase will return a descriptive error
+# message.
+# XXX blobs can still be used with a LIKE query, and this should be handled.
+
+# update+blob update(s) done atomically on separate connection
+  $self = $self->_writer_storage;
+
+  my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (taken from $fields, where
+# it is originally put by _remove_blob_cols .)
+  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+
+  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+  $self->_update_blobs($source, $blob_cols, $where);
+
   my @res;
-  if ($wantarray) {
-    @res = $self->next::method(@_);
-  }
-  elsif (defined $wantarray) {
-    $res[0] = $self->next::method(@_);
-  }
-  else {
-    $self->next::method(@_);
-  }
-
-  $self->_update_blobs($source, $blob_cols, $where) if %$blob_cols;
-
-  return $wantarray ? @res : $res[0];
+  if (%$fields) {
+    if ($wantarray) {
+      @res = $self->next::method(@_);
+    }
+    elsif (defined $wantarray) {
+      $res[0] = $self->next::method(@_);
+    }
+    else {
+      $self->next::method(@_);
+    }
+  }
+
+  $guard->commit;
+
+  return $wantarray ? @res : $res[0];
 }
 
+sub insert_bulk {
+  my $self = shift;
+  my ($source, $cols, $data) = @_;
+
+  my $identity_col = List::Util::first
+    { $source->column_info($_)->{is_auto_increment} }
+    $source->columns;
+
+  my $is_identity_insert = (List::Util::first
+    { $_ eq $identity_col }
+    @{$cols}
+  ) ? 1 : 0;
+
+  my @source_columns = $source->columns;
+
+  my $use_bulk_api =
+    $self->_bulk_storage &&
+    $self->_get_dbh->{syb_has_blk};
+
+  if ((not $use_bulk_api) &&
+      (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
+      (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+    carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+regular array inserts.
+EOF
+    $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+  }
+
+  if (not $use_bulk_api) {
+    my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+# _execute_array uses a txn anyway, but it ends too early in case we need to
+# select max(col) to get the identity for inserting blobs.
+    ($self, my $guard) = $self->{transaction_depth} == 0 ?
+      ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+      :
+      ($self, undef);
+
+    local $self->{insert_bulk} = 1;
+
+    $self->next::method(@_);
+
+    if ($blob_cols) {
+      if ($is_identity_insert) {
+        $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+      }
+      else {
+        my @cols_with_identities = (@$cols, $identity_col);
+
+        ## calculate identities
+        # XXX This assumes identities always increase by 1, which may or may not
+        # be true.
+        my ($last_identity) =
+          $self->_dbh->selectrow_array (
+            $self->_fetch_identity_sql($source, $identity_col)
+          );
+        my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+        my @data_with_identities = map [@$_, shift @identities], @$data;
+
+        $self->_insert_blobs_array (
+          $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+        );
+      }
+    }
+
+    $guard->commit if $guard;
+
+    return;
+  }
+
+# otherwise, use the bulk API
+
+# rearrange @$data so that columns are in database order
+  my %orig_idx;
+  @orig_idx{@$cols} = 0..$#$cols;
+
+  my %new_idx;
+  @new_idx{@source_columns} = 0..$#source_columns;
+
+  my @new_data;
+  for my $datum (@$data) {
+    my $new_datum = [];
+    for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+      $new_datum->[ $new_idx{$col} ] =
+        exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+    }
+    push @new_data, $new_datum;
+  }
+
+# bcp identity index is 1-based
+  my $identity_idx = exists $new_idx{$identity_col} ?
+    $new_idx{$identity_col} + 1 : 0;
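# Worked example (hypothetical columns, not part of the patch): with database
# column order (id, name, rank) and a populate() call supplying
#
#   $cols = ['rank', 'name'];   $data = [ [ 1, 'foo' ] ];
#
# the reordering above produces, with undef for the unsupplied identity 'id':
#
#   @new_data = ( [ undef, 'foo', 1 ] );
#   $identity_idx == 1;    # 'id' is column 1 in bcp's 1-based numbering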
 
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+  my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+    Sub::Name::subname insert_bulk => sub {
+      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+      return 1 if $errno == 36;
+
+      carp
+        "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+        ($errmsg ? "\n$errmsg" : '') .
+        ($osmsg  ? "\n$osmsg"  : '') .
+        ($blkmsg ? "\n$blkmsg" : '');
+
+      return 0;
+  });
+
+  eval {
+    my $bulk = $self->_bulk_storage;
+
+    my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+#    $bulk->next::method($source, \@source_columns, \@new_data, {
+#      syb_bcp_attribs => {
+#        identity_flag   => $is_identity_insert,
+#        identity_column => $identity_idx,
+#      }
+#    });
+    my $sql = 'INSERT INTO ' .
+      $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+      (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+      ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase .
+    my $sth = $bulk->_get_dbh->prepare($sql,
+#      'insert', # op
+      {
+        syb_bcp_attribs => {
+          identity_flag   => $is_identity_insert,
+          identity_column => $identity_idx,
+        }
+      }
+    );
+
+    my @bind = do {
+      my $idx = 0;
+      map [ $_, $idx++ ], @source_columns;
+    };
+
+    $self->_execute_array(
+      $source, $sth, \@bind, \@source_columns, \@new_data, sub {
+        $guard->commit
+      }
+    );
+
+    $bulk->_query_end($sql);
+  };
+
+  my $exception = $@;
+  DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+
+  if ($exception =~ /-Y option/) {
+    carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility, reverting
+to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$exception
EOF
+    $self->_bulk_storage(undef);
+    unshift @_, $self;
+    goto \&insert_bulk;
+  }
+  elsif ($exception) {
+# rollback makes the bulkLogin connection unusable
+    $self->_bulk_storage->disconnect;
+    $self->throw_exception($exception);
+  }
+}
+
+sub _dbh_execute_array {
+  my ($self, $sth, $tuple_status, $cb) = @_;
+
+  my $rv = $self->next::method($sth, $tuple_status);
+  $cb->() if $cb;
+
+  return $rv;
+}
+
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
 sub _remove_blob_cols {
   my ($self, $source, $fields) = @_;
 
   my %blob_cols;
 
   for my $col (keys %$fields) {
-    if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
-      $blob_cols{$col} = delete $fields->{$col};
-      $fields->{$col} = \"''";
+    if ($self->_is_lob_column($source, $col)) {
+      my $blob_val = delete $fields->{$col};
+      if (not defined $blob_val) {
+        $fields->{$col} = \'NULL';
+      }
+      else {
+        $fields->{$col} = \"''";
+        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+      }
     }
   }
 
-  return \%blob_cols;
+  return %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+  my ($self, $source, $cols, $data) = @_;
+
+  my @blob_cols;
+
+  for my $i (0..$#$cols) {
+    my $col = $cols->[$i];
+
+    if ($self->_is_lob_column($source, $col)) {
+      for my $j (0..$#$data) {
+        my $blob_val = delete $data->[$j][$i];
+        if (not defined $blob_val) {
+          $data->[$j][$i] = \'NULL';
+        }
+        else {
+          $data->[$j][$i] = \"''";
+          $blob_cols[$j][$i] = $blob_val
+            unless $blob_val eq '';
+        }
+      }
+    }
+  }
+
+  return @blob_cols ? \@blob_cols : undef;
 }
 
 sub _update_blobs {
@@ -376,7 +726,7 @@ sub _update_blobs {
 
   my (@primary_cols) = $source->primary_columns;
 
-  croak "Cannot update TEXT/IMAGE column(s) without a primary key"
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
     unless @primary_cols;
 
   # check if we're updating a single row by PK
@@ -391,14 +741,10 @@ sub _update_blobs {
     @row_to_update{@primary_cols} = @{$where}{@primary_cols};
     @rows = \%row_to_update;
   } else {
-    my $rs = $source->resultset->search(
-      $where,
-      {
-        result_class => 'DBIx::Class::ResultClass::HashRefInflator',
-        select => \@primary_cols
-      }
-    );
-    @rows = $rs->all; # statement must finish
+    my $cursor = $self->select ($source, \@primary_cols, $where, {});
+    @rows = map {
+      my %row; @row{@primary_cols} = @$_; \%row
+    } $cursor->all;
   }
 
   for my $row (@rows) {
@@ -408,35 +754,36 @@ sub _insert_blobs {
   my ($self, $source, $blob_cols, $row) = @_;
-  my $dbh = $self->dbh;
+  my $dbh = $self->_get_dbh;
 
-  my $table = $source->from;
+  my $table = $source->name;
 
   my %row = %$row;
   my (@primary_cols) = $source->primary_columns;
 
-  croak "Cannot update TEXT/IMAGE column(s) without a primary key"
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
     unless @primary_cols;
 
-  if ((grep { defined $row{$_} } @primary_cols) != @primary_cols) {
-    if (@primary_cols == 1) {
-      my $col = $primary_cols[0];
-      $row{$col} = $self->last_insert_id($source, $col);
-    } else {
-      croak "Cannot update TEXT/IMAGE column(s) without primary key values";
-    }
-  }
+  $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
+    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
 
   for my $col (keys %$blob_cols) {
     my $blob = $blob_cols->{$col};
 
     my %where = map { ($_, $row{$_}) } @primary_cols;
-    my $cursor = $source->resultset->search(\%where, {
-      select => [$col]
-    })->cursor;
+
+    my $cursor = $self->select ($source, [$col], \%where, {});
     $cursor->next;
     my $sth = $cursor->sth;
 
+    if (not $sth) {
+
+      $self->throw_exception(
+          "Could not find row in table '$table' for blob update:\n"
+        . Data::Dumper::Concise::Dumper (\%where)
+      );
+    }
+
     eval {
       do {
         $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
@@ -460,14 +807,34 @@ sub _insert_blobs {
     $sth->finish if $sth;
 
     if ($exception) {
      if ($self->using_freetds) {
-        croak (
+        $self->throw_exception (
 'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
 . $exception
         );
       }
      else {
-        croak $exception;
+        $self->throw_exception($exception);
+      }
+    }
+  }
+}
+
+sub _insert_blobs_array {
+  my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+  for my $i (0..$#$data) {
+    my $datum = $data->[$i];
+
+    my %row;
+    @row{ @$cols } = @$datum;
+
+    my %blob_vals;
+    for my $j (0..$#$cols) {
+      if (exists $blob_cols->[$i][$j]) {
+        $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+      }
+    }
+
+    $self->_insert_blobs ($source, \%blob_vals, \%row);
+  }
+}
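# Positional illustration (hypothetical values, not part of the patch): given
#
#   $cols = ['id', 'name', 'img'];   $data = [ [ 1, 'foo', $image_data ] ];
#
# _remove_blob_cols_array rewrites $data->[0] to [ 1, 'foo', \"''" ] and
# assigns only slot [0][2] of the returned $blob_cols; _insert_blobs_array
# then reassembles, for row 0 (the 'exists' test skips the unassigned slots):
#
#   %row       = (id => 1, name => 'foo', img => \"''");
#   %blob_vals = (img => $image_data);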
@@ -495,7 +862,7 @@ C<SMALLDATETIME> columns only have minute precision.
 
 sub connect_call_datetime_setup {
   my $self = shift;
-  my $dbh = $self->_dbh;
+  my $dbh = $self->_get_dbh;
 
   if ($dbh->can('syb_date_fmt')) {
     # amazingly, this works with FreeTDS
@@ -522,10 +889,18 @@ sub datetime_parser_type { "DateTime::Format::Sybase" }
 
 sub _dbh_begin_work {
   my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+  return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
   $self->next::method(@_);
+
   if ($self->using_freetds) {
-    $self->dbh->do('BEGIN TRAN');
+    $self->_get_dbh->do('BEGIN TRAN');
   }
+
+  $self->_began_bulk_work(1) if $self->_is_bulk_storage;
 }
 
 sub _dbh_commit {
@@ -549,7 +924,7 @@ sub _dbh_rollback {
 sub _svp_begin {
   my ($self, $name) = @_;
 
-  $self->dbh->do("SAVE TRANSACTION $name");
+  $self->_get_dbh->do("SAVE TRANSACTION $name");
 }
 
 # A new SAVE TRANSACTION with the same name releases the previous one.
@@ -558,7 +933,7 @@ sub _svp_release { 1 }
 
 sub _svp_rollback {
   my ($self, $name) = @_;
 
-  $self->dbh->do("ROLLBACK TRANSACTION $name");
+  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
 }
 
 1;
 
@@ -607,20 +982,59 @@ Open Client libraries.
 
 Inserts or updates of TEXT/IMAGE columns will B<not> work with FreeTDS.
 
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>; no transaction is needed when C<@@IDENTITY> can be used
+instead, as it's a session variable.
+
 =head1 TRANSACTIONS
 
 Due to limitations of the TDS protocol, L<DBD::Sybase>, or both; you cannot
-begin a transaction while there are active cursors. An active cursor is, for
-example, a L<ResultSet|DBIx::Class::ResultSet> that has been executed using
-C<next> or C<first> but has not been exhausted or
-L<reset|DBIx::Class::ResultSet/reset>.
+begin a transaction while there are active cursors; nor can you use multiple
+active cursors within a transaction. An active cursor is, for example, a
+L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
+C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+  $schema->txn_do(sub {
+    my $rs = $schema->resultset('Book');
+    while (my $row = $rs->next) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+
+This won't either:
 
-To get around this problem, use L<DBIx::Class::ResultSet/all> for smaller
-ResultSets, and/or put the active cursors you will need in the scope of the
-transaction.
+  my $first_row = $large_rs->first;
+  $schema->txn_do(sub { ... });
 
 Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
-are not affected, as they are executed on a separate connection.
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all>
+(as shown in the sketch after this list)
+
+=back
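
As an illustrative sketch (not from the original documentation), the first
broken example above can be restructured around L<DBIx::Class::ResultSet/all>,
which exhausts the cursor before the transaction begins:

  my @books = $schema->resultset('Book')->all;
  $schema->txn_do(sub {
    for my $row (@books) {
      $schema->resultset('MetaData')->create({
        book_id => $row->id,
        ...
      });
    }
  });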
 
 =head1 MAXIMUM CONNECTIONS
 
@@ -663,6 +1077,54 @@ C<SET TEXTSIZE> command on connection. See L</connect_call_blob_setup> for a
 L<DBIx::Class::Storage::DBI/connect_info> setting you need to
 work with C<IMAGE> columns.
 
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, eg.:
+
+  while (my $rows = $data_source->get_100_rows()) {
+    $rs->populate($rows);
+  }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
+=head1 TODO
+
+=over
+
+=item *
+
+Transitions to AutoCommit=0 (starting a transaction) mode by exhausting
+any active cursors, using eager cursors.
+
+=item *
+
+Real limits and limited counts using stored procedures deployed on startup.
+
+=item *
+
+Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
+
+=item *
+
+Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
+
+=item *
+
+bulk_insert using prepare_cached (see comments.)
+
+=back
+
 =head1 AUTHOR
 
 See L<DBIx::Class/CONTRIBUTORS>.