X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI%2FSybase%2FASE.pm;h=5282b7f9bd39763522e577b749609840ba953f79;hb=b83736a7d3235d2f50fe5695550eb3637432d960;hp=9c4f2f25be33b7e2db6380d892d957e0b7fadb02;hpb=52cef7e30a43620553dc38ce52a10946b76a814c;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
index 9c4f2f2..5282b7f 100644
--- a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
+++ b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
@@ -4,28 +4,29 @@
 use strict;
 use warnings;
 
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase
-  DBIx::Class::Storage::DBI::AutoCast
+  DBIx::Class::Storage::DBI::Sybase
+  DBIx::Class::Storage::DBI::AutoCast
+  DBIx::Class::Storage::DBI::IdentityInsert
 /;
 use mro 'c3';
 use DBIx::Class::Carp;
-use Scalar::Util 'blessed';
-use List::Util 'first';
-use Sub::Name();
-use Data::Dumper::Concise 'Dumper';
+use Scalar::Util qw/blessed weaken/;
 use Try::Tiny;
+use Context::Preserve 'preserve_context';
+use DBIx::Class::_Util qw( sigwarn_silencer dbic_internal_try dump_value scope_guard set_subname );
 use namespace::clean;
 
-__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
+__PACKAGE__->sql_limit_dialect ('GenericSubQ');
 __PACKAGE__->sql_quote_char ([qw/[ ]/]);
 __PACKAGE__->datetime_parser_type(
   'DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format'
 );
 
 __PACKAGE__->mk_group_accessors('simple' =>
-  qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+  qw/_identity _identity_method _blob_log_on_update _parent_storage
+     _writer_storage _is_writer_storage
      _bulk_storage _is_bulk_storage _began_bulk_work
-     _identity_method/
+  /
 );
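[Editor's note, not part of the commit: the hunk above switches the limit
dialect from 'RowCountOrGenericSubQ' to plain 'GenericSubQ'. The simple
limit-without-offset case keeps a fast path, though: it is handled by the
SET ROWCOUNT bracketing added to _prep_for_execute further down. A minimal
sketch of the effect, reusing the hypothetical 'Book' resultset from the
TRANSACTIONS example later in this file; the exact SELECT text ASE receives
will differ:

  my @books = $schema->resultset('Book')->search(undef, { rows => 5 })->all;

  # approximate statement batch produced by the new _prep_for_execute:
  #
  #   SET ROWCOUNT 5
  #   SELECT me.id, me.title FROM book me
  #   SET ROWCOUNT 0

SET ROWCOUNT cannot express an offset (the ! $args->[4] check guards the
shortcut), so offset queries still go through the GenericSubQ emulation.]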
@@ -70,7 +71,7 @@
   my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
 
-  if ($self->using_freetds) {
+  if ($self->_using_freetds) {
     carp_once <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
 
 You are using FreeTDS with Sybase.
 
@@ -115,18 +116,30 @@ EOF
 sub _init {
   my $self = shift;
 
+  $self->next::method(@_);
+
+  if ($self->_using_freetds && (my $ver = $self->_using_freetds_version||999) > 0.82) {
+    carp_once(
+      "Buggy FreeTDS version $ver detected, statement caching will not work and "
+    . 'will be disabled.'
+    );
+    $self->disable_sth_caching(1);
+  }
+
   $self->_set_max_connect(256);
 
 # create storage for insert/(update blob) transactions,
 # unless this is that storage
-  return if $self->_is_extra_storage;
+  return if $self->_parent_storage;
 
   my $writer_storage = (ref $self)->new;
 
-  $writer_storage->_is_extra_storage(1);
+  $writer_storage->_is_writer_storage(1); # just info
   $writer_storage->connect_info($self->connect_info);
   $writer_storage->auto_cast($self->auto_cast);
 
+  weaken ($writer_storage->{_parent_storage} = $self);
   $self->_writer_storage($writer_storage);
 
 # create a bulk storage unless connect_info is a coderef
@@ -134,13 +147,13 @@ sub _init {
 
   my $bulk_storage = (ref $self)->new;
 
-  $bulk_storage->_is_extra_storage(1);
   $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
   $bulk_storage->connect_info($self->connect_info);
 
 # this is why
   $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
 
+  weaken ($bulk_storage->{_parent_storage} = $self);
   $self->_bulk_storage($bulk_storage);
 }
 
@@ -150,7 +163,7 @@ for my $method (@also_proxy_to_extra_storages) {
 
   my $replaced = __PACKAGE__->can($method);
 
-  *{$method} = Sub::Name::subname $method => sub {
+  *{$method} = set_subname $method => sub {
     my $self = shift;
     $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
     $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
@@ -163,11 +176,10 @@ sub disconnect {
 
 # Even though we call $sth->finish for uses of the bulk API, there's still an
 # "active statement" warning on disconnect, which we throw away here.
-# This is due to the bug described in insert_bulk.
+# This is due to the bug described in _insert_bulk.
 # Currently a noop because 'prepare' is used instead of 'prepare_cached'.
-  local $SIG{__WARN__} = sub {
-    warn $_[0] unless $_[0] =~ /active statement/i;
-  } if $self->_is_bulk_storage;
+  local $SIG{__WARN__} = sigwarn_silencer(qr/active statement/i)
+    if $self->_is_bulk_storage;
 
 # so that next transaction gets a dbh
   $self->_began_bulk_work(0) if $self->_is_bulk_storage;
@@ -198,7 +210,7 @@ sub _run_connection_actions {
   }
 
   $self->_dbh->{syb_chained_txn} = 1
-    unless $self->using_freetds;
+    unless $self->_using_freetds;
 
   $self->next::method(@_);
 }
@@ -218,7 +230,7 @@
 Also sets the C<log_on_update> value for blob write operations. The default is
 C<1>, but C<0> is better if your database is configured for it. See
-L.
+L.
 
 =cut
 
 sub connect_call_blob_setup {
@@ -235,73 +247,42 @@ sub connect_call_blob_setup {
 
 sub _is_lob_column {
   my ($self, $source, $column) = @_;
 
-  return $self->_is_lob_type($source->column_info($column)->{data_type});
+  return $self->_is_lob_type(
+    $source->columns_info([$column])->{$column}{data_type}
+  );
 }
 
 sub _prep_for_execute {
-  my $self = shift;
-  my ($op, $ident, $args) = @_;
-
-  my ($sql, $bind) = $self->next::method (@_);
+  my ($self, $op, $ident, $args) = @_;
 
-  my $table = blessed $ident ? $ident->from : $ident;
-
-  my $bind_info = $self->_resolve_column_info(
-    $ident, [map { $_->[0]{dbic_colname} || () } @{$bind}]
-  );
-  my $bound_identity_col =
-    first { $bind_info->{$_}{is_auto_increment} }
-    keys %$bind_info
-  ;
+  my $limit;  # extract and use shortcut on limit without offset
+  if ($op eq 'select' and ! $args->[4] and $limit = $args->[3]) {
+    $args = [ @$args ];
+    $args->[3] = undef;
+  }
 
-  my $columns_info = blessed $ident && $ident->columns_info;
+  my ($sql, $bind) = $self->next::method($op, $ident, $args);
 
-  my $identity_col =
-    $columns_info &&
-    first { $columns_info->{$_}{is_auto_increment} }
-    keys %$columns_info
-  ;
+  # $limit is already sanitized by now
+  $sql = join( "\n",
+    "SET ROWCOUNT $limit",
+    $sql,
+    "SET ROWCOUNT 0",
+  ) if $limit;
 
-  if (
-    ($bound_identity_col and $op eq 'insert')
-      or
-    (
-      $op eq 'update'
-        and
-      defined $identity_col
-        and
-      exists $args->[0]{$identity_col}
-    )
-  ) {
-    $sql = join ("\n",
-      $self->_set_table_identity_sql($op => $table, 'on'),
-      $sql,
-      $self->_set_table_identity_sql($op => $table, 'off'),
-    );
-  }
-
-  if (
-    (not $bound_identity_col)
-      and
-    $identity_col
-      and
-    (not $self->{insert_bulk})
-      and
-    $op eq 'insert'
-  ) {
-    $sql =
-      "$sql\n" .
-      $self->_fetch_identity_sql($ident, $identity_col);
+  if (my $identity_col = $self->_perform_autoinc_retrieval) {
+    $sql .= "\n" . $self->_fetch_identity_sql($ident, $identity_col)
   }
 
   return ($sql, $bind);
 }
 
-sub _set_table_identity_sql {
-  my ($self, $op, $table, $on_off) = @_;
+sub _fetch_identity_sql {
+  my ($self, $source, $col) = @_;
 
-  return sprintf 'SET IDENTITY_%s %s %s',
-    uc($op), $self->sql_maker->_quote($table), uc($on_off);
+  return sprintf ("SELECT MAX(%s) FROM %s",
+    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+  );
 }
 
 # Stolen from SQLT, with some modifications. This is a makeshift
@@ -334,24 +315,13 @@ sub _native_data_type {
   return uc($TYPE_MAPPING{$type} || $type);
 }
 
-sub _fetch_identity_sql {
-  my ($self, $source, $col) = @_;
-
-  return sprintf ("SELECT MAX(%s) FROM %s",
-    map { $self->sql_maker->_quote ($_) } ($col, $source->from)
-  );
-}
 
 sub _execute {
   my $self = shift;
-  my ($op) = @_;
 
   my ($rv, $sth, @bind) = $self->next::method(@_);
 
-  if ($op eq 'insert') {
-    $self->_identity($sth->fetchrow_array);
-    $sth->finish;
-  }
+  $self->_identity( ($sth->fetchall_arrayref)->[0][0] )
+    if $self->_perform_autoinc_retrieval;
 
   return wantarray ? ($rv, $sth, @bind) : $rv;
 }
@@ -365,25 +335,52 @@ sub insert {
 
   my $columns_info = $source->columns_info;
 
-  my $identity_col =
-    (first { $columns_info->{$_}{is_auto_increment} }
-      keys %$columns_info )
-    || '';
+  my ($identity_col) = grep
+    { $columns_info->{$_}{is_auto_increment} }
+    keys %$columns_info
+  ;
+
+  $identity_col = '' if ! defined $identity_col;
+
+  # FIXME - this is duplication from DBI.pm. When refactored towards
+  # the LobWriter this can be folded back where it belongs.
+  local $self->{_autoinc_supplied_for_op} = exists $to_insert->{$identity_col}
+    ? 1
+    : 0
+  ;
+
+  local $self->{_perform_autoinc_retrieval} = $self->{_autoinc_supplied_for_op}
+    ? undef
+    : $identity_col
+  ;
 
   # check for empty insert
   # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
   # try to insert explicit 'DEFAULT's instead (except for identity, timestamp
   # and computed columns)
   if (not %$to_insert) {
+
+    my $ci;
+    # same order as add_columns
     for my $col ($source->columns) {
       next if $col eq $identity_col;
 
-      my $info = $source->column_info($col);
-
-      next if ref $info->{default_value} eq 'SCALAR'
-        || (exists $info->{data_type} && (not defined $info->{data_type}));
-
-      next if $info->{data_type} && $info->{data_type} =~ /^timestamp\z/i;
+      my $info = ( $ci ||= $source->columns_info )->{$col};
+
+      next if (
+        ref $info->{default_value} eq 'SCALAR'
+          or
+        (
+          exists $info->{data_type}
+            and
+          ! defined $info->{data_type}
+        )
+          or
+        (
+          ( $info->{data_type} || '' )
+            =~ /^timestamp\z/i
+        )
+      );
 
       $to_insert->{$col} = \'DEFAULT';
     }
@@ -391,52 +388,42 @@ sub insert {
 
   my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
 
-  # do we need the horrific SELECT MAX(COL) hack?
-  my $dumb_last_insert_id =
-       $identity_col
-    && (not exists $to_insert->{$identity_col})
-    && ($self->_identity_method||'') ne '@@IDENTITY';
-
-  my $next = $self->next::can;
-
-  # we are already in a transaction, or there are no blobs
-  # and we don't need the PK - just (try to) do it
-  if ($self->{transaction_depth}
-        || (!$blob_cols && !$dumb_last_insert_id)
+  # if a new txn is needed - it must happen on the _writer/new connection (for now)
+  my $guard;
+  if (
+    ! $self->transaction_depth
+      and
+    (
+      $blob_cols
+        or
+      # do we need the horrific SELECT MAX(COL) hack?
+      (
+        $self->_perform_autoinc_retrieval
+          and
+        ( ($self->_identity_method||'') ne '@@IDENTITY' )
+      )
+    )
   ) {
-    return $self->_insert (
-      $next, $source, $to_insert, $blob_cols, $identity_col
-    );
+    $self = $self->_writer_storage;
+    $guard = $self->txn_scope_guard;
  }
 
-  # otherwise use the _writer_storage to do the insert+transaction on another
-  # connection
-  my $guard = $self->_writer_storage->txn_scope_guard;
-
-  my $updated_cols = $self->_writer_storage->_insert (
-    $next, $source, $to_insert, $blob_cols, $identity_col
-  );
+  my $updated_cols = $self->next::method ($source, $to_insert);
 
-  $self->_identity($self->_writer_storage->_identity);
-
-  $guard->commit;
-
-  return $updated_cols;
-}
-
-sub _insert {
-  my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
-
-  my $updated_cols = $self->$next ($source, $to_insert);
-
-  my $final_row = {
-    ($identity_col ?
-      ($identity_col => $self->last_insert_id($source, $identity_col)) : ()),
-    %$to_insert,
-    %$updated_cols,
-  };
+  $self->_insert_blobs (
+    $source,
+    $blob_cols,
+    {
+      ( $identity_col
+        ? ( $identity_col => $self->last_insert_id($source, $identity_col) )
+        : ()
+      ),
+      %$to_insert,
+      %$updated_cols,
+    },
+  ) if $blob_cols;
 
-  $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
+  $guard->commit if $guard;
 
   return $updated_cols;
 }
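[Editor's note, not part of the commit: the rewritten insert() above collapses
the old _insert()/_writer_storage split into one code path. When the caller
supplies no value for the identity column and @@IDENTITY cannot be used,
_prep_for_execute appends the "horrific SELECT MAX(COL) hack" to the INSERT
and _execute() picks the value up, guarded by _perform_autoinc_retrieval. A
sketch of the resulting round-trip, for a hypothetical 'artist' table whose
identity column is 'artistid':

  # approximate batch sent to ASE:
  #
  #   INSERT INTO artist (name) VALUES (?)
  #   SELECT MAX(artistid) FROM artist

  # _execute() then stashes the fetched value (verbatim from the hunk above):
  $self->_identity( ($sth->fetchall_arrayref)->[0][0] )
    if $self->_perform_autoinc_retrieval;

Since MAX() is only trustworthy when nothing else can commit in between,
insert() opens a transaction on the separate _writer_storage connection
whenever one is not already active, mirroring the old dumb_last_insert_id
logic.]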
@@ -445,69 +432,75 @@
 sub update {
   my $self = shift;
   my ($source, $fields, $where, @rest) = @_;
 
-  my $blob_cols = $self->_remove_blob_cols($source, $fields);
-
-  my $table = $source->name;
-
-  my $columns_info = $source->columns_info;
+  #
+  # When *updating* identities, ASE requires SET IDENTITY_UPDATE called
+  #
+  if (my $blob_cols = $self->_remove_blob_cols($source, $fields)) {
 
-  my $identity_col =
-    first { $columns_info->{$_}{is_auto_increment} }
-    keys %$columns_info;
-
-  my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+    # If there are any blobs in $where, Sybase will return a descriptive error
+    # message.
+    # XXX blobs can still be used with a LIKE query, and this should be handled.
 
-  return $self->next::method(@_) unless $blob_cols;
+    # update+blob update(s) done atomically on separate connection
+    $self = $self->_writer_storage;
 
-# If there are any blobs in $where, Sybase will return a descriptive error
-# message.
-# XXX blobs can still be used with a LIKE query, and this should be handled.
+    my $guard = $self->txn_scope_guard;
 
-# update+blob update(s) done atomically on separate connection
-  $self = $self->_writer_storage;
+    # First update the blob columns to be updated to '' (taken from $fields, where
+    # it is originally put by _remove_blob_cols .)
+    my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
 
-  my $guard = $self->txn_scope_guard;
+    # We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+    $self->next::method($source, \%blobs_to_empty, $where, @rest);
 
-# First update the blob columns to be updated to '' (taken from $fields, where
-# it is originally put by _remove_blob_cols .)
-  my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+    # Now update the blobs before the other columns in case the update of other
+    # columns makes the search condition invalid.
+    my $rv = $self->_update_blobs($source, $blob_cols, $where);
 
-# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
-  $self->next::method($source, \%blobs_to_empty, $where, @rest);
+    if (keys %$fields) {
 
-# Now update the blobs before the other columns in case the update of other
-# columns makes the search condition invalid.
-  $self->_update_blobs($source, $blob_cols, $where);
+      # Now set the identity update flags for the actual update
+      local $self->{_autoinc_supplied_for_op} = grep
+        { $_->{is_auto_increment} }
+        values %{ $source->columns_info([ keys %$fields ]) }
+      ;
 
-  my @res;
-  if (%$fields) {
-    if (wantarray) {
-      @res    = $self->next::method(@_);
-    }
-    elsif (defined wantarray) {
-      $res[0] = $self->next::method(@_);
+      my $next = $self->next::can;
+      my $args = \@_;
+      return preserve_context {
+        $self->$next(@$args);
+      } after => sub { $guard->commit };
     }
     else {
-      $self->next::method(@_);
+      $guard->commit;
+      return $rv;
     }
   }
+  else {
+    # Set the identity update flags for the actual update
+    local $self->{_autoinc_supplied_for_op} = grep
+      { $_->{is_auto_increment} }
+      values %{ $source->columns_info([ keys %$fields ]) }
+    ;
 
-  $guard->commit;
-
-  return wantarray ? @res : $res[0];
+    return $self->next::method(@_);
+  }
 }
 
-sub insert_bulk {
+sub _insert_bulk {
   my $self = shift;
   my ($source, $cols, $data) = @_;
 
   my $columns_info = $source->columns_info;
 
-  my $identity_col =
-    first { $columns_info->{$_}{is_auto_increment} }
+  my ($identity_col) =
+    grep { $columns_info->{$_}{is_auto_increment} }
       keys %$columns_info;
 
-  my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0;
+  # FIXME - this is duplication from DBI.pm. When refactored towards
+  # the LobWriter this can be folded back where it belongs.
+  local $self->{_autoinc_supplied_for_op}
+    = grep { $_ eq $identity_col } @$cols;
 
   my $use_bulk_api =
     $self->_bulk_storage
      &&
@@ -525,17 +518,15 @@
 
   # next::method uses a txn anyway, but it ends too early in case we need to
   # select max(col) to get the identity for inserting blobs.
-  ($self, my $guard) = $self->{transaction_depth} == 0 ?
-    ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
-    :
-    ($self, undef);
-
-  local $self->{insert_bulk} = 1;
+  ($self, my $guard) = $self->transaction_depth
+    ? ($self, undef)
+    : ($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
+  ;
 
   $self->next::method(@_);
 
   if ($blob_cols) {
-    if ($is_identity_insert) {
+    if ($self->_autoinc_supplied_for_op) {
       $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
     }
     else {
@@ -572,13 +563,13 @@
   my @source_columns = $source->columns;
 
   # bcp identity index is 1-based
-  my $identity_idx = first { $source_columns[$_] eq $identity_col } (0..$#source_columns);
+  my ($identity_idx) = grep { $source_columns[$_] eq $identity_col } (0..$#source_columns);
   $identity_idx = defined $identity_idx ? $identity_idx + 1 : 0;
 
   my @new_data;
   for my $slice_idx (0..$#$data) {
     push @new_data, [map {
-      # identity data will be 'undef' if not $is_identity_insert
+      # identity data will be 'undef' if not _autoinc_supplied_for_op()
      # columns with defaults will also be 'undef'
      exists $orig_order{$_}
        ? $data->[$slice_idx][$orig_order{$_}]
@@ -599,7 +590,7 @@
   # This ignores any data conversion errors detected by the client side libs, as
   # they are usually harmless.
   my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
-    Sub::Name::subname insert_bulk => sub {
+    set_subname _insert_bulk_cslib_errhandler => sub {
      my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
 
      return 1 if $errno == 36;
@@ -614,7 +605,7 @@
   });
 
   my $exception = '';
-  try {
+  dbic_internal_try {
     my $bulk = $self->_bulk_storage;
 
     my $guard = $bulk->txn_scope_guard;
@@ -624,7 +615,7 @@
 ## will require SQLA or *Hacks changes for ordered columns
 #    $bulk->next::method($source, \@source_columns, \@new_data, {
 #      syb_bcp_attribs => {
-#        identity_flag => $is_identity_insert,
+#        identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
 #        identity_column => $identity_idx,
 #      }
 #    });
@@ -641,7 +632,7 @@
 #      'insert', # op
       {
         syb_bcp_attribs => {
-          identity_flag => $is_identity_insert,
+          identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
          identity_column => $identity_idx,
        }
      }
@@ -670,14 +661,14 @@
 
   if ($exception =~ /-Y option/) {
     my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
-          . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
+          . 'reverting to regular array inserts. Try unsetting the LC_ALL environment variable'
    ;
    $w .= "\n$exception" if $self->debug;
    carp $w;
 
    $self->_bulk_storage(undef);
    unshift @_, $self;
-    goto \&insert_bulk;
+    goto \&_insert_bulk;
  }
  elsif ($exception) {
    # rollback makes the bulkLogin connection unusable
@@ -701,7 +692,8 @@ sub _remove_blob_cols {
       }
       else {
         $fields->{$col} = \"''";
-        $blob_cols{$col} = $blob_val unless $blob_val eq '';
+        $blob_cols{$col} = $blob_val
+          if length $blob_val;
       }
     }
   }
@@ -709,7 +701,7 @@ sub _remove_blob_cols {
   return %blob_cols ? \%blob_cols : undef;
 }
 
-# same for insert_bulk
+# same for _insert_bulk
 sub _remove_blob_cols_array {
   my ($self, $source, $cols, $data) = @_;
 
@@ -727,7 +719,7 @@ sub _remove_blob_cols_array {
       else {
         $data->[$j][$i] = \"''";
         $blob_cols[$j][$i] = $blob_val
-          unless $blob_val eq '';
+          if length $blob_val;
       }
     }
   }
@@ -739,8 +731,8 @@
 sub _update_blobs {
   my ($self, $source, $blob_cols, $where) = @_;
 
-  my @primary_cols = try
-    { $source->_pri_cols }
+  my @primary_cols = dbic_internal_try
+    { $source->_pri_cols_or_die }
     catch {
       $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
     };
@@ -749,7 +741,7 @@
   if (
     ref $where eq 'HASH'
       and
-    @primary_cols == grep { defined $where->{$_} } @primary_cols
+    ! grep { ! defined $where->{$_} } @primary_cols
   ) {
     my %row_to_update;
     @row_to_update{@primary_cols} = @{$where}{@primary_cols};
@@ -768,26 +760,29 @@
 }
 
 sub _insert_blobs {
-  my ($self, $source, $blob_cols, $row) = @_;
-  my $dbh = $self->_get_dbh;
+  my ($self, $source, $blob_cols, $row_data) = @_;
 
   my $table = $source->name;
 
-  my %row = %$row;
-  my @primary_cols = try
-    { $source->_pri_cols }
+  my @primary_cols = dbic_internal_try
+    { $source->_pri_cols_or_die }
     catch {
       $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
     };
 
   $self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
-    if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
+    if grep { ! defined $row_data->{$_} } @primary_cols;
+
+  # if we are 2-phase inserting a blob - there is nothing to retrieve anymore,
+  # regardless of the previous state of the flag
+  local $self->{_perform_autoinc_retrieval}
+    if $self->_perform_autoinc_retrieval;
+
+  my %where = map {( $_ => $row_data->{$_} )} @primary_cols;
 
   for my $col (keys %$blob_cols) {
     my $blob = $blob_cols->{$col};
 
-    my %where = map { ($_, $row{$_}) } @primary_cols;
-
     my $cursor = $self->select ($source, [$col], \%where, {});
     $cursor->next;
     my $sth = $cursor->sth;
@@ -795,11 +790,17 @@ sub _insert_blobs {
     if (not $sth) {
       $self->throw_exception(
           "Could not find row in table '$table' for blob update:\n"
-        . (Dumper \%where)
+        . dump_value \%where
       );
     }
 
-    try {
+    # FIXME - it is not clear if this is needed at all. But it's been
+    # there since 2009 ( d867eedaa ), might as well let sleeping dogs
+    # lie... sigh.
+    weaken( my $wsth = $sth );
+    my $g = scope_guard { $wsth->finish if $wsth };
+
+    dbic_internal_try {
      do {
        $sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
      } while $sth->fetch;
@@ -819,7 +820,7 @@ sub _insert_blobs {
        $sth->func('ct_finish_send') or die $sth->errstr;
    }
    catch {
-      if ($self->using_freetds) {
+      if ($self->_using_freetds) {
        $self->throw_exception (
          "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
        );
@@ -827,9 +828,6 @@ sub _insert_blobs {
      else {
        $self->throw_exception($_);
      }
-    }
-    finally {
-      $sth->finish if $sth;
    };
  }
 }
@@ -973,7 +971,7 @@
 L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
 
 Sybase ASE for Linux (which comes with the Open Client libraries) may be
 downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
 
-To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
+To see if you're using FreeTDS run:
 
   perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
@@ -1025,9 +1023,9 @@ For example, this will not work:
 
   $schema->txn_do(sub {
     my $rs = $schema->resultset('Book');
-    while (my $row = $rs->next) {
+    while (my $result = $rs->next) {
       $schema->resultset('MetaData')->create({
-        book_id => $row->id,
+        book_id => $result->id,
         ...
      });
    }
@@ -1072,6 +1070,18 @@ for information on changing the setting on the server side.
 
 See L</connect_call_datetime_setup> to setup date formats for
 L<DBIx::Class::InflateColumn::DateTime>.
 
+=head1 LIMITED QUERIES
+
+Because ASE does not have a good way to limit results in SQL that works for
+all types of queries, the limit dialect is set to
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ>.
+
+Fortunately, ASE and L<DBD::Sybase> support cursors properly, so when
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ> is too slow
+you can use the L<software_limit|DBIx::Class::ResultSet/software_limit>
+L<DBIx::Class::ResultSet> attribute to simulate limited queries by skipping
+over records.
+
 =head1 TEXT/IMAGE COLUMNS
 
 L<DBD::Sybase> compiled with FreeTDS will B<not> allow you to insert or update
@@ -1110,7 +1120,7 @@ L<populate|DBIx::Class::ResultSet/populate> call, eg.:
 
 B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
 calls in your C<Result> classes B<must> list columns in database order for this
-to work. Also, you may have to unset the C<LANG> environment variable before
+to work. Also, you may have to unset the C<LC_ALL> environment variable before
 loading your app, as C<bcp -Y> is not yet supported in DBD::Sybase .
 
 When inserting IMAGE columns using this method, you'll need to use
@@ -1176,13 +1186,13 @@ bulk_insert using prepare_cached (see comments.)
 
 =back
 
-=head1 AUTHOR
+=head1 FURTHER QUESTIONS?
 
-See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
+Check the list of L<additional DBIC resources|DBIx::Class/GETTING HELP/SUPPORT>.
 
-=head1 LICENSE
+=head1 COPYRIGHT AND LICENSE
 
-You may distribute this code under the same terms as Perl itself.
-
-=cut
-# vim:sts=2 sw=2:
+This module is free software L<copyright|DBIx::Class/COPYRIGHT AND LICENSE>
+by the L<DBIx::Class (DBIC) authors|DBIx::Class/AUTHORS>. You can
+redistribute it and/or modify it under the same terms as the
+L<DBIx::Class library|DBIx::Class/COPYRIGHT AND LICENSE>.
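[Editor's note, not part of the commit: the LIMITED QUERIES section added by
this diff points at the software_limit resultset attribute as the fallback
when GenericSubQ is too slow. A minimal usage sketch, again assuming a
hypothetical 'Book' resultset; with software_limit the rows/offset windowing
is performed client-side by walking the cursor rather than in the generated
SQL:

  my @page = $schema->resultset('Book')->search(undef, {
    rows           => 25,
    offset         => 100,
    software_limit => 1,   # skip/fetch rows in DBIC instead of in the SQL
  })->all;

This trades server-side limiting for extra wire traffic, which the new POD
suggests is still preferable on ASE once the GenericSubQ emulation becomes
too slow.]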