diff --git a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
index b5f4818..3915eb2 100644
--- a/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
+++ b/lib/DBIx/Class/Storage/DBI/Sybase/ASE.pm
@@ -4,27 +4,34 @@
 use strict;
 use warnings;
 use base qw/
-  DBIx::Class::Storage::DBI::Sybase
-  DBIx::Class::Storage::DBI::AutoCast
+  DBIx::Class::Storage::DBI::Sybase
+  DBIx::Class::Storage::DBI::AutoCast
+  DBIx::Class::Storage::DBI::IdentityInsert
 /;
 use mro 'c3';
-use Carp::Clan qw/^DBIx::Class/;
-use Scalar::Util 'blessed';
+use DBIx::Class::Carp;
+use Scalar::Util qw/blessed weaken/;
 use List::Util 'first';
 use Sub::Name();
 use Data::Dumper::Concise 'Dumper';
 use Try::Tiny;
+use Context::Preserve 'preserve_context';
 use namespace::clean;
 
-__PACKAGE__->sql_limit_dialect ('RowCountOrGenericSubQ');
+__PACKAGE__->sql_limit_dialect ('GenericSubQ');
+__PACKAGE__->sql_quote_char ([qw/[ ]/]);
+__PACKAGE__->datetime_parser_type(
+  'DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format'
+);
 
 __PACKAGE__->mk_group_accessors('simple' =>
-    qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+    qw/_identity _identity_method _blob_log_on_update _parent_storage
+       _writer_storage _is_writer_storage
        _bulk_storage _is_bulk_storage _began_bulk_work
-       _bulk_disabled_due_to_coderef_connect_info_warned
-       _identity_method/
+    /
 );
+
 my @also_proxy_to_extra_storages = qw/
   connect_call_set_auto_cast auto_cast connect_call_blob_setup
   connect_call_datetime_setup
@@ -66,8 +73,8 @@
 
   my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
 
-  if ($self->using_freetds) {
-    carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+  if ($self->_using_freetds) {
+    carp_once <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
 
 You are using FreeTDS with Sybase.
 
@@ -111,18 +118,30 @@ EOF
 
 sub _init {
   my $self = shift;
+
+  $self->next::method(@_);
+
+  if ($self->_using_freetds && (my $ver = $self->_using_freetds_version||999) > 0.82) {
+    carp_once(
+      "Buggy FreeTDS version $ver detected, statement caching will not work and "
+    . 'will be disabled.'
+ ); + $self->disable_sth_caching(1); + } + $self->_set_max_connect(256); # create storage for insert/(update blob) transactions, # unless this is that storage - return if $self->_is_extra_storage; + return if $self->_parent_storage; my $writer_storage = (ref $self)->new; - $writer_storage->_is_extra_storage(1); + $writer_storage->_is_writer_storage(1); # just info $writer_storage->connect_info($self->connect_info); $writer_storage->auto_cast($self->auto_cast); + weaken ($writer_storage->{_parent_storage} = $self); $self->_writer_storage($writer_storage); # create a bulk storage unless connect_info is a coderef @@ -130,13 +149,13 @@ sub _init { my $bulk_storage = (ref $self)->new; - $bulk_storage->_is_extra_storage(1); $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics $bulk_storage->connect_info($self->connect_info); # this is why $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1'; + weaken ($bulk_storage->{_parent_storage} = $self); $self->_bulk_storage($bulk_storage); } @@ -171,35 +190,30 @@ sub disconnect { $self->next::method; } +# This is only invoked for FreeTDS drivers by ::Storage::DBI::Sybase::FreeTDS +sub _set_autocommit_stmt { + my ($self, $on) = @_; + + return 'SET CHAINED ' . ($on ? 'OFF' : 'ON'); +} + # Set up session settings for Sybase databases for the connection. # # Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS # DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however # we're using FreeTDS, CHAINED mode turns on an implicit transaction which we # only want when AutoCommit is off. -# -# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work. sub _run_connection_actions { my $self = shift; if ($self->_is_bulk_storage) { -# this should be cleared on every reconnect + # this should be cleared on every reconnect $self->_began_bulk_work(0); return; } - if (not $self->using_freetds) { - $self->_dbh->{syb_chained_txn} = 1; - } else { - # based on LongReadLen in connect_info - $self->set_textsize; - - if ($self->_dbh_autocommit) { - $self->_dbh->do('SET CHAINED OFF'); - } else { - $self->_dbh->do('SET CHAINED ON'); - } - } + $self->_dbh->{syb_chained_txn} = 1 + unless $self->_using_freetds; $self->next::method(@_); } @@ -240,53 +254,53 @@ sub _is_lob_column { } sub _prep_for_execute { - my $self = shift; - my ($op, $extra_bind, $ident, $args) = @_; - - my ($sql, $bind) = $self->next::method (@_); - - my $table = blessed $ident ? $ident->from : $ident; + my ($self, $op, $ident, $args) = @_; + + # +### This is commented out because all tests pass. However I am leaving it +### here as it may prove necessary (can't think through all combinations) +### BTW it doesn't currently work exactly - need better sensitivity to + # currently set value + # + #my ($op, $ident) = @_; + # + # inherit these from the parent for the duration of _prep_for_execute + # Don't know how to make a localizing loop with if's, otherwise I would + #local $self->{_autoinc_supplied_for_op} + # = $self->_parent_storage->_autoinc_supplied_for_op + #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage; + #local $self->{_perform_autoinc_retrieval} + # = $self->_parent_storage->_perform_autoinc_retrieval + #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage; + + my $limit; # extract and use shortcut on limit without offset + if ($op eq 'select' and ! 
$args->[4] and $limit = $args->[3]) { + $args = [ @$args ]; + $args->[3] = undef; + } - my $bind_info = $self->_resolve_column_info( - $ident, [map $_->[0], @{$bind}] - ); - my $bound_identity_col = - first { $bind_info->{$_}{is_auto_increment} } - keys %$bind_info - ; + my ($sql, $bind) = $self->next::method($op, $ident, $args); - my $columns_info = blessed $ident && $ident->columns_info; + # $limit is already sanitized by now + $sql = join( "\n", + "SET ROWCOUNT $limit", + $sql, + "SET ROWCOUNT 0", + ) if $limit; - my $identity_col = - $columns_info && - first { $columns_info->{$_}{is_auto_increment} } - keys %$columns_info - ; - - if (($op eq 'insert' && $bound_identity_col) || - ($op eq 'update' && exists $args->[0]{$identity_col})) { - $sql = join ("\n", - $self->_set_table_identity_sql($op => $table, 'on'), - $sql, - $self->_set_table_identity_sql($op => $table, 'off'), - ); - } - - if ($op eq 'insert' && (not $bound_identity_col) && $identity_col && - (not $self->{insert_bulk})) { - $sql = - "$sql\n" . - $self->_fetch_identity_sql($ident, $identity_col); + if (my $identity_col = $self->_perform_autoinc_retrieval) { + $sql .= "\n" . $self->_fetch_identity_sql($ident, $identity_col) } return ($sql, $bind); } -sub _set_table_identity_sql { - my ($self, $op, $table, $on_off) = @_; +sub _fetch_identity_sql { + my ($self, $source, $col) = @_; - return sprintf 'SET IDENTITY_%s %s %s', - uc($op), $self->sql_maker->_quote($table), uc($on_off); + return sprintf ("SELECT MAX(%s) FROM %s", + map { $self->sql_maker->_quote ($_) } ($col, $source->from) + ); } # Stolen from SQLT, with some modifications. This is a makeshift @@ -319,24 +333,13 @@ sub _native_data_type { return uc($TYPE_MAPPING{$type} || $type); } -sub _fetch_identity_sql { - my ($self, $source, $col) = @_; - - return sprintf ("SELECT MAX(%s) FROM %s", - map { $self->sql_maker->_quote ($_) } ($col, $source->from) - ); -} sub _execute { my $self = shift; - my ($op) = @_; + my ($rv, $sth, @bind) = $self->next::method(@_); - my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_); - - if ($op eq 'insert') { - $self->_identity($sth->fetchrow_array); - $sth->finish; - } + $self->_identity( ($sth->fetchall_arrayref)->[0][0] ) + if $self->_perform_autoinc_retrieval; return wantarray ? ($rv, $sth, @bind) : $rv; } @@ -355,6 +358,18 @@ sub insert { keys %$columns_info ) || ''; + # FIXME - this is duplication from DBI.pm. When refactored towards + # the LobWriter this can be folded back where it belongs. + local $self->{_autoinc_supplied_for_op} = exists $to_insert->{$identity_col} + ? 1 + : 0 + ; + local $self->{_perform_autoinc_retrieval} = + ($identity_col and ! exists $to_insert->{$identity_col}) + ? $identity_col + : undef + ; + # check for empty insert # INSERT INTO foo DEFAULT VALUES -- does not work with Sybase # try to insert explicit 'DEFAULT's instead (except for identity, timestamp @@ -377,17 +392,18 @@ sub insert { my $blob_cols = $self->_remove_blob_cols($source, $to_insert); # do we need the horrific SELECT MAX(COL) hack? 
- my $dumb_last_insert_id = - $identity_col - && (not exists $to_insert->{$identity_col}) - && ($self->_identity_method||'') ne '@@IDENTITY'; + my $need_dumb_last_insert_id = ( + $self->_perform_autoinc_retrieval + && + ($self->_identity_method||'') ne '@@IDENTITY' + ); my $next = $self->next::can; # we are already in a transaction, or there are no blobs # and we don't need the PK - just (try to) do it if ($self->{transaction_depth} - || (!$blob_cols && !$dumb_last_insert_id) + || (!$blob_cols && !$need_dumb_last_insert_id) ) { return $self->_insert ( $next, $source, $to_insert, $blob_cols, $identity_col @@ -430,57 +446,59 @@ sub update { my $self = shift; my ($source, $fields, $where, @rest) = @_; - my $blob_cols = $self->_remove_blob_cols($source, $fields); - - my $table = $source->name; - - my $columns_info = $source->columns_info; + # + # When *updating* identities, ASE requires SET IDENTITY_UPDATE called + # + if (my $blob_cols = $self->_remove_blob_cols($source, $fields)) { - my $identity_col = - first { $columns_info->{$_}{is_auto_increment} } - keys %$columns_info; + # If there are any blobs in $where, Sybase will return a descriptive error + # message. + # XXX blobs can still be used with a LIKE query, and this should be handled. - my $is_identity_update = $identity_col && defined $fields->{$identity_col}; + # update+blob update(s) done atomically on separate connection + $self = $self->_writer_storage; - return $self->next::method(@_) unless $blob_cols; + my $guard = $self->txn_scope_guard; -# If there are any blobs in $where, Sybase will return a descriptive error -# message. -# XXX blobs can still be used with a LIKE query, and this should be handled. + # First update the blob columns to be updated to '' (taken from $fields, where + # it is originally put by _remove_blob_cols .) + my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols; -# update+blob update(s) done atomically on separate connection - $self = $self->_writer_storage; + # We can't only update NULL blobs, because blobs cannot be in the WHERE clause. + $self->next::method($source, \%blobs_to_empty, $where, @rest); - my $guard = $self->txn_scope_guard; + # Now update the blobs before the other columns in case the update of other + # columns makes the search condition invalid. + my $rv = $self->_update_blobs($source, $blob_cols, $where); -# First update the blob columns to be updated to '' (taken from $fields, where -# it is originally put by _remove_blob_cols .) - my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols; + if (keys %$fields) { -# We can't only update NULL blobs, because blobs cannot be in the WHERE clause. + # Now set the identity update flags for the actual update + local $self->{_autoinc_supplied_for_op} = (first + { $_->{is_auto_increment} } + values %{ $source->columns_info([ keys %$fields ]) } + ) ? 1 : 0; - $self->next::method($source, \%blobs_to_empty, $where, @rest); - -# Now update the blobs before the other columns in case the update of other -# columns makes the search condition invalid. - $self->_update_blobs($source, $blob_cols, $where); - - my @res; - if (%$fields) { - if (wantarray) { - @res = $self->next::method(@_); - } - elsif (defined wantarray) { - $res[0] = $self->next::method(@_); + my $next = $self->next::can; + my $args = \@_; + return preserve_context { + $self->$next(@$args); + } after => sub { $guard->commit }; } else { - $self->next::method(@_); + $guard->commit; + return $rv; } } - - $guard->commit; - - return wantarray ? 
@res : $res[0]; + else { + # Set the identity update flags for the actual update + local $self->{_autoinc_supplied_for_op} = (first + { $_->{is_auto_increment} } + values %{ $source->columns_info([ keys %$fields ]) } + ) ? 1 : 0; + + return $self->next::method(@_); + } } sub insert_bulk { @@ -493,42 +511,39 @@ sub insert_bulk { first { $columns_info->{$_}{is_auto_increment} } keys %$columns_info; - my $is_identity_insert = (first { $_ eq $identity_col } @{$cols}) ? 1 : 0; - - my @source_columns = $source->columns; + # FIXME - this is duplication from DBI.pm. When refactored towards + # the LobWriter this can be folded back where it belongs. + local $self->{_autoinc_supplied_for_op} = + (first { $_ eq $identity_col } @$cols) + ? 1 + : 0 + ; my $use_bulk_api = $self->_bulk_storage && $self->_get_dbh->{syb_has_blk}; - if ((not $use_bulk_api) - && - (ref($self->_dbi_connect_info->[0]) eq 'CODE') - && - (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) { - carp <<'EOF'; -Bulk API support disabled due to use of a CODEREF connect_info. Reverting to -regular array inserts. -EOF - $self->_bulk_disabled_due_to_coderef_connect_info_warned(1); + if (! $use_bulk_api and ref($self->_dbi_connect_info->[0]) eq 'CODE') { + carp_unique( join ' ', + 'Bulk API support disabled due to use of a CODEREF connect_info.', + 'Reverting to regular array inserts.', + ); } if (not $use_bulk_api) { my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data); -# _execute_array uses a txn anyway, but it ends too early in case we need to +# next::method uses a txn anyway, but it ends too early in case we need to # select max(col) to get the identity for inserting blobs. ($self, my $guard) = $self->{transaction_depth} == 0 ? ($self->_writer_storage, $self->_writer_storage->txn_scope_guard) : ($self, undef); - local $self->{insert_bulk} = 1; - $self->next::method(@_); if ($blob_cols) { - if ($is_identity_insert) { + if ($self->_autoinc_supplied_for_op) { $self->_insert_blobs_array ($source, $blob_cols, $cols, $data); } else { @@ -559,27 +574,34 @@ EOF # otherwise, use the bulk API # rearrange @$data so that columns are in database order - my %orig_idx; - @orig_idx{@$cols} = 0..$#$cols; +# and so we submit a full column list + my %orig_order = map { $cols->[$_] => $_ } 0..$#$cols; + + my @source_columns = $source->columns; - my %new_idx; - @new_idx{@source_columns} = 0..$#source_columns; + # bcp identity index is 1-based + my $identity_idx = first { $source_columns[$_] eq $identity_col } (0..$#source_columns); + $identity_idx = defined $identity_idx ? $identity_idx + 1 : 0; my @new_data; - for my $datum (@$data) { - my $new_datum = []; - for my $col (@source_columns) { -# identity data will be 'undef' if not $is_identity_insert -# columns with defaults will also be 'undef' - $new_datum->[ $new_idx{$col} ] = - exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef; - } - push @new_data, $new_datum; + for my $slice_idx (0..$#$data) { + push @new_data, [map { + # identity data will be 'undef' if not _autoinc_supplied_for_op() + # columns with defaults will also be 'undef' + exists $orig_order{$_} + ? $data->[$slice_idx][$orig_order{$_}] + : undef + } @source_columns]; } -# bcp identity index is 1-based - my $identity_idx = exists $new_idx{$identity_col} ? 
-    $new_idx{$identity_col} + 1 : 0;
+
+  my $proto_bind = $self->_resolve_bindattrs(
+    $source,
+    [map {
+      [ { dbic_colname => $source_columns[$_], _bind_data_slice_idx => $_ }
+        => $new_data[0][$_] ]
+    } (0 ..$#source_columns) ],
+    $columns_info
+  );
 
 ## Set a client-side conversion error handler, straight from DBD::Sybase docs.
 # This ignores any data conversion errors detected by the client side libs, as
@@ -605,11 +627,12 @@
 
   my $guard = $bulk->txn_scope_guard;
 
+## FIXME - once this is done - address the FIXME on finish() below
 ## XXX get this to work instead of our own $sth
 ## will require SQLA or *Hacks changes for ordered columns
 #  $bulk->next::method($source, \@source_columns, \@new_data, {
 #    syb_bcp_attribs => {
-#      identity_flag => $is_identity_insert,
+#      identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
 #      identity_column => $identity_idx,
 #    }
 #  });
@@ -626,22 +649,25 @@
 #    'insert', # op
     {
       syb_bcp_attribs => {
-        identity_flag => $is_identity_insert,
+        identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
         identity_column => $identity_idx,
       }
     }
   );
 
-  my @bind = do {
-    my $idx = 0;
-    map [ $_, $idx++ ], @source_columns;
-  };
+  {
+    # FIXME the $sth->finish in _execute_array does a rollback for some
+    # reason. Disable it temporarily until we fix the SQLMaker thing above
+    no warnings 'redefine';
+    no strict 'refs';
+    local *{ref($sth).'::finish'} = sub {};
 
-  $self->_execute_array(
-    $source, $sth, \@bind, \@source_columns, \@new_data, sub {
-      $guard->commit
-    }
-  );
+    $self->_dbh_execute_for_fetch(
+      $source, $sth, $proto_bind, \@source_columns, \@new_data
+    );
+  }
+
+  $guard->commit;
 
   $bulk->_query_end($sql);
 } catch {
@@ -668,15 +694,6 @@
   }
 }
 
-sub _dbh_execute_array {
-  my ($self, $sth, $tuple_status, $cb) = @_;
-
-  my $rv = $self->next::method($sth, $tuple_status);
-  $cb->() if $cb;
-
-  return $rv;
-}
-
 # Make sure blobs are not bound as placeholders, and return any non-empty ones
 # as a hash.
 sub _remove_blob_cols {
@@ -736,26 +753,25 @@ sub _update_blobs {
     $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
   };
 
-# check if we're updating a single row by PK
-  my $pk_cols_in_where = 0;
-  for my $col (@primary_cols) {
-    $pk_cols_in_where++ if defined $where->{$col};
-  }
-  my @rows;
-
-  if ($pk_cols_in_where == @primary_cols) {
+  my @pks_to_update;
+  if (
+    ref $where eq 'HASH'
+      and
+    @primary_cols == grep { defined $where->{$_} } @primary_cols
+  ) {
     my %row_to_update;
     @row_to_update{@primary_cols} = @{$where}{@primary_cols};
-    @rows = \%row_to_update;
-  } else {
+    @pks_to_update = \%row_to_update;
+  }
+  else {
     my $cursor = $self->select ($source, \@primary_cols, $where, {});
-    @rows = map {
+    @pks_to_update = map {
       my %row;
       @row{@primary_cols} = @$_;
       \%row
     } $cursor->all;
   }
 
-  for my $row (@rows) {
-    $self->_insert_blobs($source, $blob_cols, $row);
+  for my $ident (@pks_to_update) {
+    $self->_insert_blobs($source, $blob_cols, $ident);
   }
 }
 
@@ -811,7 +827,7 @@
       $sth->func('ct_finish_send') or die $sth->errstr;
     }
     catch {
-      if ($self->using_freetds) {
+      if ($self->_using_freetds) {
        $self->throw_exception (
 "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
        );
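The blob and datetime connection hooks touched by this patch are all driven
from connect_info. As a hedged sketch of how a user would enable them (the
schema class, DSN and credentials below are placeholders, not part of the
patch):

  use strict;
  use warnings;
  use MyApp::Schema;   # hypothetical DBIx::Class schema class

  my $schema = MyApp::Schema->connect(
    'dbi:Sybase:server=SYBASE;database=myapp',   # placeholder DSN
    'username', 'password',
    {
      on_connect_call => [
        [ blob_setup => log_on_update => 0 ],    # connect_call_blob_setup()
        'datetime_setup',                        # connect_call_datetime_setup()
      ],
    },
  );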
@@ -857,45 +873,38 @@ In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
 
   $dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
   $dbh->do('set dateformat mdy');   # input fmt:  08/13/1979 18:08:55.080
 
-On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
-L<DateTime::Format::Sybase>, which you will need to install.
-
-This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, note that
 C<SMALLDATETIME> columns only have minute precision.
 
 =cut
 
-{
-  my $old_dbd_warned = 0;
+sub connect_call_datetime_setup {
+  my $self = shift;
+  my $dbh = $self->_get_dbh;
 
-  sub connect_call_datetime_setup {
-    my $self = shift;
-    my $dbh = $self->_get_dbh;
-
-    if ($dbh->can('syb_date_fmt')) {
-      # amazingly, this works with FreeTDS
-      $dbh->syb_date_fmt('ISO_strict');
-    } elsif (not $old_dbd_warned) {
-      carp "Your DBD::Sybase is too old to support ".
-        "DBIx::Class::InflateColumn::DateTime, please upgrade!";
-      $old_dbd_warned = 1;
-    }
+  if ($dbh->can('syb_date_fmt')) {
+    # amazingly, this works with FreeTDS
+    $dbh->syb_date_fmt('ISO_strict');
+  }
+  else {
+    carp_once
+      'Your DBD::Sybase is too old to support '
+     .'DBIx::Class::InflateColumn::DateTime, please upgrade!';
+
+    # FIXME - in retrospect this is a rather bad US-centric choice
+    # of format. Not changing as a bugwards compat, though in reality
+    # the only piece that sees the results of $dt object formatting
+    # (as opposed to parsing) is the database itself, so theoretically
+    # changing both this SET command and the formatter definition of
+    # ::S::D::Sybase::ASE::DateTime::Format below should be safe and
+    # transparent
 
     $dbh->do('SET DATEFORMAT mdy');
-
-    1;
   }
 }
 
-sub datetime_parser_type { "DateTime::Format::Sybase" }
-
-# ->begin_work and such have no effect with FreeTDS but we run them anyway to
-# let the DBD keep any state it needs to.
-#
-# If they ever do start working, the extra statements will do no harm (because
-# Sybase supports nested transactions.)
-sub _dbh_begin_work {
+sub _exec_txn_begin {
   my $self = shift;
 
   # bulkLogin=1 connections are always in a transaction, and can only call BEGIN
@@ -904,44 +913,52 @@
 
   $self->next::method(@_);
 
-  if ($self->using_freetds) {
-    $self->_get_dbh->do('BEGIN TRAN');
-  }
-
   $self->_began_bulk_work(1) if $self->_is_bulk_storage;
 }
 
-sub _dbh_commit {
-  my $self = shift;
-  if ($self->using_freetds) {
-    $self->_dbh->do('COMMIT');
-  }
-  return $self->next::method(@_);
-}
-
-sub _dbh_rollback {
-  my $self = shift;
-  if ($self->using_freetds) {
-    $self->_dbh->do('ROLLBACK');
-  }
-  return $self->next::method(@_);
-}
-
 # savepoint support using ASE syntax
 
-sub _svp_begin {
+sub _exec_svp_begin {
   my ($self, $name) = @_;
 
-  $self->_get_dbh->do("SAVE TRANSACTION $name");
+  $self->_dbh->do("SAVE TRANSACTION $name");
 }
 
 # A new SAVE TRANSACTION with the same name releases the previous one.
-sub _svp_release { 1 }
+sub _exec_svp_release { 1 }
 
-sub _svp_rollback {
+sub _exec_svp_rollback {
   my ($self, $name) = @_;
 
-  $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+  $self->_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+package # hide from PAUSE
+  DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format;
+
+my $datetime_parse_format = '%Y-%m-%dT%H:%M:%S.%3NZ';
+my $datetime_format_format = '%m/%d/%Y %H:%M:%S.%3N';
+
+my ($datetime_parser, $datetime_formatter);
+
+sub parse_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_parser ||= DateTime::Format::Strptime->new(
+    pattern => $datetime_parse_format,
+    on_error => 'croak',
+  );
+  return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+  shift;
+  require DateTime::Format::Strptime;
+  $datetime_formatter ||= DateTime::Format::Strptime->new(
+    pattern => $datetime_format_format,
+    on_error => 'croak',
+  );
+  return $datetime_formatter->format_datetime(shift);
 }
 
 1;
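To make the two Strptime patterns in the new DateTime::Format class concrete,
here is a small round-trip illustration. It is not part of the patch and the
sample timestamp is made up, but the patterns are exactly the ones defined
above:

  use strict;
  use warnings;
  use DateTime::Format::Strptime;

  # what ASE emits once $dbh->syb_date_fmt('ISO_strict') is in effect
  my $parser = DateTime::Format::Strptime->new(
    pattern  => '%Y-%m-%dT%H:%M:%S.%3NZ',
    on_error => 'croak',
  );
  my $dt = $parser->parse_datetime('2004-08-21T14:36:48.080Z');

  # what ASE expects back while 'SET DATEFORMAT mdy' is active
  my $formatter = DateTime::Format::Strptime->new(
    pattern  => '%m/%d/%Y %H:%M:%S.%3N',
    on_error => 'croak',
  );
  print $formatter->format_datetime($dt), "\n";  # 08/21/2004 14:36:48.080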
@@ -949,7 +966,7 @@
 =head1 Schema::Loader Support
 
 As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
-most (if not all) versions of Sybase ASE.
+most versions of Sybase ASE.
 
 =head1 FreeTDS
 
@@ -964,22 +981,26 @@
 L<http://www.isug.com/Sybase_FAQ/ASE/section7.html>.
 
 Sybase ASE for Linux (which comes with the Open Client libraries) may be
 downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
 
-To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
+To see if you're using FreeTDS run:
 
   perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
 
+It is recommended to set C<tds version> for your ASE server to C<5.0> in
+C</etc/freetds/freetds.conf>.
+
-Some versions of the libraries involved will not support placeholders, in which
-case the storage will be reblessed to
+Some versions or configurations of the libraries involved will not support
+placeholders, in which case the storage will be reblessed to
 L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
 
 In some configurations, placeholders will work but will throw implicit type
 conversion errors for anything that's not expecting a string. In such a case,
 the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
 automatically set, which you may enable on connection with
-L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
-for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
-definitions in your Result classes, and are mapped to a Sybase type (if it isn't
-already) using a mapping based on L<SQL::Translator>.
+L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>.
+The type info for the C<CAST>s is taken from the
+L<DBIx::Class::ResultSource/data_type> definitions in your Result classes, and
+are mapped to a Sybase type (if it isn't already) using a mapping based on
+L<SQL::Translator>.
 
 In other configurations, placeholders will work just as they do with the Sybase
 Open Client libraries.
@@ -997,14 +1018,14 @@
 In addition, they are done on a separate connection so that it's possible to
 have active cursors when doing an insert.
 
 When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
 are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY>
 which is a session variable.
 
 =head1 TRANSACTIONS
 
-Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
-begin a transaction while there are active cursors, nor can you use multiple
-active cursors within a transaction. An active cursor is, for example, a
+Due to limitations of the TDS protocol and L<DBD::Sybase>, you cannot begin a
+transaction while there are active cursors, nor can you use multiple active
+cursors within a transaction. An active cursor is, for example, a
 L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
 C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
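Staying with transactions: the _exec_svp_* methods renamed earlier in this
patch are what DBIx::Class invokes when the auto_savepoint connect_info
option is enabled, so nested txn_do() calls map to SAVE TRANSACTION /
ROLLBACK TRANSACTION instead of a second BEGIN TRAN. A hedged sketch, with
the schema class and connection details as placeholders:

  use strict;
  use warnings;
  use MyApp::Schema;   # hypothetical schema class

  my $schema = MyApp::Schema->connect(
    'dbi:Sybase:server=SYBASE', 'username', 'password',
    { auto_savepoint => 1 },
  );

  $schema->txn_do(sub {              # outer: BEGIN TRAN
    $schema->resultset('Book')->create({ title => 'kept' });

    eval {
      $schema->txn_do(sub {          # inner: SAVE TRANSACTION (e.g. savepoint_0)
        $schema->resultset('Book')->create({ title => 'discarded' });
        die "roll back only the inner work\n";
      });
    };
    # the inner changes were rolled back to the savepoint; the outer
    # transaction is still live and commits when this sub returns
  });

The hunk below returns to the active-cursor limitation described above, with
a concrete example of code that will not work.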
@@ -1012,9 +1033,9 @@
 For example, this will not work:
 
   $schema->txn_do(sub {
     my $rs = $schema->resultset('Book');
-    while (my $row = $rs->next) {
+    while (my $result = $rs->next) {
       $schema->resultset('MetaData')->create({
-        book_id => $row->id,
+        book_id => $result->id,
         ...
       });
     }
@@ -1059,6 +1080,18 @@ for information on changing the setting on the server side.
 
 See L</connect_call_datetime_setup> to setup date formats for L<DBIx::Class::InflateColumn::DateTime>.
 
+=head1 LIMITED QUERIES
+
+Because ASE does not have a good way to limit results in SQL that works for all
+types of queries, the limit dialect is set to
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ>.
+
+Fortunately, ASE and L<DBD::Sybase> support cursors properly, so when
+L<GenericSubQ|DBIx::Class::SQLMaker::LimitDialects/GenericSubQ> is too slow you can use
+the L<software_limit|DBIx::Class::ResultSet/software_limit>
+L<DBIx::Class::ResultSet> attribute to simulate limited queries by skipping over
+records.
+
 =head1 TEXT/IMAGE COLUMNS
 
 L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
@@ -1098,7 +1131,7 @@ L<populate|DBIx::Class::ResultSet/populate> call, eg.:
 
 B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
 calls in your C<Result> classes B<must> list columns in database order for this
 to work. Also, you may have to unset the C<LANG> environment variable before
-loading your app, if it doesn't match the character set of your database.
+loading your app, as C<utf8> is not yet supported in DBD::Sybase .
 
 When inserting IMAGE columns using this method, you'll need to use
 L</connect_call_blob_setup> as well.
 
@@ -1115,6 +1148,7 @@ represent them in your Result classes as:
 
     data_type => undef,
     default_value => \'getdate()',
     is_nullable => 0,
+    inflate_datetime => 1,
   }
 
 The C<data_type> must exist and must be C<undef>. Then empty inserts will work
@@ -1154,10 +1188,6 @@ Real limits and limited counts using stored procedures deployed on startup.
 
 =item *
 
-Adaptive Server Anywhere (ASA) support
-
-=item *
-
 Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
 
 =item *
 
 bulk_insert using prepare_cached (see comments.)
 
@@ -1168,7 +1198,7 @@
 =head1 AUTHOR
 
-See L<DBIx::Class/CONTRIBUTORS>.
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
 
 =head1 LICENSE