use warnings;
use base qw/
- DBIx::Class::Storage::DBI::Sybase
- DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::Sybase
+ DBIx::Class::Storage::DBI::AutoCast
+ DBIx::Class::Storage::DBI::IdentityInsert
/;
use mro 'c3';
-use Carp::Clan qw/^DBIx::Class/;
-use Scalar::Util();
-use List::Util();
+use DBIx::Class::Carp;
+use Scalar::Util qw/blessed weaken/;
+use List::Util 'first';
use Sub::Name();
-use Data::Dumper::Concise();
+use Data::Dumper::Concise 'Dumper';
+use Try::Tiny;
+use Context::Preserve 'preserve_context';
+use DBIx::Class::_Util 'sigwarn_silencer';
+use namespace::clean;
+
+__PACKAGE__->sql_limit_dialect ('GenericSubQ');
+__PACKAGE__->sql_quote_char ([qw/[ ]/]);
+__PACKAGE__->datetime_parser_type(
+ 'DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format'
+);
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+ qw/_identity _identity_method _blob_log_on_update _parent_storage
+ _writer_storage _is_writer_storage
_bulk_storage _is_bulk_storage _began_bulk_work
- _bulk_disabled_due_to_coderef_connect_info_warned
- _identity_method/
+ /
);
+
my @also_proxy_to_extra_storages = qw/
connect_call_set_auto_cast auto_cast connect_call_blob_setup
connect_call_datetime_setup
without doing a C<SELECT MAX(col)>. This is done safely in a transaction
(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
-A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
+A recommended L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting:
on_connect_call => [['datetime_setup'], ['blob_setup', log_on_update => 0]]
my $no_bind_vars = __PACKAGE__ . '::NoBindVars';
- if ($self->using_freetds) {
- carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
+ if ($self->_using_freetds) {
+ carp_once <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
You are using FreeTDS with Sybase.
variable.
EOF
- if (not $self->_typeless_placeholders_supported) {
- if ($self->_placeholders_supported) {
+ if (not $self->_use_typeless_placeholders) {
+ if ($self->_use_placeholders) {
$self->auto_cast(1);
}
else {
$self->_rebless;
}
# this is highly unlikely, but we check just in case
- elsif (not $self->_typeless_placeholders_supported) {
+ elsif (not $self->_use_typeless_placeholders) {
$self->auto_cast(1);
}
}
sub _init {
my $self = shift;
+
+ $self->next::method(@_);
+
+ if ($self->_using_freetds && (my $ver = $self->_using_freetds_version||999) > 0.82) {
+ carp_once(
+ "Buggy FreeTDS version $ver detected, statement caching will not work and "
+ . 'will be disabled.'
+ );
+ $self->disable_sth_caching(1);
+ }
+
$self->_set_max_connect(256);
# create storage for insert/(update blob) transactions,
# unless this is that storage
- return if $self->_is_extra_storage;
+ return if $self->_parent_storage;
my $writer_storage = (ref $self)->new;
- $writer_storage->_is_extra_storage(1);
+ $writer_storage->_is_writer_storage(1); # just info
$writer_storage->connect_info($self->connect_info);
$writer_storage->auto_cast($self->auto_cast);
+ weaken ($writer_storage->{_parent_storage} = $self);
$self->_writer_storage($writer_storage);
# create a bulk storage unless connect_info is a coderef
my $bulk_storage = (ref $self)->new;
- $bulk_storage->_is_extra_storage(1);
$bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
$bulk_storage->connect_info($self->connect_info);
# this is why
$bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+ weaken ($bulk_storage->{_parent_storage} = $self);
$self->_bulk_storage($bulk_storage);
}
# Even though we call $sth->finish for uses off the bulk API, there's still an
# "active statement" warning on disconnect, which we throw away here.
-# This is due to the bug described in insert_bulk.
+# This is due to the bug described in _insert_bulk.
# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
- local $SIG{__WARN__} = sub {
- warn $_[0] unless $_[0] =~ /active statement/i;
- } if $self->_is_bulk_storage;
+ local $SIG{__WARN__} = sigwarn_silencer(qr/active statement/i)
+ if $self->_is_bulk_storage;
# so that next transaction gets a dbh
$self->_began_bulk_work(0) if $self->_is_bulk_storage;
$self->next::method;
}
+# This is only invoked for FreeTDS drivers by ::Storage::DBI::Sybase::FreeTDS
+sub _set_autocommit_stmt {
+ my ($self, $on) = @_;
+
+ return 'SET CHAINED ' . ($on ? 'OFF' : 'ON');
+}
+
# Set up session settings for Sybase databases for the connection.
#
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
# only want when AutoCommit is off.
-#
-# Also SET TEXTSIZE for FreeTDS because LongReadLen doesn't work.
sub _run_connection_actions {
my $self = shift;
if ($self->_is_bulk_storage) {
-# this should be cleared on every reconnect
+ # this should be cleared on every reconnect
$self->_began_bulk_work(0);
return;
}
- if (not $self->using_freetds) {
- $self->_dbh->{syb_chained_txn} = 1;
- } else {
- # based on LongReadLen in connect_info
- $self->set_textsize;
-
- if ($self->_dbh_autocommit) {
- $self->_dbh->do('SET CHAINED OFF');
- } else {
- $self->_dbh->do('SET CHAINED ON');
- }
- }
+ $self->_dbh->{syb_chained_txn} = 1
+ unless $self->_using_freetds;
$self->next::method(@_);
}
if exists $args{log_on_update};
}
-sub _is_lob_type {
- my $self = shift;
- my $type = shift;
- $type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
-}
-
sub _is_lob_column {
my ($self, $source, $column) = @_;
}
sub _prep_for_execute {
- my $self = shift;
- my ($op, $extra_bind, $ident, $args) = @_;
-
- my ($sql, $bind) = $self->next::method (@_);
-
- my $table = Scalar::Util::blessed($ident) ? $ident->from : $ident;
+ my ($self, $op, $ident, $args) = @_;
+
+ #
+### This is commented out because all tests pass. However I am leaving it
+### here as it may prove necessary (can't think through all combinations)
+### BTW it doesn't currently work exactly - need better sensitivity to
+ # currently set value
+ #
+ #my ($op, $ident) = @_;
+ #
+ # inherit these from the parent for the duration of _prep_for_execute
+ # Don't know how to make a localizing loop with if's, otherwise I would
+ #local $self->{_autoinc_supplied_for_op}
+ # = $self->_parent_storage->_autoinc_supplied_for_op
+ #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
+ #local $self->{_perform_autoinc_retrieval}
+ # = $self->_parent_storage->_perform_autoinc_retrieval
+ #if ($op eq 'insert' or $op eq 'update') and $self->_parent_storage;
+
+ my $limit; # extract and use shortcut on limit without offset
+ if ($op eq 'select' and ! $args->[4] and $limit = $args->[3]) {
+ $args = [ @$args ];
+ $args->[3] = undef;
+ }
- my $bind_info = $self->_resolve_column_info(
- $ident, [map $_->[0], @{$bind}]
- );
- my $bound_identity_col = List::Util::first
- { $bind_info->{$_}{is_auto_increment} }
- (keys %$bind_info)
- ;
- my $identity_col = Scalar::Util::blessed($ident) &&
- List::Util::first
- { $ident->column_info($_)->{is_auto_increment} }
- $ident->columns
- ;
+ my ($sql, $bind) = $self->next::method($op, $ident, $args);
- if (($op eq 'insert' && $bound_identity_col) ||
- ($op eq 'update' && exists $args->[0]{$identity_col})) {
- $sql = join ("\n",
- $self->_set_table_identity_sql($op => $table, 'on'),
- $sql,
- $self->_set_table_identity_sql($op => $table, 'off'),
- );
- }
+ # $limit is already sanitized by now
+ $sql = join( "\n",
+ "SET ROWCOUNT $limit",
+ $sql,
+ "SET ROWCOUNT 0",
+ ) if $limit;
- if ($op eq 'insert' && (not $bound_identity_col) && $identity_col &&
- (not $self->{insert_bulk})) {
- $sql =
- "$sql\n" .
- $self->_fetch_identity_sql($ident, $identity_col);
+ if (my $identity_col = $self->_perform_autoinc_retrieval) {
+ $sql .= "\n" . $self->_fetch_identity_sql($ident, $identity_col)
}
return ($sql, $bind);
}
-sub _set_table_identity_sql {
- my ($self, $op, $table, $on_off) = @_;
+sub _fetch_identity_sql {
+ my ($self, $source, $col) = @_;
- return sprintf 'SET IDENTITY_%s %s %s',
- uc($op), $self->sql_maker->_quote($table), uc($on_off);
+ return sprintf ("SELECT MAX(%s) FROM %s",
+ map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+ );
}
# Stolen from SQLT, with some modifications. This is a makeshift
return uc($TYPE_MAPPING{$type} || $type);
}
-sub _fetch_identity_sql {
- my ($self, $source, $col) = @_;
-
- return sprintf ("SELECT MAX(%s) FROM %s",
- map { $self->sql_maker->_quote ($_) } ($col, $source->from)
- );
-}
sub _execute {
my $self = shift;
- my ($op) = @_;
-
- my ($rv, $sth, @bind) = $self->dbh_do($self->can('_dbh_execute'), @_);
+ my ($rv, $sth, @bind) = $self->next::method(@_);
- if ($op eq 'insert') {
- $self->_identity($sth->fetchrow_array);
- $sth->finish;
- }
+ $self->_identity( ($sth->fetchall_arrayref)->[0][0] )
+ if $self->_perform_autoinc_retrieval;
return wantarray ? ($rv, $sth, @bind) : $rv;
}
my $self = shift;
my ($source, $to_insert) = @_;
- my $identity_col = (List::Util::first
- { $source->column_info($_)->{is_auto_increment} }
- $source->columns) || '';
+ my $columns_info = $source->columns_info;
+
+ my $identity_col =
+ (first { $columns_info->{$_}{is_auto_increment} }
+ keys %$columns_info )
+ || '';
+
+ # FIXME - this is duplication from DBI.pm. When refactored towards
+ # the LobWriter this can be folded back where it belongs.
+ local $self->{_autoinc_supplied_for_op} = exists $to_insert->{$identity_col}
+ ? 1
+ : 0
+ ;
+ local $self->{_perform_autoinc_retrieval} =
+ ($identity_col and ! exists $to_insert->{$identity_col})
+ ? $identity_col
+ : undef
+ ;
# check for empty insert
# INSERT INTO foo DEFAULT VALUES -- does not work with Sybase
my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
# do we need the horrific SELECT MAX(COL) hack?
- my $dumb_last_insert_id =
- $identity_col
- && (not exists $to_insert->{$identity_col})
- && ($self->_identity_method||'') ne '@@IDENTITY';
+ my $need_dumb_last_insert_id = (
+ $self->_perform_autoinc_retrieval
+ &&
+ ($self->_identity_method||'') ne '@@IDENTITY'
+ );
my $next = $self->next::can;
# we are already in a transaction, or there are no blobs
# and we don't need the PK - just (try to) do it
if ($self->{transaction_depth}
- || (!$blob_cols && !$dumb_last_insert_id)
+ || (!$blob_cols && !$need_dumb_last_insert_id)
) {
return $self->_insert (
$next, $source, $to_insert, $blob_cols, $identity_col
my $self = shift;
my ($source, $fields, $where, @rest) = @_;
- my $wantarray = wantarray;
-
- my $blob_cols = $self->_remove_blob_cols($source, $fields);
-
- my $table = $source->name;
-
- my $identity_col = List::Util::first
- { $source->column_info($_)->{is_auto_increment} }
- $source->columns;
+ #
+ # When *updating* identities, ASE requires SET IDENTITY_UPDATE called
+ #
+ if (my $blob_cols = $self->_remove_blob_cols($source, $fields)) {
- my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+ # If there are any blobs in $where, Sybase will return a descriptive error
+ # message.
+ # XXX blobs can still be used with a LIKE query, and this should be handled.
- return $self->next::method(@_) unless $blob_cols;
+ # update+blob update(s) done atomically on separate connection
+ $self = $self->_writer_storage;
-# If there are any blobs in $where, Sybase will return a descriptive error
-# message.
-# XXX blobs can still be used with a LIKE query, and this should be handled.
+ my $guard = $self->txn_scope_guard;
-# update+blob update(s) done atomically on separate connection
- $self = $self->_writer_storage;
+ # First update the blob columns to be updated to '' (taken from $fields, where
+ # it is originally put by _remove_blob_cols .)
+ my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
- my $guard = $self->txn_scope_guard;
+ # We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+ $self->next::method($source, \%blobs_to_empty, $where, @rest);
-# First update the blob columns to be updated to '' (taken from $fields, where
-# it is originally put by _remove_blob_cols .)
- my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+ # Now update the blobs before the other columns in case the update of other
+ # columns makes the search condition invalid.
+ my $rv = $self->_update_blobs($source, $blob_cols, $where);
-# We can't only update NULL blobs, because blobs cannot be in the WHERE clause.
+ if (keys %$fields) {
- $self->next::method($source, \%blobs_to_empty, $where, @rest);
+ # Now set the identity update flags for the actual update
+ local $self->{_autoinc_supplied_for_op} = (first
+ { $_->{is_auto_increment} }
+ values %{ $source->columns_info([ keys %$fields ]) }
+ ) ? 1 : 0;
-# Now update the blobs before the other columns in case the update of other
-# columns makes the search condition invalid.
- $self->_update_blobs($source, $blob_cols, $where);
-
- my @res;
- if (%$fields) {
- if ($wantarray) {
- @res = $self->next::method(@_);
- }
- elsif (defined $wantarray) {
- $res[0] = $self->next::method(@_);
+ my $next = $self->next::can;
+ my $args = \@_;
+ return preserve_context {
+ $self->$next(@$args);
+ } after => sub { $guard->commit };
}
else {
- $self->next::method(@_);
+ $guard->commit;
+ return $rv;
}
}
-
- $guard->commit;
-
- return $wantarray ? @res : $res[0];
+ else {
+ # Set the identity update flags for the actual update
+ local $self->{_autoinc_supplied_for_op} = (first
+ { $_->{is_auto_increment} }
+ values %{ $source->columns_info([ keys %$fields ]) }
+ ) ? 1 : 0;
+
+ return $self->next::method(@_);
+ }
}
-sub insert_bulk {
+sub _insert_bulk {
my $self = shift;
my ($source, $cols, $data) = @_;
- my $identity_col = List::Util::first
- { $source->column_info($_)->{is_auto_increment} }
- $source->columns;
+ my $columns_info = $source->columns_info;
- my $is_identity_insert = (List::Util::first
- { $_ eq $identity_col }
- @{$cols}
- ) ? 1 : 0;
+ my $identity_col =
+ first { $columns_info->{$_}{is_auto_increment} }
+ keys %$columns_info;
- my @source_columns = $source->columns;
+ # FIXME - this is duplication from DBI.pm. When refactored towards
+ # the LobWriter this can be folded back where it belongs.
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_ eq $identity_col } @$cols)
+ ? 1
+ : 0
+ ;
my $use_bulk_api =
$self->_bulk_storage &&
$self->_get_dbh->{syb_has_blk};
- if ((not $use_bulk_api)
- &&
- (ref($self->_dbi_connect_info->[0]) eq 'CODE')
- &&
- (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
- carp <<'EOF';
-Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
-regular array inserts.
-EOF
- $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+ if (! $use_bulk_api and ref($self->_dbi_connect_info->[0]) eq 'CODE') {
+ carp_unique( join ' ',
+ 'Bulk API support disabled due to use of a CODEREF connect_info.',
+ 'Reverting to regular array inserts.',
+ );
}
if (not $use_bulk_api) {
my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
-# _execute_array uses a txn anyway, but it ends too early in case we need to
+# next::method uses a txn anyway, but it ends too early in case we need to
# select max(col) to get the identity for inserting blobs.
($self, my $guard) = $self->{transaction_depth} == 0 ?
($self->_writer_storage, $self->_writer_storage->txn_scope_guard)
:
($self, undef);
- local $self->{insert_bulk} = 1;
-
$self->next::method(@_);
if ($blob_cols) {
- if ($is_identity_insert) {
+ if ($self->_autoinc_supplied_for_op) {
$self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
}
else {
# otherwise, use the bulk API
# rearrange @$data so that columns are in database order
- my %orig_idx;
- @orig_idx{@$cols} = 0..$#$cols;
+# and so we submit a full column list
+ my %orig_order = map { $cols->[$_] => $_ } 0..$#$cols;
+
+ my @source_columns = $source->columns;
- my %new_idx;
- @new_idx{@source_columns} = 0..$#source_columns;
+ # bcp identity index is 1-based
+ my $identity_idx = first { $source_columns[$_] eq $identity_col } (0..$#source_columns);
+ $identity_idx = defined $identity_idx ? $identity_idx + 1 : 0;
my @new_data;
- for my $datum (@$data) {
- my $new_datum = [];
- for my $col (@source_columns) {
-# identity data will be 'undef' if not $is_identity_insert
-# columns with defaults will also be 'undef'
- $new_datum->[ $new_idx{$col} ] =
- exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
- }
- push @new_data, $new_datum;
+ for my $slice_idx (0..$#$data) {
+ push @new_data, [map {
+ # identity data will be 'undef' if not _autoinc_supplied_for_op()
+ # columns with defaults will also be 'undef'
+ exists $orig_order{$_}
+ ? $data->[$slice_idx][$orig_order{$_}]
+ : undef
+ } @source_columns];
}
-# bcp identity index is 1-based
- my $identity_idx = exists $new_idx{$identity_col} ?
- $new_idx{$identity_col} + 1 : 0;
+ my $proto_bind = $self->_resolve_bindattrs(
+ $source,
+ [map {
+ [ { dbic_colname => $source_columns[$_], _bind_data_slice_idx => $_ }
+ => $new_data[0][$_] ]
+ } (0 ..$#source_columns) ],
+ $columns_info
+ );
## Set a client-side conversion error handler, straight from DBD::Sybase docs.
# This ignores any data conversion errors detected by the client side libs, as
# they are usually harmless.
my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
- Sub::Name::subname insert_bulk => sub {
+ Sub::Name::subname _insert_bulk_cslib_errhandler => sub {
my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
return 1 if $errno == 36;
return 0;
});
- eval {
+ my $exception = '';
+ try {
my $bulk = $self->_bulk_storage;
my $guard = $bulk->txn_scope_guard;
+## FIXME - once this is done - address the FIXME on finish() below
## XXX get this to work instead of our own $sth
## will require SQLA or *Hacks changes for ordered columns
# $bulk->next::method($source, \@source_columns, \@new_data, {
# syb_bcp_attribs => {
-# identity_flag => $is_identity_insert,
+# identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
# identity_column => $identity_idx,
# }
# });
# 'insert', # op
{
syb_bcp_attribs => {
- identity_flag => $is_identity_insert,
+ identity_flag => $self->_autoinc_supplied_for_op ? 1 : 0,
identity_column => $identity_idx,
}
}
);
- my @bind = do {
- my $idx = 0;
- map [ $_, $idx++ ], @source_columns;
- };
+ {
+ # FIXME the $sth->finish in _execute_array does a rollback for some
+ # reason. Disable it temporarily until we fix the SQLMaker thing above
+ no warnings 'redefine';
+ no strict 'refs';
+ local *{ref($sth).'::finish'} = sub {};
- $self->_execute_array(
- $source, $sth, \@bind, \@source_columns, \@new_data, sub {
- $guard->commit
- }
- );
+ $self->_dbh_execute_for_fetch(
+ $source, $sth, $proto_bind, \@source_columns, \@new_data
+ );
+ }
+
+ $guard->commit;
$bulk->_query_end($sql);
+ } catch {
+ $exception = shift;
};
- my $exception = $@;
DBD::Sybase::set_cslib_cb($orig_cslib_cb);
if ($exception =~ /-Y option/) {
- carp <<"EOF";
-
-Sybase bulk API operation failed due to character set incompatibility, reverting
-to regular array inserts:
+ my $w = 'Sybase bulk API operation failed due to character set incompatibility, '
+ . 'reverting to regular array inserts. Try unsetting the LANG environment variable'
+ ;
+ $w .= "\n$exception" if $self->debug;
+ carp $w;
-*** Try unsetting the LANG environment variable.
-
-$exception
-EOF
$self->_bulk_storage(undef);
unshift @_, $self;
- goto \&insert_bulk;
+ goto \&_insert_bulk;
}
elsif ($exception) {
# rollback makes the bulkLogin connection unusable
}
}
-sub _dbh_execute_array {
- my ($self, $sth, $tuple_status, $cb) = @_;
-
- my $rv = $self->next::method($sth, $tuple_status);
- $cb->() if $cb;
-
- return $rv;
-}
-
# Make sure blobs are not bound as placeholders, and return any non-empty ones
# as a hash.
sub _remove_blob_cols {
return %blob_cols ? \%blob_cols : undef;
}
-# same for insert_bulk
+# same for _insert_bulk
sub _remove_blob_cols_array {
my ($self, $source, $cols, $data) = @_;
sub _update_blobs {
my ($self, $source, $blob_cols, $where) = @_;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
-
-# check if we're updating a single row by PK
- my $pk_cols_in_where = 0;
- for my $col (@primary_cols) {
- $pk_cols_in_where++ if defined $where->{$col};
- }
- my @rows;
+ my @primary_cols = try
+ { $source->_pri_cols_or_die }
+ catch {
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
+ };
- if ($pk_cols_in_where == @primary_cols) {
+ my @pks_to_update;
+ if (
+ ref $where eq 'HASH'
+ and
+ @primary_cols == grep { defined $where->{$_} } @primary_cols
+ ) {
my %row_to_update;
@row_to_update{@primary_cols} = @{$where}{@primary_cols};
- @rows = \%row_to_update;
- } else {
+ @pks_to_update = \%row_to_update;
+ }
+ else {
my $cursor = $self->select ($source, \@primary_cols, $where, {});
- @rows = map {
+ @pks_to_update = map {
my %row; @row{@primary_cols} = @$_; \%row
} $cursor->all;
}
- for my $row (@rows) {
- $self->_insert_blobs($source, $blob_cols, $row);
+ for my $ident (@pks_to_update) {
+ $self->_insert_blobs($source, $blob_cols, $ident);
}
}
my $table = $source->name;
my %row = %$row;
- my (@primary_cols) = $source->primary_columns;
-
- $self->throw_exception('Cannot update TEXT/IMAGE column(s) without a primary key')
- unless @primary_cols;
+ my @primary_cols = try
+ { $source->_pri_cols_or_die }
+ catch {
+ $self->throw_exception("Cannot update TEXT/IMAGE column(s): $_")
+ };
$self->throw_exception('Cannot update TEXT/IMAGE column(s) without primary key values')
if ((grep { defined $row{$_} } @primary_cols) != @primary_cols);
my $sth = $cursor->sth;
if (not $sth) {
-
$self->throw_exception(
"Could not find row in table '$table' for blob update:\n"
- . Data::Dumper::Concise::Dumper (\%where)
+ . (Dumper \%where)
);
}
- eval {
+ try {
do {
$sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
} while $sth->fetch;
$sth->func($blob, length($blob), 'ct_send_data') or die $sth->errstr;
$sth->func('ct_finish_send') or die $sth->errstr;
- };
- my $exception = $@;
- $sth->finish if $sth;
- if ($exception) {
- if ($self->using_freetds) {
+ }
+ catch {
+ if ($self->_using_freetds) {
$self->throw_exception (
- 'TEXT/IMAGE operation failed, probably because you are using FreeTDS: '
- . $exception
+ "TEXT/IMAGE operation failed, probably because you are using FreeTDS: $_"
);
- } else {
- $self->throw_exception($exception);
+ }
+ else {
+ $self->throw_exception($_);
}
}
+ finally {
+ $sth->finish if $sth;
+ };
}
}
on_connect_call => 'datetime_setup'
-In L<DBIx::Class::Storage::DBI/connect_info> to set:
+In L<connect_info|DBIx::Class::Storage::DBI/connect_info> to set:
$dbh->syb_date_fmt('ISO_strict'); # output fmt: 2004-08-21T14:36:48.080Z
$dbh->do('set dateformat mdy'); # input fmt: 08/13/1979 18:08:55.080
-On connection for use with L<DBIx::Class::InflateColumn::DateTime>, using
-L<DateTime::Format::Sybase>, which you will need to install.
-
-This works for both C<DATETIME> and C<SMALLDATETIME> columns, although
+This works for both C<DATETIME> and C<SMALLDATETIME> columns, note that
C<SMALLDATETIME> columns only have minute precision.
=cut
-{
- my $old_dbd_warned = 0;
+sub connect_call_datetime_setup {
+ my $self = shift;
+ my $dbh = $self->_get_dbh;
- sub connect_call_datetime_setup {
- my $self = shift;
- my $dbh = $self->_get_dbh;
-
- if ($dbh->can('syb_date_fmt')) {
- # amazingly, this works with FreeTDS
- $dbh->syb_date_fmt('ISO_strict');
- } elsif (not $old_dbd_warned) {
- carp "Your DBD::Sybase is too old to support ".
- "DBIx::Class::InflateColumn::DateTime, please upgrade!";
- $old_dbd_warned = 1;
- }
+ if ($dbh->can('syb_date_fmt')) {
+ # amazingly, this works with FreeTDS
+ $dbh->syb_date_fmt('ISO_strict');
+ }
+ else {
+ carp_once
+ 'Your DBD::Sybase is too old to support '
+ .'DBIx::Class::InflateColumn::DateTime, please upgrade!';
+
+ # FIXME - in retrospect this is a rather bad US-centric choice
+ # of format. Not changing as a bugwards compat, though in reality
+ # the only piece that sees the results of $dt object formatting
+ # (as opposed to parsing) is the database itself, so theoretically
+ # changing both this SET command and the formatter definition of
+ # ::S::D::Sybase::ASE::DateTime::Format below should be safe and
+ # transparent
$dbh->do('SET DATEFORMAT mdy');
-
- 1;
}
}
-sub datetime_parser_type { "DateTime::Format::Sybase" }
-
-# ->begin_work and such have no effect with FreeTDS but we run them anyway to
-# let the DBD keep any state it needs to.
-#
-# If they ever do start working, the extra statements will do no harm (because
-# Sybase supports nested transactions.)
-sub _dbh_begin_work {
+sub _exec_txn_begin {
my $self = shift;
# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
$self->next::method(@_);
- if ($self->using_freetds) {
- $self->_get_dbh->do('BEGIN TRAN');
- }
-
$self->_began_bulk_work(1) if $self->_is_bulk_storage;
}
-sub _dbh_commit {
- my $self = shift;
- if ($self->using_freetds) {
- $self->_dbh->do('COMMIT');
- }
- return $self->next::method(@_);
-}
-
-sub _dbh_rollback {
- my $self = shift;
- if ($self->using_freetds) {
- $self->_dbh->do('ROLLBACK');
- }
- return $self->next::method(@_);
-}
-
# savepoint support using ASE syntax
-sub _svp_begin {
+sub _exec_svp_begin {
my ($self, $name) = @_;
- $self->_get_dbh->do("SAVE TRANSACTION $name");
+ $self->_dbh->do("SAVE TRANSACTION $name");
}
# A new SAVE TRANSACTION with the same name releases the previous one.
-sub _svp_release { 1 }
+sub _exec_svp_release { 1 }
-sub _svp_rollback {
+sub _exec_svp_rollback {
my ($self, $name) = @_;
- $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
+ $self->_dbh->do("ROLLBACK TRANSACTION $name");
+}
+
+package # hide from PAUSE
+ DBIx::Class::Storage::DBI::Sybase::ASE::DateTime::Format;
+
+my $datetime_parse_format = '%Y-%m-%dT%H:%M:%S.%3NZ';
+my $datetime_format_format = '%m/%d/%Y %H:%M:%S.%3N';
+
+my ($datetime_parser, $datetime_formatter);
+
+sub parse_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_parser ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_parse_format,
+ on_error => 'croak',
+ );
+ return $datetime_parser->parse_datetime(shift);
+}
+
+sub format_datetime {
+ shift;
+ require DateTime::Format::Strptime;
+ $datetime_formatter ||= DateTime::Format::Strptime->new(
+ pattern => $datetime_format_format,
+ on_error => 'croak',
+ );
+ return $datetime_formatter->format_datetime(shift);
}
1;
=head1 Schema::Loader Support
As of version C<0.05000>, L<DBIx::Class::Schema::Loader> should work well with
-most (if not all) versions of Sybase ASE.
+most versions of Sybase ASE.
=head1 FreeTDS
Sybase ASE for Linux (which comes with the Open Client libraries) may be
downloaded here: L<http://response.sybase.com/forms/ASE_Linux_Download>.
-To see if you're using FreeTDS check C<< $schema->storage->using_freetds >>, or run:
+To see if you're using FreeTDS run:
perl -MDBI -le 'my $dbh = DBI->connect($dsn, $user, $pass); print $dbh->{syb_oc_version}'
-Some versions of the libraries involved will not support placeholders, in which
-case the storage will be reblessed to
+It is recommended to set C<tds version> for your ASE server to C<5.0> in
+C</etc/freetds/freetds.conf>.
+
+Some versions or configurations of the libraries involved will not support
+placeholders, in which case the storage will be reblessed to
L<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars>.
In some configurations, placeholders will work but will throw implicit type
conversion errors for anything that's not expecting a string. In such a case,
the C<auto_cast> option from L<DBIx::Class::Storage::DBI::AutoCast> is
automatically set, which you may enable on connection with
-L<DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>. The type info
-for the C<CAST>s is taken from the L<DBIx::Class::ResultSource/data_type>
-definitions in your Result classes, and are mapped to a Sybase type (if it isn't
-already) using a mapping based on L<SQL::Translator>.
+L<connect_call_set_auto_cast|DBIx::Class::Storage::DBI::AutoCast/connect_call_set_auto_cast>.
+The type info for the C<CAST>s is taken from the
+L<DBIx::Class::ResultSource/data_type> definitions in your Result classes, and
+are mapped to a Sybase type (if it isn't already) using a mapping based on
+L<SQL::Translator>.
-In other configurations, placeholers will work just as they do with the Sybase
+In other configurations, placeholders will work just as they do with the Sybase
Open Client libraries.
Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
have active cursors when doing an insert.
When using C<DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars> transactions
-are disabled, as there are no concurrency issues with C<SELECT @@IDENTITY> as
-it's a session variable.
+are unnecessary and not used, as there are no concurrency issues with C<SELECT
+@@IDENTITY> which is a session variable.
=head1 TRANSACTIONS
-Due to limitations of the TDS protocol, L<DBD::Sybase>, or both; you cannot
-begin a transaction while there are active cursors; nor can you use multiple
-active cursors within a transaction. An active cursor is, for example, a
+Due to limitations of the TDS protocol and L<DBD::Sybase>, you cannot begin a
+transaction while there are active cursors, nor can you use multiple active
+cursors within a transaction. An active cursor is, for example, a
L<ResultSet|DBIx::Class::ResultSet> that has been executed using C<next> or
C<first> but has not been exhausted or L<reset|DBIx::Class::ResultSet/reset>.
$schema->txn_do(sub {
my $rs = $schema->resultset('Book');
- while (my $row = $rs->next) {
+ while (my $result = $rs->next) {
$schema->resultset('MetaData')->create({
- book_id => $row->id,
+ book_id => $result->id,
...
});
}
See L</connect_call_datetime_setup> to setup date formats
for L<DBIx::Class::InflateColumn::DateTime>.
+=head1 LIMITED QUERIES
+
+Because ASE does not have a good way to limit results in SQL that works for all
+types of queries, the limit dialect is set to
+L<GenericSubQ|SQL::Abstract::Limit/GenericSubQ>.
+
+Fortunately, ASE and L<DBD::Sybase> support cursors properly, so when
+L<GenericSubQ|SQL::Abstract::Limit/GenericSubQ> is too slow you can use
+the L<software_limit|DBIx::Class::ResultSet/software_limit>
+L<DBIx::Class::ResultSet> attribute to simulate limited queries by skipping over
+records.
+
=head1 TEXT/IMAGE COLUMNS
L<DBD::Sybase> compiled with FreeTDS will B<NOT> allow you to insert or update
instead.
However, the C<LongReadLen> you pass in
-L<DBIx::Class::Storage::DBI/connect_info> is used to execute the equivalent
-C<SET TEXTSIZE> command on connection.
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> is used to execute the
+equivalent C<SET TEXTSIZE> command on connection.
-See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
-setting you need to work with C<IMAGE> columns.
+See L</connect_call_blob_setup> for a
+L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting you need to work
+with C<IMAGE> columns.
=head1 BULK API
B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
calls in your C<Result> classes B<must> list columns in database order for this
to work. Also, you may have to unset the C<LANG> environment variable before
-loading your app, if it doesn't match the character set of your database.
+loading your app, as C<BCP -Y> is not yet supported in DBD::Sybase.
When inserting IMAGE columns using this method, you'll need to use
L</connect_call_blob_setup> as well.
data_type => undef,
default_value => \'getdate()',
is_nullable => 0,
+ inflate_datetime => 1,
}
The C<data_type> must exist and must be C<undef>. Then empty inserts will work
=item *
-Adaptive Server Anywhere (ASA) support, with possible SQLA::Limit support.
-
-=item *
-
Blob update with a LIKE query on a blob, without invalidating the WHERE condition.
=item *
=head1 AUTHOR
-See L<DBIx::Class/CONTRIBUTORS>.
+See L<DBIx::Class/AUTHOR> and L<DBIx::Class/CONTRIBUTORS>.
=head1 LICENSE