use mro 'c3';
use Carp::Clan qw/^DBIx::Class/;
use List::Util ();
+use Sub::Name ();
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_identity _blob_log_on_update insert_txn/
+ qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
+ _bulk_storage _is_bulk_storage _began_bulk_work
+ _bulk_disabled_due_to_coderef_connect_info_warned
+ _identity_method/
);
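+# these settings must be kept in sync on the extra (writer/bulk) storages;
+# the proxying loop further down handles that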
+my @also_proxy_to_extra_storages = qw/
+ connect_call_set_auto_cast auto_cast connect_call_blob_setup
+ connect_call_datetime_setup
+
+ disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
+ auto_savepoint unsafe cursor_class debug debugobj schema
+/;
+
=head1 NAME
DBIx::Class::Storage::DBI::Sybase - Sybase support for DBIx::Class
With this driver there is unfortunately no way to get the C<last_insert_id>
without doing a C<SELECT MAX(col)>. This is done safely in a transaction
-(locking the table.) The transaction can be turned off if concurrency is not an
-issue, see L<DBIx::Class::Storage::DBI::Sybase/connect_call_unsafe_insert>.
-
-But your queries will be cached.
+(locking the table.) See L</INSERTS WITH PLACEHOLDERS>.
A recommended L<DBIx::Class::Storage::DBI/connect_info> setting:
} else { # real Sybase
my $no_bind_vars = 'DBIx::Class::Storage::DBI::Sybase::NoBindVars';
-# This is reset to 0 in ::NoBindVars, only necessary because we use max(col) to
-# get the identity.
- $self->insert_txn(1);
-
if ($self->using_freetds) {
carp <<'EOF' unless $ENV{DBIC_SYBASE_FREETDS_NOWARN};
$self->_rebless;
}
}
-
- $self->set_textsize; # based on LongReadLen in connect_info
-
}
- elsif (not $self->dbh->{syb_dynamic_supported}) {
+ elsif (not $self->_get_dbh->{syb_dynamic_supported}) {
# not necessarily FreeTDS, but no placeholders nevertheless
$self->ensure_class_loaded($no_bind_vars);
bless $self, $no_bind_vars;
$self->_rebless;
} elsif (not $self->_typeless_placeholders_supported) {
-# this is highly unlikely, but we check just in case
+ # this is highly unlikely, but we check just in case
$self->auto_cast(1);
}
-
- $self->_set_max_connect(256);
}
}
}
+sub _init {
+ my $self = shift;
+ $self->_set_max_connect(256);
+
+ # based on LongReadLen in connect_info
+ $self->set_textsize if $self->using_freetds;
+
+# create a separate storage for insert and blob-update transactions,
+# unless this is already one of those extra storages
+ return if $self->_is_extra_storage;
+
+ my $writer_storage = (ref $self)->new;
+
+ $writer_storage->_is_extra_storage(1);
+ $writer_storage->connect_info($self->connect_info);
+ $writer_storage->auto_cast($self->auto_cast);
+
+ $self->_writer_storage($writer_storage);
+
+# create a bulk storage unless connect_info is a coderef
+ return
+ if (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE';
+
+ my $bulk_storage = (ref $self)->new;
+
+ $bulk_storage->_is_extra_storage(1);
+ $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
+ $bulk_storage->connect_info($self->connect_info);
+
+# the bulkLogin=1 DSN option is why this has to be a separate connection: it
+# enables the Bulk API on it
+ $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
+
+ $self->_bulk_storage($bulk_storage);
+}
+
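+# install proxies: calls to these methods on the main storage are replayed on
+# the writer and bulk storages, so their settings stay in sync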
+for my $method (@also_proxy_to_extra_storages) {
+ no strict 'refs';
+ no warnings 'redefine';
+
+ my $replaced = __PACKAGE__->can($method);
+
+ *{$method} = Sub::Name::subname $method => sub {
+ my $self = shift;
+ $self->_writer_storage->$replaced(@_) if $self->_writer_storage;
+ $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
+ return $self->$replaced(@_);
+ };
+}
+
+sub disconnect {
+ my $self = shift;
+
+# Even though we call $sth->finish for uses of the bulk API, there's still an
+# "active statement" warning on disconnect, which we throw away here.
+# This is due to the bug described in insert_bulk.
+# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
+ local $SIG{__WARN__} = sub {
+ warn $_[0] unless $_[0] =~ /active statement/i;
+ } if $self->_is_bulk_storage;
+
+# so that next transaction gets a dbh
+ $self->_began_bulk_work(0) if $self->_is_bulk_storage;
+
+ $self->next::method;
+}
+
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
my $self = shift;
$self->next::method(@_);
+
+ if ($self->_is_bulk_storage) {
+# this should be cleared on every reconnect
+ $self->_began_bulk_work(0);
+ return;
+ }
if (not $self->using_freetds) {
$self->_dbh->{syb_chained_txn} = 1;
if exists $args{log_on_update};
}
-=head2 connect_call_unsafe_insert
-
-With placeholders enabled, inserts are done in a transaction so that there are
-no concurrency issues with getting the inserted identity value using
-C<SELECT MAX(col)> when placeholders are enabled.
-
-When using C<DBIx::Class::Storage::DBI::Sybase::NoBindVars> transactions are
-disabled.
-
-To turn off transactions for inserts (for an application that doesn't need
-concurrency, or a loader, for example) use this setting in
-L<DBIx::Class::Storage::DBI/connect_info>,
-
- on_connect_call => ['unsafe_insert']
-
-To manipulate this setting at runtime, use:
-
- $schema->storage->insert_txn(0); # 1 to re-enable
-
-=cut
-
-sub connect_call_unsafe_insert {
- my $self = shift;
- $self->insert_txn(0);
-}
-
sub _is_lob_type {
my $self = shift;
my $type = shift;
$type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
}
+sub _is_lob_column {
+ my ($self, $source, $column) = @_;
+
+ return $self->_is_lob_type($source->column_info($column)->{data_type});
+}
+
sub _prep_for_execute {
my $self = shift;
my ($op, $extra_bind, $ident, $args) = @_;
my ($self, $type) = @_;
$type = lc $type;
- $type =~ s/ identity//;
+ $type =~ s/\s* identity//x;
return uc($TYPE_MAPPING{$type} || $type);
}
sub _fetch_identity_sql {
my ($self, $source, $col) = @_;
- return "SELECT MAX($col) FROM ".$source->from;
+ return sprintf ("SELECT MAX(%s) FROM %s",
+ map { $self->sql_maker->_quote ($_) } ($col, $source->from)
+ );
}
sub _execute {
sub last_insert_id { shift->_identity }
-# override to handle TEXT/IMAGE and to do a transaction if necessary
+# handles TEXT/IMAGE and transaction for last_insert_id
sub insert {
my $self = shift;
- my ($ident, $source, $to_insert) = @_;
+ my ($source, $to_insert) = @_;
my $blob_cols = $self->_remove_blob_cols($source, $to_insert);
- my $need_last_insert_id = 0;
-
- my ($identity_col) =
- map $_->[0],
- grep $_->[1]{is_auto_increment},
- map [ $_, $source->column_info($_) ],
+ my $identity_col = List::Util::first
+ { $source->column_info($_)->{is_auto_increment} }
$source->columns;
- $need_last_insert_id = 1
- if $identity_col && (not exists $to_insert->{$identity_col});
-
- # We have to do the insert in a transaction to avoid race conditions with the
- # SELECT MAX(COL) identity method used when placeholders are enabled.
- my $updated_cols = do {
- if ($need_last_insert_id && $self->insert_txn &&
- (not $self->{transaction_depth})) {
- my $guard = $self->txn_scope_guard;
- my $upd_cols = $self->next::method (@_);
- $guard->commit;
- return $upd_cols;
- }
- else {
- $self->next::method(@_);
- }
+ # do we need the horrific SELECT MAX(COL) hack?
+ my $dumb_last_insert_id =
+ $identity_col
+ && (not exists $to_insert->{$identity_col})
+ && ($self->_identity_method||'') ne '@@IDENTITY';
+
+ my $next = $self->next::can;
+
+ # we are already in a transaction, or there are no blobs
+ # and we don't need the PK - just (try to) do it
+ if ($self->{transaction_depth}
+ || (!$blob_cols && !$dumb_last_insert_id)
+ ) {
+ return $self->_insert (
+ $next, $source, $to_insert, $blob_cols, $identity_col
+ );
+ }
+
+ # otherwise use the _writer_storage to do the insert+transaction on another
+ # connection
+ my $guard = $self->_writer_storage->txn_scope_guard;
+
+ my $updated_cols = $self->_writer_storage->_insert (
+ $next, $source, $to_insert, $blob_cols, $identity_col
+ );
+
+ $self->_identity($self->_writer_storage->_identity);
+
+ $guard->commit;
+
+ return $updated_cols;
+}
+
+sub _insert {
+ my ($self, $next, $source, $to_insert, $blob_cols, $identity_col) = @_;
+
+ my $updated_cols = $self->$next ($source, $to_insert);
+
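+  # assemble the complete row, identity included, so that _insert_blobs can
+  # find it by primary key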
+ my $final_row = {
+ $identity_col => $self->last_insert_id($source, $identity_col),
+ %$to_insert,
+ %$updated_cols,
};
- $self->_insert_blobs($source, $blob_cols, $to_insert) if %$blob_cols;
+ $self->_insert_blobs ($source, $blob_cols, $final_row) if $blob_cols;
return $updated_cols;
}
sub update {
my $self = shift;
- my ($source, $fields, $ident_cond) = @_;
+ my ($source, $fields, $where, @rest) = @_;
my $wantarray = wantarray;
my $blob_cols = $self->_remove_blob_cols($source, $fields);
+ my $table = $source->name;
+
+ my $identity_col = List::Util::first
+ { $source->column_info($_)->{is_auto_increment} }
+ $source->columns;
+
+ my $is_identity_update = $identity_col && defined $fields->{$identity_col};
+
+  if (not $blob_cols) {
+    $self->_set_identity_insert($table, 'update')   if $is_identity_update;
+    # capture the result first, otherwise IDENTITY_UPDATE is never turned off
+    my @res = $wantarray
+      ? $self->next::method(@_)
+      : scalar $self->next::method(@_);
+    $self->_unset_identity_insert($table, 'update') if $is_identity_update;
+    return $wantarray ? @res : $res[0];
+  }
+
+# check that we're not updating a blob column that's also in $where
+ for my $blob (grep $self->_is_lob_column($source, $_), $source->columns) {
+ if (exists $where->{$blob} && exists $fields->{$blob}) {
+      croak
+'Update of TEXT/IMAGE column that is also in the search condition is not possible';
+ }
+ }
+
+# update+blob update(s) done atomically on separate connection
+ $self = $self->_writer_storage;
+
+ my $guard = $self->txn_scope_guard;
+
+# First update the blob columns to be updated to '' (these placeholders were
+# put into $fields by _remove_blob_cols.)
+ my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
+
+ $self->next::method($source, \%blobs_to_empty, $where, @rest);
+
+# Now update the blobs before the other columns in case the update of other
+# columns makes the search condition invalid.
+ $self->_update_blobs($source, $blob_cols, $where);
+
my @res;
- if ($wantarray) {
- @res = $self->next::method(@_);
+ if (%$fields) {
+ $self->_set_identity_insert($table, 'update') if $is_identity_update;
+
+ if ($wantarray) {
+ @res = $self->next::method(@_);
+ }
+ elsif (defined $wantarray) {
+ $res[0] = $self->next::method(@_);
+ }
+ else {
+ $self->next::method(@_);
+ }
+
+ $self->_unset_identity_insert($table, 'update') if $is_identity_update;
}
- elsif (defined $wantarray) {
- $res[0] = $self->next::method(@_);
+
+ $guard->commit;
+
+ return $wantarray ? @res : $res[0];
+}
+
+### the insert_bulk support, partially stolen from DBI/MSSQL.pm
+
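+# Sybase, like MSSQL, rejects explicit values for an IDENTITY column unless
+# IDENTITY_INSERT (or IDENTITY_UPDATE) is first enabled for the table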
+sub _set_identity_insert {
+ my ($self, $table, $op) = @_;
+
+ my $sql = sprintf (
+ 'SET IDENTITY_%s %s ON',
+ (uc($op) || 'INSERT'),
+ $self->sql_maker->_quote ($table),
+ );
+
+ $self->_query_start($sql);
+
+ my $dbh = $self->_get_dbh;
+ eval { $dbh->do ($sql) };
+ my $exception = $@;
+
+ $self->_query_end($sql);
+
+ if ($exception) {
+ $self->throw_exception (sprintf "Error executing '%s': %s",
+ $sql,
+ $dbh->errstr,
+ );
}
- else {
+}
+
+sub _unset_identity_insert {
+ my ($self, $table, $op) = @_;
+
+ my $sql = sprintf (
+ 'SET IDENTITY_%s %s OFF',
+ (uc($op) || 'INSERT'),
+ $self->sql_maker->_quote ($table),
+ );
+
+ $self->_query_start($sql);
+
+ my $dbh = $self->_get_dbh;
+ $dbh->do ($sql);
+
+ $self->_query_end($sql);
+}
+
+# for tests
+sub _can_insert_bulk { 1 }
+
+sub insert_bulk {
+ my $self = shift;
+ my ($source, $cols, $data) = @_;
+
+ my $identity_col = List::Util::first
+ { $source->column_info($_)->{is_auto_increment} }
+ $source->columns;
+
+ my $is_identity_insert = (List::Util::first
+ { $source->column_info ($_)->{is_auto_increment} }
+ @{$cols}
+ ) ? 1 : 0;
+
+ my @source_columns = $source->columns;
+
+ my $use_bulk_api =
+ $self->_bulk_storage &&
+ $self->_get_dbh->{syb_has_blk};
+
+ if ((not $use_bulk_api) &&
+ (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
+ (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
+ carp <<'EOF';
+Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
+array inserts.
+EOF
+ $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+ }
+
+ if (not $use_bulk_api) {
+ my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
+
+ my $dumb_last_insert_id =
+ $identity_col
+ && (not $is_identity_insert)
+ && ($self->_identity_method||'') ne '@@IDENTITY';
+
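+    # re-home $self to the writer storage (with a transaction guard) when we
+    # need blob inserts or the identity hack and aren't in a transaction yet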
+ ($self, my ($guard)) = do {
+ if ($self->{transaction_depth} == 0 &&
+ ($blob_cols || $dumb_last_insert_id)) {
+ ($self->_writer_storage, $self->_writer_storage->txn_scope_guard);
+ }
+ else {
+ ($self, undef);
+ }
+ };
+
+ $self->_set_identity_insert ($source->name) if $is_identity_insert;
$self->next::method(@_);
+ $self->_unset_identity_insert ($source->name) if $is_identity_insert;
+
+ if ($blob_cols) {
+ if ($is_identity_insert) {
+ $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
+ }
+ else {
+ my @cols_with_identities = (@$cols, $identity_col);
+
+ ## calculate identities
+ # XXX This assumes identities always increase by 1, which may or may not
+ # be true.
+ my ($last_identity) =
+ $self->_dbh->selectrow_array (
+ $self->_fetch_identity_sql($source, $identity_col)
+ );
+ my @identities = (($last_identity - @$data + 1) .. $last_identity);
+
+ my @data_with_identities = map [@$_, shift @identities], @$data;
+
+ $self->_insert_blobs_array (
+ $source, $blob_cols, \@cols_with_identities, \@data_with_identities
+ );
+ }
+ }
+
+ $guard->commit if $guard;
+ return;
}
- $self->_update_blobs($source, $blob_cols, $ident_cond) if %$blob_cols;
+# otherwise, use the bulk API
- return $wantarray ? @res : $res[0];
+# rearrange @$data so that columns are in database order
+ my %orig_idx;
+ @orig_idx{@$cols} = 0..$#$cols;
+
+ my %new_idx;
+ @new_idx{@source_columns} = 0..$#source_columns;
+
+ my @new_data;
+ for my $datum (@$data) {
+ my $new_datum = [];
+ for my $col (@source_columns) {
+# identity data will be 'undef' if not $is_identity_insert
+# columns with defaults will also be 'undef'
+ $new_datum->[ $new_idx{$col} ] =
+ exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
+ }
+ push @new_data, $new_datum;
+ }
+
+# bcp identity index is 1-based
+ my $identity_idx = exists $new_idx{$identity_col} ?
+ $new_idx{$identity_col} + 1 : 0;
+
+## Set a client-side conversion error handler, straight from DBD::Sybase docs.
+# This ignores any data conversion errors detected by the client side libs, as
+# they are usually harmless.
+ my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
+ Sub::Name::subname insert_bulk => sub {
+ my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
+
+ return 1 if $errno == 36;
+
+ carp
+ "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
+ ($errmsg ? "\n$errmsg" : '') .
+ ($osmsg ? "\n$osmsg" : '') .
+ ($blkmsg ? "\n$blkmsg" : '');
+
+ return 0;
+ });
+
+ eval {
+ my $bulk = $self->_bulk_storage;
+
+ my $guard = $bulk->txn_scope_guard;
+
+## XXX get this to work instead of our own $sth
+## will require SQLA or *Hacks changes for ordered columns
+# $bulk->next::method($source, \@source_columns, \@new_data, {
+# syb_bcp_attribs => {
+# identity_flag => $is_identity_insert,
+# identity_column => $identity_idx,
+# }
+# });
+ my $sql = 'INSERT INTO ' .
+ $bulk->sql_maker->_quote($source->name) . ' (' .
+# colname list is ignored for BCP, but does no harm
+ (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
+ ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
+
+## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
+## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
+## better yet the version above. Should be fixed in DBD::Sybase.
+ my $sth = $bulk->_get_dbh->prepare($sql,
+# 'insert', # op
+ {
+ syb_bcp_attribs => {
+ identity_flag => $is_identity_insert,
+ identity_column => $identity_idx,
+ }
+ }
+ );
+
+ my $bind_attributes = $self->source_bind_attributes($source);
+
+ foreach my $slice_idx (0..$#source_columns) {
+ my $col = $source_columns[$slice_idx];
+
+      my $attributes = ($bind_attributes && defined $bind_attributes->{$col})
+        ? $bind_attributes->{$col}
+        : undef;
+
+ my @slice = map $_->[$slice_idx], @new_data;
+
+ $sth->bind_param_array(($slice_idx + 1), \@slice, $attributes);
+ }
+
+ $bulk->_query_start($sql);
+
+# this is stolen from DBI::insert_bulk
+ my $tuple_status = [];
+ my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
+
+ if (my $err = $@ || $sth->errstr) {
+ my $i = 0;
+ ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
+
+ $self->throw_exception("Unexpected populate error: $err")
+ if ($i > $#$tuple_status);
+
+ require Data::Dumper;
+ local $Data::Dumper::Terse = 1;
+ local $Data::Dumper::Indent = 1;
+ local $Data::Dumper::Useqq = 1;
+ local $Data::Dumper::Quotekeys = 0;
+ local $Data::Dumper::Sortkeys = 1;
+
+ $self->throw_exception(sprintf "%s for populate slice:\n%s",
+ ($tuple_status->[$i][1] || $err),
+ Data::Dumper::Dumper(
+ { map { $source_columns[$_] => $new_data[$i][$_] } (0 .. $#$cols) }
+ ),
+ );
+ }
+
+ $guard->commit;
+ $sth->finish;
+
+ $bulk->_query_end($sql);
+ };
+ my $exception = $@;
+ if ($exception =~ /-Y option/) {
+ carp <<"EOF";
+
+Sybase bulk API operation failed due to character set incompatibility;
+reverting to regular array inserts:
+
+*** Try unsetting the LANG environment variable.
+
+$@
+EOF
+ $self->_bulk_storage(undef);
+ DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+ unshift @_, $self;
+ goto \&insert_bulk;
+ }
+ elsif ($exception) {
+ DBD::Sybase::set_cslib_cb($orig_cslib_cb);
+# rollback makes the bulkLogin connection unusable
+ $self->_bulk_storage->disconnect;
+ $self->throw_exception($exception);
+ }
+
+ DBD::Sybase::set_cslib_cb($orig_cslib_cb);
}
+# Make sure blobs are not bound as placeholders, and return any non-empty ones
+# as a hash.
sub _remove_blob_cols {
my ($self, $source, $fields) = @_;
for my $col (keys %$fields) {
if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
- $blob_cols{$col} = delete $fields->{$col};
- $fields->{$col} = \"''";
+ my $blob_val = delete $fields->{$col};
+ if (not defined $blob_val) {
+ $fields->{$col} = \'NULL';
+ }
+ else {
+ $fields->{$col} = \"''";
+ $blob_cols{$col} = $blob_val unless $blob_val eq '';
+ }
+ }
+ }
+
+ return keys %blob_cols ? \%blob_cols : undef;
+}
+
+# same for insert_bulk
+sub _remove_blob_cols_array {
+ my ($self, $source, $cols, $data) = @_;
+
+ my @blob_cols;
+
+ for my $i (0..$#$cols) {
+ my $col = $cols->[$i];
+
+ if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
+ for my $j (0..$#$data) {
+ my $blob_val = delete $data->[$j][$i];
+ if (not defined $blob_val) {
+ $data->[$j][$i] = \'NULL';
+ }
+ else {
+ $data->[$j][$i] = \"''";
+ $blob_cols[$j][$i] = $blob_val
+ unless $blob_val eq '';
+ }
+ }
}
}
- return \%blob_cols;
+ return @blob_cols ? \@blob_cols : undef;
}
sub _update_blobs {
- my ($self, $source, $blob_cols, $ident_cond) = @_;
+ my ($self, $source, $blob_cols, $where) = @_;
my (@primary_cols) = $source->primary_columns;
# check if we're updating a single row by PK
my $pk_cols_in_where = 0;
for my $col (@primary_cols) {
- $pk_cols_in_where++ if defined $ident_cond->{$col};
+ $pk_cols_in_where++ if defined $where->{$col};
}
my @rows;
if ($pk_cols_in_where == @primary_cols) {
my %row_to_update;
- @row_to_update{@primary_cols} = @{$ident_cond}{@primary_cols};
+ @row_to_update{@primary_cols} = @{$where}{@primary_cols};
@rows = \%row_to_update;
} else {
- my $rs = $source->resultset->search(
- $ident_cond,
- {
- result_class => 'DBIx::Class::ResultClass::HashRefInflator',
- select => \@primary_cols
- }
- );
- @rows = $rs->all; # statement must finish
+ my $cursor = $self->select ($source, \@primary_cols, $where, {});
+ @rows = map {
+ my %row; @row{@primary_cols} = @$_; \%row
+ } $cursor->all;
}
for my $row (@rows) {
sub _insert_blobs {
my ($self, $source, $blob_cols, $row) = @_;
- my $dbh = $self->dbh;
+ my $dbh = $self->_get_dbh;
- my $table = $source->from;
+ my $table = $source->name;
my %row = %$row;
my (@primary_cols) = $source->primary_columns;
unless @primary_cols;
if ((grep { defined $row{$_} } @primary_cols) != @primary_cols) {
- if (@primary_cols == 1) {
- my $col = $primary_cols[0];
- $row{$col} = $self->last_insert_id($source, $col);
- } else {
- croak "Cannot update TEXT/IMAGE column(s) without primary key values";
- }
+ croak "Cannot update TEXT/IMAGE column(s) without primary key values";
}
for my $col (keys %$blob_cols) {
my $blob = $blob_cols->{$col};
my %where = map { ($_, $row{$_}) } @primary_cols;
- my $cursor = $source->resultset->search(\%where, {
- select => [$col]
- })->cursor;
+
+ my $cursor = $self->select ($source, [$col], \%where, {});
$cursor->next;
my $sth = $cursor->sth;
+ if (not $sth) {
+ require Data::Dumper;
+ local $Data::Dumper::Terse = 1;
+ local $Data::Dumper::Indent = 1;
+ local $Data::Dumper::Useqq = 1;
+ local $Data::Dumper::Quotekeys = 0;
+ local $Data::Dumper::Sortkeys = 1;
+
+ croak "\nCould not find row in table '$table' for blob update:\n".
+ Data::Dumper::Dumper(\%where)."\n";
+ }
+
eval {
do {
$sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
}
}
+sub _insert_blobs_array {
+ my ($self, $source, $blob_cols, $cols, $data) = @_;
+
+ for my $i (0..$#$data) {
+ my $datum = $data->[$i];
+
+ my %row;
+ @row{ @$cols } = @$datum;
+
+ my %blob_vals;
+ for my $j (0..$#$cols) {
+ if (exists $blob_cols->[$i][$j]) {
+ $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
+ }
+ }
+
+ $self->_insert_blobs ($source, \%blob_vals, \%row);
+ }
+}
+
=head2 connect_call_datetime_setup
Used as:
sub _dbh_begin_work {
my $self = shift;
+
+# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
+# TRAN once. However, we need to make sure there's a $dbh.
+ return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
+
$self->next::method(@_);
+
if ($self->using_freetds) {
- $self->dbh->do('BEGIN TRAN');
+ $self->_get_dbh->do('BEGIN TRAN');
}
+
+ $self->_began_bulk_work(1) if $self->_is_bulk_storage;
}
sub _dbh_commit {
sub _svp_begin {
my ($self, $name) = @_;
- $self->dbh->do("SAVE TRANSACTION $name");
+ $self->_get_dbh->do("SAVE TRANSACTION $name");
}
# A new SAVE TRANSACTION with the same name releases the previous one.
sub _svp_rollback {
my ($self, $name) = @_;
- $self->dbh->do("ROLLBACK TRANSACTION $name");
+ $self->_get_dbh->do("ROLLBACK TRANSACTION $name");
}
1;
Inserts or updates of TEXT/IMAGE columns will B<NOT> work with FreeTDS.
+=head1 INSERTS WITH PLACEHOLDERS
+
+With placeholders enabled, inserts are done in a transaction so that there are
+no concurrency issues with getting the inserted identity value using
+C<SELECT MAX(col)>, which is the only way to get the C<IDENTITY> value in this
+mode.
+
+In addition, they are done on a separate connection so that it's possible to
+have active cursors when doing an insert.
+
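+For example, a sketch (with hypothetical C<Book> and C<Log> sources) that
+inserts rows while a cursor is still active:
+
+  my $rs = $schema->resultset('Book');
+  while (my $book = $rs->next) {
+    $schema->resultset('Log')->create({ book_id => $book->id });
+  }
+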
+When using C<DBIx::Class::Storage::DBI::Sybase::NoBindVars>, transactions are
+disabled, because there are no concurrency issues with C<SELECT @@IDENTITY>,
+which is a session variable.
+
+=head1 TRANSACTIONS
+
+Due to limitations of the TDS protocol, L<DBD::Sybase>, or both, you cannot
+begin a transaction while there are active cursors. An active cursor is, for
+example, a L<ResultSet|DBIx::Class::ResultSet> that has been executed using
+C<next> or C<first> but has not been exhausted or
+L<reset|DBIx::Class::ResultSet/reset>.
+
+For example, this will not work:
+
+ $schema->txn_do(sub {
+ my $rs = $schema->resultset('Book');
+ while (my $row = $rs->next) {
+ $schema->resultset('MetaData')->create({
+ book_id => $row->id,
+ ...
+ });
+ }
+ });
+
+Transactions done for inserts in C<AutoCommit> mode when placeholders are in use
+are not affected, as they are done on an extra database handle.
+
+Some workarounds:
+
+=over 4
+
+=item * use L<DBIx::Class::Storage::DBI::Replicated>
+
+=item * L<connect|DBIx::Class::Schema/connect> another L<Schema|DBIx::Class::Schema>
+
+=item * load the data from your cursor with L<DBIx::Class::ResultSet/all> (see the example after this list)
+
+=back
+
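+For instance, the example above can be made to work with the third workaround,
+since L<all|DBIx::Class::ResultSet/all> exhausts the cursor before the
+transaction starts:
+
+  my @books = $schema->resultset('Book')->all;
+
+  $schema->txn_do(sub {
+    for my $row (@books) {
+      $schema->resultset('MetaData')->create({
+        book_id => $row->id,
+        ...
+      });
+    }
+  });
+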
=head1 MAXIMUM CONNECTIONS
The TDS protocol makes separate connections to the server for active statements
See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
setting you need to work with C<IMAGE> columns.
-=head1 AUTHORS
+=head1 BULK API
+
+The experimental L<DBD::Sybase> Bulk API support is used for
+L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
+on a separate connection.
+
+To use this feature effectively, use a large number of rows for each
+L<populate|DBIx::Class::ResultSet/populate> call, e.g.:
+
+ while (my $rows = $data_source->get_100_rows()) {
+ $rs->populate($rows);
+ }
+
+B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
+calls in your C<Result> classes B<must> list columns in database order for this
+to work. Also, you may have to unset the C<LANG> environment variable before
+loading your app, if it doesn't match the character set of your database.
+
+When inserting IMAGE columns using this method, you'll need to use
+L</connect_call_blob_setup> as well.
+
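+One possible L<connect_info|DBIx::Class::Storage::DBI/connect_info> setting
+for that (see L</connect_call_blob_setup> for the options it accepts):
+
+  on_connect_call => [ [ blob_setup => log_on_update => 0 ] ]
+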
+=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.