## Currently it is assumed that all values passed will be "normal", i.e. not
## scalar refs, or at least, all the same type as the first set, the statement is
## only prepped once.
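## (Literal SQL arrives as scalar refs in the usual SQL::Abstract convention,
## e.g. { created => \'getdate()' }; such values are interpolated into the
## statement rather than bound, which is why mixed slices defeat the single
## prepare.)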
sub insert_bulk {
- my ($self, $source, $cols, $data, $sth_attr) = @_;
+ my ($self, $source, $cols, $data) = @_;
# redispatch to insert_bulk method of storage we reblessed into, if necessary
if (not $self->_driver_determined) {
  $self->_determine_driver;
  goto $self->can('insert_bulk');
}
my %colvalues;
+ my $table = $source->from;
@colvalues{@$cols} = (0..$#$cols);
-
- # bind literal sql if it's the same in all slices
- for my $i (0..$#$cols) {
- my $first_val = $data->[0][$i];
- next unless (Scalar::Util::reftype($first_val)||'') eq 'SCALAR';
-
- $colvalues{ $cols->[$i] } = $first_val
- if (grep {
- (Scalar::Util::reftype($_)||'') eq 'SCALAR' &&
- $$_ eq $$first_val
- } map $data->[$_][$i], (1..$#$data)) == (@$data - 1);
- }
-
- my ($sql, $bind) = $self->_prep_for_execute (
- 'insert', undef, $source, [\%colvalues]
- );
- my @bind = @$bind;
-
- my $empty_bind = 1 if (not @bind) &&
- (grep { (Scalar::Util::reftype($_)||'') eq 'SCALAR' } values %colvalues)
- == @$cols;
-
- if ((not @bind) && (not $empty_bind)) {
- croak 'Cannot insert_bulk without support for placeholders';
- }
+ my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues);
$self->_query_start( $sql, @bind );
- my $sth = $self->sth($sql, 'insert', $sth_attr);
-
- if ($empty_bind) {
- # bind_param_array doesn't work if there are no binds
- eval {
- local $self->_get_dbh->{RaiseError} = 1;
- local $self->_get_dbh->{PrintError} = 0;
- foreach (0..$#$data) {
- $sth->execute;
- $sth->fetchall_arrayref;
- }
- };
- my $exception = $@;
- $sth->finish;
- $self->throw_exception($exception) if $exception;
- return;
- }
+ my $sth = $self->sth($sql);
# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
$sth->bind_param_array( $placeholder_index, [@data], $attributes );
$placeholder_index++;
}
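# (Per the DBI batch API, bind_param_array binds an entire column of values to
# one placeholder; execute_array below then executes once per row.)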
-
my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
- $sth->finish;
- if (my $err = $@ || $sth->errstr) {
+ if (my $err = $@) {
my $i = 0;
++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
- $self->throw_exception("Unexpected populate error: $err")
+ $self->throw_exception($sth->errstr || "Unexpected populate error: $err")
if ($i > $#$tuple_status);
require Data::Dumper;
local $Data::Dumper::Sortkeys = 1;
$self->throw_exception(sprintf "%s for populate slice:\n%s",
- ($tuple_status->[$i][1] || $err),
+ $tuple_status->[$i][1],
Data::Dumper::Dumper(
{ map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }
),
);
}
+ $self->throw_exception($sth->errstr) if !$rv;
$self->_query_end( $sql, @bind );
return (wantarray ? ($rv, $sth, @bind) : $rv);
=cut
sub _dbh_sth {
- my ($self, $dbh, $sql, $op, $sth_attr) = @_;
-# $op is ignored right now
-
- $sth_attr ||= {};
+ my ($self, $dbh, $sql) = @_;
# 3 is the if_active parameter which avoids active sth re-use
my $sth = $self->disable_sth_caching
- ? $dbh->prepare($sql, $sth_attr)
- : $dbh->prepare_cached($sql, $sth_attr, 3);
+ ? $dbh->prepare($sql)
+ : $dbh->prepare_cached($sql, {}, 3);
# XXX You would think RaiseError would make this impossible,
# but apparently that's not true :(
$self->throw_exception($dbh->errstr) if !$sth;
$sth;
}
sub sth {
- my ($self, $sql, $op, $sth_attr) = @_;
- $self->dbh_do('_dbh_sth', $sql, $op, $sth_attr); # retry over disconnects
+ my ($self, $sql) = @_;
+ $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
}
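# Typical use, as a sketch (statement text illustrative only):
#   my $sth = $storage->sth('SELECT name FROM artist WHERE artistid = ?');
#   $sth->execute(42);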
sub _dbh_columns_info_for {
use Sub::Name ();
__PACKAGE__->mk_group_accessors('simple' =>
- qw/_identity _blob_log_on_update _writer_storage _is_extra_storage
- _bulk_storage _is_bulk_storage _began_bulk_work
- _bulk_disabled_due_to_coderef_connect_info_warned
+ qw/_identity _blob_log_on_update _writer_storage _is_writer_storage
_identity_method/
);
-my @also_proxy_to_extra_storages = qw/
- connect_call_set_auto_cast auto_cast connect_call_blob_setup
- connect_call_datetime_setup
-
+my @also_proxy_to_writer_storage = qw/
disconnect _connect_info _sql_maker _sql_maker_opts disable_sth_caching
auto_savepoint unsafe cursor_class debug debugobj schema
/;
bless $self, $no_bind_vars;
$self->_rebless;
} elsif (not $self->_typeless_placeholders_supported) {
- # this is highly unlikely, but we check just in case
+# this is highly unlikely, but we check just in case
$self->auto_cast(1);
}
}
# create storage for insert/(update blob) transactions,
# unless this is that storage
- return if $self->_is_extra_storage;
+ return if $self->_is_writer_storage;
my $writer_storage = (ref $self)->new;
- $writer_storage->_is_extra_storage(1);
+ $writer_storage->_is_writer_storage(1);
$writer_storage->connect_info($self->connect_info);
- $writer_storage->auto_cast($self->auto_cast);
$self->_writer_storage($writer_storage);
-
-# create a bulk storage unless connect_info is a coderef
- return
- if (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE';
-
- my $bulk_storage = (ref $self)->new;
-
- $bulk_storage->_is_extra_storage(1);
- $bulk_storage->_is_bulk_storage(1); # for special ->disconnect acrobatics
- $bulk_storage->connect_info($self->connect_info);
-
-# this is why
- $bulk_storage->_dbi_connect_info->[0] .= ';bulkLogin=1';
-
- $self->_bulk_storage($bulk_storage);
}
-for my $method (@also_proxy_to_extra_storages) {
+for my $method (@also_proxy_to_writer_storage) {
no strict 'refs';
- no warnings 'redefine';
my $replaced = __PACKAGE__->can($method);
- *{$method} = Sub::Name::subname $method => sub {
+ *{$method} = Sub::Name::subname __PACKAGE__."::$method" => sub {
my $self = shift;
$self->_writer_storage->$replaced(@_) if $self->_writer_storage;
- $self->_bulk_storage->$replaced(@_) if $self->_bulk_storage;
return $self->$replaced(@_);
};
}
-sub disconnect {
- my $self = shift;
-
-# Even though we call $sth->finish for uses of the bulk API, there's still an
-# "active statement" warning on disconnect, which we throw away here.
-# This is due to the bug described in insert_bulk.
-# Currently a noop because 'prepare' is used instead of 'prepare_cached'.
- local $SIG{__WARN__} = sub {
- warn $_[0] unless $_[0] =~ /active statement/i;
- } if $self->_is_bulk_storage;
-
-# so that next transaction gets a dbh
- $self->_began_bulk_work(0) if $self->_is_bulk_storage;
-
- $self->next::method;
-}
-
# Make sure we have CHAINED mode turned on if AutoCommit is off in non-FreeTDS
# DBD::Sybase (since we don't know how DBD::Sybase was compiled.) If however
# we're using FreeTDS, CHAINED mode turns on an implicit transaction which we
my $self = shift;
$self->next::method(@_);
-
- if ($self->_is_bulk_storage) {
-# this should be cleared on every reconnect
- $self->_began_bulk_work(0);
- return;
- }
if (not $self->using_freetds) {
$self->_dbh->{syb_chained_txn} = 1;
$type && $type =~ /(?:text|image|lob|bytea|binary|memo)/i;
}
-sub _is_lob_column {
- my ($self, $source, $column) = @_;
-
- return $self->_is_lob_type($source->column_info($column)->{data_type});
-}
-
sub _prep_for_execute {
my $self = shift;
my ($op, $extra_bind, $ident, $args) = @_;
sub update {
my $self = shift;
- my ($source, $fields, $where, @rest) = @_;
+ my ($source, $fields, $where) = @_;
my $wantarray = wantarray;
-
my $blob_cols = $self->_remove_blob_cols($source, $fields);
- my $table = $source->name;
-
- my $identity_col = List::Util::first
- { $source->column_info($_)->{is_auto_increment} }
- $source->columns;
-
- my $is_identity_update = $identity_col && defined $fields->{$identity_col};
-
if (not $blob_cols) {
- $self->_set_identity_insert($table, 'update') if $is_identity_update;
return $self->next::method(@_);
- $self->_unset_identity_insert($table, 'update') if $is_identity_update;
- }
-
-# check that we're not updating a blob column that's also in $where
- for my $blob (grep $self->_is_lob_column($source, $_), $source->columns) {
- if (exists $where->{$blob} && exists $fields->{$blob}) {
- croak
-'Update of TEXT/IMAGE column that is also in search condition impossible';
- }
}
# update+blob update(s) done atomically on separate connection
my $guard = $self->txn_scope_guard;
-# First update the blob columns to be updated to '' (taken from $fields, where
-# it is originally put by _remove_blob_cols .)
- my %blobs_to_empty = map { ($_ => delete $fields->{$_}) } keys %$blob_cols;
-
- $self->next::method($source, \%blobs_to_empty, $where, @rest);
-
-# Now update the blobs before the other columns in case the update of other
-# columns makes the search condition invalid.
- $self->_update_blobs($source, $blob_cols, $where);
-
my @res;
- if (%$fields) {
- $self->_set_identity_insert($table, 'update') if $is_identity_update;
-
- if ($wantarray) {
- @res = $self->next::method(@_);
- }
- elsif (defined $wantarray) {
- $res[0] = $self->next::method(@_);
- }
- else {
- $self->next::method(@_);
- }
-
- $self->_unset_identity_insert($table, 'update') if $is_identity_update;
+ if ($wantarray) {
+ @res = $self->next::method(@_);
}
+ elsif (defined $wantarray) {
+ $res[0] = $self->next::method(@_);
+ }
+ else {
+ $self->next::method(@_);
+ }
+
+ $self->_update_blobs($source, $blob_cols, $where);
$guard->commit;
return $wantarray ? @res : $res[0];
}
-### the insert_bulk partially stolen from DBI/MSSQL.pm
+### the insert_bulk stuff stolen from DBI/MSSQL.pm
sub _set_identity_insert {
- my ($self, $table, $op) = @_;
+ my ($self, $table) = @_;
my $sql = sprintf (
- 'SET IDENTITY_%s %s ON',
- (uc($op) || 'INSERT'),
+ 'SET IDENTITY_INSERT %s ON',
$self->sql_maker->_quote ($table),
);
- $self->_query_start($sql);
-
my $dbh = $self->_get_dbh;
eval { $dbh->do ($sql) };
- my $exception = $@;
-
- $self->_query_end($sql);
-
- if ($exception) {
+ if ($@) {
$self->throw_exception (sprintf "Error executing '%s': %s",
$sql,
$dbh->errstr,
    );
  }
}
sub _unset_identity_insert {
- my ($self, $table, $op) = @_;
+ my ($self, $table) = @_;
my $sql = sprintf (
- 'SET IDENTITY_%s %s OFF',
- (uc($op) || 'INSERT'),
+ 'SET IDENTITY_INSERT %s OFF',
$self->sql_maker->_quote ($table),
);
- $self->_query_start($sql);
-
my $dbh = $self->_get_dbh;
$dbh->do ($sql);
-
- $self->_query_end($sql);
}
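# Taken together, an identity insert brackets the INSERT roughly like this
# (a sketch; actual quoting comes from sql_maker->_quote):
#
#   SET IDENTITY_INSERT [artist] ON
#   INSERT INTO [artist] (artistid, name) VALUES (999, 'mtfnpy')
#   SET IDENTITY_INSERT [artist] OFF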
-# for tests
-sub _can_insert_bulk { 1 }
-
+# XXX this should use the DBD::Sybase bulk API, where possible
sub insert_bulk {
my $self = shift;
my ($source, $cols, $data) = @_;
- my $identity_col = List::Util::first
- { $source->column_info($_)->{is_auto_increment} }
- $source->columns;
-
my $is_identity_insert = (List::Util::first
- { $source->column_info ($_)->{is_auto_increment} }
- @{$cols}
- ) ? 1 : 0;
-
- my @source_columns = $source->columns;
-
- my $use_bulk_api =
- $self->_bulk_storage &&
- $self->_get_dbh->{syb_has_blk};
-
- if ((not $use_bulk_api) &&
- (Scalar::Util::reftype($self->_dbi_connect_info->[0])||'') eq 'CODE' &&
- (not $self->_bulk_disabled_due_to_coderef_connect_info_warned)) {
- carp <<'EOF';
-Bulk API support disabled due to use of a CODEREF connect_info. Reverting to
-array inserts.
-EOF
- $self->_bulk_disabled_due_to_coderef_connect_info_warned(1);
+ { $source->column_info ($_)->{is_auto_increment} }
+ (@{$cols})
+ )
+ ? 1
+ : 0;
+
+ if ($is_identity_insert) {
+ $self->_set_identity_insert ($source->name);
}
- if (not $use_bulk_api) {
- my $blob_cols = $self->_remove_blob_cols_array($source, $cols, $data);
-
- my $dumb_last_insert_id =
- $identity_col
- && (not $is_identity_insert)
- && ($self->_identity_method||'') ne '@@IDENTITY';
-
- ($self, my ($guard)) = do {
- if ($self->{transaction_depth} == 0 &&
- ($blob_cols || $dumb_last_insert_id)) {
- ($self->_writer_storage, $self->_writer_storage->txn_scope_guard);
- }
- else {
- ($self, undef);
- }
- };
-
- $self->_set_identity_insert ($source->name) if $is_identity_insert;
- $self->next::method(@_);
- $self->_unset_identity_insert ($source->name) if $is_identity_insert;
-
- if ($blob_cols) {
- if ($is_identity_insert) {
- $self->_insert_blobs_array ($source, $blob_cols, $cols, $data);
- }
- else {
- my @cols_with_identities = (@$cols, $identity_col);
-
- ## calculate identities
- # XXX This assumes identities always increase by 1, which may or may not
- # be true.
- my ($last_identity) =
- $self->_dbh->selectrow_array (
- $self->_fetch_identity_sql($source, $identity_col)
- );
- my @identities = (($last_identity - @$data + 1) .. $last_identity);
-
- my @data_with_identities = map [@$_, shift @identities], @$data;
-
- $self->_insert_blobs_array (
- $source, $blob_cols, \@cols_with_identities, \@data_with_identities
- );
- }
- }
-
- $guard->commit if $guard;
- return;
- }
-
-# otherwise, use the bulk API
-
-# rearrange @$data so that columns are in database order
- my %orig_idx;
- @orig_idx{@$cols} = 0..$#$cols;
-
- my %new_idx;
- @new_idx{@source_columns} = 0..$#source_columns;
-
- my @new_data;
- for my $datum (@$data) {
- my $new_datum = [];
- for my $col (@source_columns) {
-# identity data will be 'undef' if not $is_identity_insert
-# columns with defaults will also be 'undef'
- $new_datum->[ $new_idx{$col} ] =
- exists $orig_idx{$col} ? $datum->[ $orig_idx{$col} ] : undef;
- }
- push @new_data, $new_datum;
- }
-
-# bcp identity index is 1-based
- my $identity_idx = exists $new_idx{$identity_col} ?
- $new_idx{$identity_col} + 1 : 0;
-
-## Set a client-side conversion error handler, straight from DBD::Sybase docs.
-# This ignores any data conversion errors detected by the client side libs, as
-# they are usually harmless.
- my $orig_cslib_cb = DBD::Sybase::set_cslib_cb(
- Sub::Name::subname insert_bulk => sub {
- my ($layer, $origin, $severity, $errno, $errmsg, $osmsg, $blkmsg) = @_;
-
- return 1 if $errno == 36;
-
- carp
- "Layer: $layer, Origin: $origin, Severity: $severity, Error: $errno" .
- ($errmsg ? "\n$errmsg" : '') .
- ($osmsg ? "\n$osmsg" : '') .
- ($blkmsg ? "\n$blkmsg" : '');
-
- return 0;
- });
-
- eval {
- my $bulk = $self->_bulk_storage;
-
- my $guard = $bulk->txn_scope_guard;
-
-## XXX get this to work instead of our own $sth
-## will require SQLA or *Hacks changes for ordered columns
-# $bulk->next::method($source, \@source_columns, \@new_data, {
-# syb_bcp_attribs => {
-# identity_flag => $is_identity_insert,
-# identity_column => $identity_idx,
-# }
-# });
- my $sql = 'INSERT INTO ' .
- $bulk->sql_maker->_quote($source->name) . ' (' .
-# colname list is ignored for BCP, but does no harm
- (join ', ', map $bulk->sql_maker->_quote($_), @source_columns) . ') '.
- ' VALUES ('. (join ', ', ('?') x @source_columns) . ')';
-
-## XXX there's a bug in the DBD::Sybase bulk support that makes $sth->finish for
-## a prepare_cached statement ineffective. Replace with ->sth when fixed, or
-## better yet the version above. Should be fixed in DBD::Sybase .
- my $sth = $bulk->_get_dbh->prepare($sql,
-# 'insert', # op
- {
- syb_bcp_attribs => {
- identity_flag => $is_identity_insert,
- identity_column => $identity_idx,
- }
- }
- );
-
- my $bind_attributes = $self->source_bind_attributes($source);
-
- foreach my $slice_idx (0..$#source_columns) {
- my $col = $source_columns[$slice_idx];
-
- my $attributes = $bind_attributes->{$col}
- if $bind_attributes && defined $bind_attributes->{$col};
-
- my @slice = map $_->[$slice_idx], @new_data;
-
- $sth->bind_param_array(($slice_idx + 1), \@slice, $attributes);
- }
-
- $bulk->_query_start($sql);
-
-# this is stolen from DBI::insert_bulk
- my $tuple_status = [];
- my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) };
-
- if (my $err = $@ || $sth->errstr) {
- my $i = 0;
- ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
-
- $self->throw_exception("Unexpected populate error: $err")
- if ($i > $#$tuple_status);
-
- require Data::Dumper;
- local $Data::Dumper::Terse = 1;
- local $Data::Dumper::Indent = 1;
- local $Data::Dumper::Useqq = 1;
- local $Data::Dumper::Quotekeys = 0;
- local $Data::Dumper::Sortkeys = 1;
-
- $self->throw_exception(sprintf "%s for populate slice:\n%s",
- ($tuple_status->[$i][1] || $err),
- Data::Dumper::Dumper(
- { map { $source_columns[$_] => $new_data[$i][$_] } (0 .. $#$cols) }
- ),
- );
- }
-
- $guard->commit;
- $sth->finish;
-
- $bulk->_query_end($sql);
- };
- my $exception = $@;
- if ($exception =~ /-Y option/) {
- carp <<"EOF";
-
-Sybase bulk API operation failed due to character set incompatibility, reverting
-to regular array inserts:
-
-*** Try unsetting the LANG environment variable.
-$@
-EOF
-    $self->_bulk_storage(undef);
-    DBD::Sybase::set_cslib_cb($orig_cslib_cb);
-    unshift @_, $self;
-    goto \&insert_bulk;
+  $self->next::method(@_);
+
+  if ($is_identity_insert) {
+ $self->_unset_identity_insert ($source->name);
}
- elsif ($exception) {
- DBD::Sybase::set_cslib_cb($orig_cslib_cb);
-# rollback makes the bulkLogin connection unusable
- $self->_bulk_storage->disconnect;
- $self->throw_exception($exception);
- }
-
- DBD::Sybase::set_cslib_cb($orig_cslib_cb);
}
-# Make sure blobs are not bound as placeholders, and return any non-empty ones
-# as a hash.
+### end of stolen insert_bulk section
+
sub _remove_blob_cols {
my ($self, $source, $fields) = @_;
my %blob_cols;
for my $col (keys %$fields) {
if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
- my $blob_val = delete $fields->{$col};
- if (not defined $blob_val) {
- $fields->{$col} = \'NULL';
- }
- else {
- $fields->{$col} = \"''";
- $blob_cols{$col} = $blob_val unless $blob_val eq '';
- }
+ $blob_cols{$col} = delete $fields->{$col};
+ $fields->{$col} = \"''";
}
}
return keys %blob_cols ? \%blob_cols : undef;
}
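# e.g. { name => 'foo', blob => $bytes } leaves { name => 'foo', blob => \"''" }
# in $fields and returns { blob => $bytes } for the later blob-update pass.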
-# same for insert_bulk
-sub _remove_blob_cols_array {
- my ($self, $source, $cols, $data) = @_;
-
- my @blob_cols;
-
- for my $i (0..$#$cols) {
- my $col = $cols->[$i];
-
- if ($self->_is_lob_type($source->column_info($col)->{data_type})) {
- for my $j (0..$#$data) {
- my $blob_val = delete $data->[$j][$i];
- if (not defined $blob_val) {
- $data->[$j][$i] = \'NULL';
- }
- else {
- $data->[$j][$i] = \"''";
- $blob_cols[$j][$i] = $blob_val
- unless $blob_val eq '';
- }
- }
- }
- }
-
- return @blob_cols ? \@blob_cols : undef;
-}
-
sub _update_blobs {
my ($self, $source, $blob_cols, $where) = @_;
sub _insert_blobs {
my ($self, $source, $blob_cols, $row) = @_;
my $dbh = $self->_get_dbh;
- my $table = $source->name;
+ my $table = $source->from;
my %row = %$row;
my (@primary_cols) = $source->primary_columns;
$cursor->next;
my $sth = $cursor->sth;
- if (not $sth) {
- require Data::Dumper;
- local $Data::Dumper::Terse = 1;
- local $Data::Dumper::Indent = 1;
- local $Data::Dumper::Useqq = 1;
- local $Data::Dumper::Quotekeys = 0;
- local $Data::Dumper::Sortkeys = 1;
-
- croak "\nCould not find row in table '$table' for blob update:\n".
- Data::Dumper::Dumper(\%where)."\n";
- }
-
eval {
do {
$sth->func('CS_GET', 1, 'ct_data_info') or die $sth->errstr;
}
}
-sub _insert_blobs_array {
- my ($self, $source, $blob_cols, $cols, $data) = @_;
-
- for my $i (0..$#$data) {
- my $datum = $data->[$i];
-
- my %row;
- @row{ @$cols } = @$datum;
-
- my %blob_vals;
- for my $j (0..$#$cols) {
- if (exists $blob_cols->[$i][$j]) {
- $blob_vals{ $cols->[$j] } = $blob_cols->[$i][$j];
- }
- }
-
- $self->_insert_blobs ($source, \%blob_vals, \%row);
- }
-}
-
=head2 connect_call_datetime_setup
Used as:
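
  on_connect_call => 'datetime_setup'
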
sub _dbh_begin_work {
my $self = shift;
-
-# bulkLogin=1 connections are always in a transaction, and can only call BEGIN
-# TRAN once. However, we need to make sure there's a $dbh.
- return if $self->_is_bulk_storage && $self->_dbh && $self->_began_bulk_work;
-
$self->next::method(@_);
-
if ($self->using_freetds) {
$self->_get_dbh->do('BEGIN TRAN');
}
-
- $self->_began_bulk_work(1) if $self->_is_bulk_storage;
}
sub _dbh_commit {
See L</connect_call_blob_setup> for a L<DBIx::Class::Storage::DBI/connect_info>
setting you need to work with C<IMAGE> columns.
-=head1 BULK API
-
-The experimental L<DBD::Sybase> Bulk API support is used for
-L<populate|DBIx::Class::ResultSet/populate> in B<void> context, in a transaction
-on a separate connection.
-
-To use this feature effectively, use a large number of rows for each
-L<populate|DBIx::Class::ResultSet/populate> call, eg.:
-
- while (my $rows = $data_source->get_100_rows()) {
- $rs->populate($rows);
- }
-
-B<NOTE:> the L<add_columns|DBIx::Class::ResultSource/add_columns>
-calls in your C<Result> classes B<must> list columns in database order for this
-to work. Also, you may have to unset the C<LANG> environment variable before
-loading your app, if it doesn't match the character set of your database.
-
-When inserting IMAGE columns using this method, you'll need to use
-L</connect_call_blob_setup> as well.
-
=head1 AUTHOR
See L<DBIx::Class/CONTRIBUTORS>.
use Test::Exception;
use lib qw(t/lib);
use DBICTest;
-
-require DBIx::Class::Storage::DBI::Sybase;
-require DBIx::Class::Storage::DBI::Sybase::NoBindVars;
+use DBIx::Class::Storage::DBI::Sybase;
+use DBIx::Class::Storage::DBI::Sybase::NoBindVars;
my ($dsn, $user, $pass) = @ENV{map { "DBICTEST_SYBASE_${_}" } qw/DSN USER PASS/};
-my $TESTS = 58 + 2;
+my $TESTS = 48 + 2;
if (not ($dsn && $user)) {
plan skip_all =>
is( $it->count, 7, 'COUNT of GROUP_BY ok' );
-# do an IDENTITY_INSERT
+# do an identity insert (which should happen with no txn when using
+# placeholders.)
{
no warnings 'redefine';
$schema->resultset('Artist')
->create({ artistid => 999, name => 'mtfnpy' });
- ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT used');
+ ok((grep /IDENTITY_INSERT/i, @debug_out), 'IDENTITY_INSERT');
SKIP: {
skip 'not testing lack of txn on IDENTITY_INSERT with NoBindVars', 1
}
}
-# do an IDENTITY_UPDATE
- {
- my @debug_out;
- local $schema->storage->{debug} = 1;
- local $schema->storage->debugobj->{callback} = sub {
- push @debug_out, $_[1];
- };
-
- lives_and {
- $schema->resultset('Artist')
- ->find(999)->update({ artistid => 555 });
- ok((grep /IDENTITY_UPDATE/i, @debug_out));
- } 'IDENTITY_UPDATE used';
- $ping_count-- if $@;
- }
+# test insert_bulk using populate, this should always pass whether or not it
+# does anything Sybase specific or not. Just here to aid debugging.
+ lives_ok {
+ $schema->resultset('Artist')->populate([
+ {
+ name => 'bulk artist 1',
+ charfield => 'foo',
+ },
+ {
+ name => 'bulk artist 2',
+ charfield => 'foo',
+ },
+ {
+ name => 'bulk artist 3',
+ charfield => 'foo',
+ },
+ ]);
+ } 'insert_bulk via populate';
my $bulk_rs = $schema->resultset('Artist')->search({
name => { -like => 'bulk artist %' }
});
-# test insert_bulk using populate.
- SKIP: {
- skip 'insert_bulk not supported', 4
- unless $schema->storage->_can_insert_bulk;
+ is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
- lives_ok {
- $schema->resultset('Artist')->populate([
- {
- name => 'bulk artist 1',
- charfield => 'foo',
- },
- {
- name => 'bulk artist 2',
- charfield => 'foo',
- },
- {
- name => 'bulk artist 3',
- charfield => 'foo',
- },
- ]);
- } 'insert_bulk via populate';
-
- is $bulk_rs->count, 3, 'correct number inserted via insert_bulk';
-
- is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
- 'column set correctly via insert_bulk');
-
- my %bulk_ids;
- @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
-
- is ((scalar keys %bulk_ids), 3,
- 'identities generated correctly in insert_bulk');
-
- $bulk_rs->delete;
- }
+ is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+ 'column set correctly via insert_bulk');
-# make sure insert_bulk works a second time on the same connection
- SKIP: {
- skip 'insert_bulk not supported', 3
- unless $schema->storage->_can_insert_bulk;
+ my %bulk_ids;
+ @bulk_ids{map $_->artistid, $bulk_rs->all} = ();
- lives_ok {
- $schema->resultset('Artist')->populate([
- {
- name => 'bulk artist 1',
- charfield => 'bar',
- },
- {
- name => 'bulk artist 2',
- charfield => 'bar',
- },
- {
- name => 'bulk artist 3',
- charfield => 'bar',
- },
- ]);
- } 'insert_bulk via populate called a second time';
-
- is $bulk_rs->count, 3,
- 'correct number inserted via insert_bulk';
-
- is ((grep $_->charfield eq 'bar', $bulk_rs->all), 3,
- 'column set correctly via insert_bulk');
-
- $bulk_rs->delete;
- }
+ is ((scalar keys %bulk_ids), 3,
+ 'identities generated correctly in insert_bulk');
-# test invalid insert_bulk (missing required column)
-#
-# There should be a rollback, reconnect and the next valid insert_bulk should
-# succeed.
- throws_ok {
+ $bulk_rs->delete;
+
+# now test insert_bulk with IDENTITY_INSERT
+ lives_ok {
$schema->resultset('Artist')->populate([
{
+ artistid => 2001,
+ name => 'bulk artist 1',
charfield => 'foo',
- }
+ },
+ {
+ artistid => 2002,
+ name => 'bulk artist 2',
+ charfield => 'foo',
+ },
+ {
+ artistid => 2003,
+ name => 'bulk artist 3',
+ charfield => 'foo',
+ },
]);
- } qr/no value or default|does not allow null|placeholders/i,
-# The second pattern is the error from fallback to regular array insert on
-# incompatible charset.
-# The third is for ::NoBindVars with no syb_has_blk.
- 'insert_bulk with missing required column throws error';
+ } 'insert_bulk with IDENTITY_INSERT via populate';
-# now test insert_bulk with IDENTITY_INSERT
- SKIP: {
- skip 'insert_bulk not supported', 3
- unless $schema->storage->_can_insert_bulk;
+ is $bulk_rs->count, 3,
+ 'correct number inserted via insert_bulk with IDENTITY_INSERT';
- lives_ok {
- $schema->resultset('Artist')->populate([
- {
- artistid => 2001,
- name => 'bulk artist 1',
- charfield => 'foo',
- },
- {
- artistid => 2002,
- name => 'bulk artist 2',
- charfield => 'foo',
- },
- {
- artistid => 2003,
- name => 'bulk artist 3',
- charfield => 'foo',
- },
- ]);
- } 'insert_bulk with IDENTITY_INSERT via populate';
-
- is $bulk_rs->count, 3,
- 'correct number inserted via insert_bulk with IDENTITY_INSERT';
-
- is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
- 'column set correctly via insert_bulk with IDENTITY_INSERT');
-
- $bulk_rs->delete;
- }
+ is ((grep $_->charfield eq 'foo', $bulk_rs->all), 3,
+ 'column set correctly via insert_bulk with IDENTITY_INSERT');
+
+ $bulk_rs->delete;
# test correlated subquery
my $subq = $schema->resultset('Artist')->search({ artistid => { '>' => 3 } })
# mostly stolen from the blob stuff Nniuq wrote for t/73oracle.t
SKIP: {
- skip 'TEXT/IMAGE support does not work with FreeTDS', 18
+ skip 'TEXT/IMAGE support does not work with FreeTDS', 13
if $schema->storage->using_freetds;
my $dbh = $schema->storage->_dbh;
CREATE TABLE bindtype_test
(
id INT IDENTITY PRIMARY KEY,
- bytea IMAGE NULL,
+ bytea INT NULL,
blob IMAGE NULL,
clob TEXT NULL
)
foreach my $size (qw(small large)) {
no warnings 'uninitialized';
- my $created;
- lives_ok {
- $created = $rs->create( { $type => $binstr{$size} } )
- } "inserted $size $type without dying";
+ my $created = eval { $rs->create( { $type => $binstr{$size} } ) };
+ ok(!$@, "inserted $size $type without dying");
+ diag $@ if $@;
$last_id = $created->id if $created;
- lives_and {
- ok($rs->find($last_id)->$type eq $binstr{$size})
- } "verified inserted $size $type";
+ my $got = eval {
+ $rs->find($last_id)->$type
+ };
+ diag $@ if $@;
+ ok($got eq $binstr{$size}, "verified inserted $size $type");
}
}
- $rs->delete;
-
# blob insert with explicit PK
# also a good opportunity to test IDENTITY_INSERT
- lives_ok {
- $rs->create( { id => 1, blob => $binstr{large} } )
- } 'inserted large blob without dying with manual PK';
+ {
+ local $SIG{__WARN__} = sub {};
+ eval { $dbh->do('DROP TABLE bindtype_test') };
- lives_and {
- ok($rs->find(1)->blob eq $binstr{large})
- } 'verified inserted large blob with manual PK';
+ $dbh->do(qq[
+ CREATE TABLE bindtype_test
+ (
+ id INT IDENTITY PRIMARY KEY,
+ bytea INT NULL,
+ blob IMAGE NULL,
+ clob TEXT NULL
+ )
+ ],{ RaiseError => 1, PrintError => 0 });
+ }
+ my $created = eval { $rs->create( { id => 1, blob => $binstr{large} } ) };
+ ok(!$@, "inserted large blob without dying with manual PK");
+ diag $@ if $@;
+
+ my $got = eval {
+ $rs->find(1)->blob
+ };
+ diag $@ if $@;
+ ok($got eq $binstr{large}, "verified inserted large blob with manual PK");
# try a blob update
my $new_str = $binstr{large} . 'mtfnpy';
$schema = get_schema();
}
- lives_ok {
- $rs->search({ id => 1 })->update({ blob => $new_str })
- } 'updated blob successfully';
-
- lives_and {
- ok($rs->find(1)->blob eq $new_str)
- } 'verified updated blob';
-
- # try a blob update with IDENTITY_UPDATE
- lives_and {
- $new_str = $binstr{large} . 'hlagh';
- $rs->find(1)->update({ id => 999, blob => $new_str });
- ok($rs->find(999)->blob eq $new_str);
- } 'verified updated blob with IDENTITY_UPDATE';
+ eval { $rs->search({ id => 1 })->update({ blob => $new_str }) };
+ ok !$@, 'updated blob successfully';
+ diag $@ if $@;
+ $got = eval {
+ $rs->find(1)->blob
+ };
+ diag $@ if $@;
+ ok($got eq $new_str, "verified updated blob");
## try multi-row blob update
# first insert some blobs
+ $rs->find(1)->delete;
+ $rs->create({ blob => $binstr{large} }) for (1..3);
$new_str = $binstr{large} . 'foo';
- lives_and {
- $rs->delete;
- $rs->create({ blob => $binstr{large} }) for (1..2);
- $rs->update({ blob => $new_str });
- is((grep $_->blob eq $new_str, $rs->all), 2);
- } 'multi-row blob update';
-
- $rs->delete;
-
- # now try insert_bulk with blobs
- $new_str = $binstr{large} . 'bar';
- lives_ok {
- $rs->populate([
- {
- bytea => 1,
- blob => $binstr{large},
- clob => $new_str,
- },
- {
- bytea => 1,
- blob => $binstr{large},
- clob => $new_str,
- },
- ]);
- } 'insert_bulk with blobs does not die';
-
- is((grep $_->blob eq $binstr{large}, $rs->all), 2,
- 'IMAGE column set correctly via insert_bulk');
-
- is((grep $_->clob eq $new_str, $rs->all), 2,
- 'TEXT column set correctly via insert_bulk');
-
- # make sure impossible blob update throws
- throws_ok {
- $rs->update({ clob => 'foo' });
- $rs->create({ clob => 'bar' });
- $rs->search({ clob => 'foo' })->update({ clob => 'bar' });
- } qr/impossible/, 'impossible blob update throws';
+ $rs->update({ blob => $new_str });
+ is((grep $_->blob eq $new_str, $rs->all), 3, 'multi-row blob update');
}
# test MONEY column support
});
# test insert transaction when there's an active cursor
- {
+ SKIP: {
+ skip 'not testing insert with active cursor if using ::NoBindVars', 1
+ if $storage_type =~ /NoBindVars/i;
+
my $artist_rs = $schema->resultset('Artist');
$artist_rs->first;
lives_ok {