use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
use DBI;
use DBIx::Class::Storage::DBI::Cursor;
-use DBIx::Class::Storage::Statistics;
use Scalar::Util qw/refaddr weaken reftype blessed/;
+use List::Util qw/first/;
use Data::Dumper::Concise 'Dumper';
use Sub::Name 'subname';
use Try::Tiny;
use File::Path 'make_path';
+use overload ();
use namespace::clean;
__PACKAGE__->mk_group_accessors('simple' => qw/
_connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
- _dbh _dbh_details _conn_pid _conn_tid _sql_maker _sql_maker_opts
+ _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
transaction_depth _dbh_autocommit savepoints
/);
# will get the same rdbms version). _determine_supports_X does not need to
# exist on a driver, as we ->can for it before calling.
-my @capabilities = (qw/insert_returning placeholders typeless_placeholders join_optimizer/);
+my @capabilities = (qw/
+ insert_returning
+ insert_returning_bound
+ placeholders
+ typeless_placeholders
+ join_optimizer
+/);
__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
# of a fork()ed child to kill the parent's shared DBI handle,
# *before perl reaches the DESTROY in this package*
# Yes, it is ugly and effective.
+# Additionally this registry is used by the CLONE method to
+# make sure no handles are shared between threads
{
my %seek_and_destroy;
local $?; # just in case the DBI destructor changes it somehow
# destroy just the object if not native to this process/thread
- $_->_preserve_foreign_dbh for (grep
+ $_->_verify_pid for (grep
{ defined $_ }
values %seek_and_destroy
);
}
+
+ sub CLONE {
+ # As per DBI's recommendation, DBIC disconnects all handles as
+ # soon as possible (DBIC will reconnect only on demand from within
+ # the thread)
+ for (values %seek_and_destroy) {
+ next unless $_;
+ $_->{_dbh_gen}++; # so that existing cursors will drop as well
+ $_->_dbh(undef);
+ }
+ }
}
sub DESTROY {
# some databases spew warnings on implicit disconnect
local $SIG{__WARN__} = sub {};
$self->_dbh(undef);
-}
-
-sub _preserve_foreign_dbh {
- my $self = shift;
-
- return unless $self->_dbh;
-
- $self->_verify_tid;
-
- return unless $self->_dbh;
-
- $self->_verify_pid;
+ # this op is necessary, since the very last perl runtime statement
+ # triggers a global destruction shootout, and the $SIG localization
+ # may very well be destroyed before perl actually gets to do the
+ # $dbh undef
+ 1;
}
# handle pid changes correctly - do not destroy parent's connection
sub _verify_pid {
my $self = shift;
- return if ( defined $self->_conn_pid and $self->_conn_pid == $$ );
-
- $self->_dbh->{InactiveDestroy} = 1;
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
-
- return;
-}
-
-# very similar to above, but seems to FAIL if I set InactiveDestroy
-sub _verify_tid {
- my $self = shift;
-
- if ( ! defined $self->_conn_tid ) {
- return; # no threads
- }
- elsif ( $self->_conn_tid == threads->tid ) {
- return; # same thread
+ my $pid = $self->_conn_pid;
+ if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+ $dbh->{InactiveDestroy} = 1;
+ $self->{_dbh_gen}++;
+ $self->_dbh(undef);
}
- #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...?
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
-
return;
}
-
=head2 connect_info
This method is normally called by L<DBIx::Class::Schema/connection>, which
my @args = @{ $info->{arguments} };
- $self->_dbi_connect_info([@args,
- %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+ if (keys %attrs and ref $args[0] ne 'CODE') {
+ carp
+ 'You provided explicit AutoCommit => 0 in your connection_info. '
+ . 'This is almost universally a bad idea (see the footnotes of '
+ . 'DBIx::Class::Storage::DBI for more info). If you still want to '
+ . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
+ . 'this warning.'
+ if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};
+
+ push @args, \%attrs if keys %attrs;
+ }
+ $self->_dbi_connect_info(\@args);
# FIXME - dirty:
# save attributes them in a separate accessor so they are always
return \%info;
}
-sub _default_dbi_connect_attributes {
- return {
+sub _default_dbi_connect_attributes () {
+ +{
AutoCommit => 1,
- RaiseError => 1,
PrintError => 0,
+ RaiseError => 1,
+ ShowErrorStatement => 1,
};
}
local $self->{_in_dbh_do} = 1;
my @result;
- my $want_array = wantarray;
+ my $want = wantarray;
my $tried = 0;
while(1) {
try {
$self->txn_begin;
my $txn_start_depth = $self->transaction_depth;
- if($want_array) {
+ if($want) {
@result = $coderef->(@$args);
}
- elsif(defined $want_array) {
+ elsif(defined $want) {
$result[0] = $coderef->(@$args);
}
else {
$exception = $_;
};
- if(! defined $exception) { return $want_array ? @result : $result[0] }
+ if(! defined $exception) { return wantarray ? @result : $result[0] }
if($self->transaction_depth > 1 || $tried++ || $self->connected) {
my $rollback_exception;
sub _seems_connected {
my $self = shift;
- $self->_preserve_foreign_dbh;
+ $self->_verify_pid;
my $dbh = $self->_dbh
or return 0;
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
- $self->_preserve_foreign_dbh;
+ $self->_verify_pid;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
$self->_dbh($self->_connect(@info));
- $self->_conn_pid($$);
- $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
+ $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
$self->_determine_driver;
unless ($self->unsafe) {
+ $self->throw_exception(
+ 'Refusing clobbering of {HandleError} installed on externally supplied '
+ ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
+ ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';
+
+ # Default via _default_dbi_connect_attributes is 1, hence it was an explicit
+ # request, or an external handle. Complain and set anyway
+ unless ($dbh->{RaiseError}) {
+ carp( ref $info[0] eq 'CODE'
+
+ ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
+ ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
+ .'attribute has been supplied'
+
+ : 'RaiseError => 0 supplied in your connection_info, without an explicit '
+ .'unsafe => 1. Toggling RaiseError back to true'
+ );
+
+ $dbh->{RaiseError} = 1;
+ }
+
# this odd anonymous coderef dereference is in fact really
# necessary to avoid the unwanted effect described in perl5
# RT#75792
my $weak_self = $_[0];
weaken $weak_self;
- $_[1]->{HandleError} = sub {
+ # the coderef is blessed so we can distinguish it from externally
+ # supplied handles (which must be preserved)
+ $_[1]->{HandleError} = bless sub {
if ($weak_self) {
$weak_self->throw_exception("DBI Exception: $_[0]");
}
# the scope of DBIC
croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
}
- };
+ }, '__DBIC__DBH__ERROR__HANDLER__';
}->($self, $dbh);
-
- $dbh->{ShowErrorStatement} = 1;
- $dbh->{RaiseError} = 1;
- $dbh->{PrintError} = 0;
}
}
catch {
sub txn_commit {
my $self = shift;
- if ($self->{transaction_depth} == 1) {
+ if (! $self->_dbh) {
+ $self->throw_exception('cannot COMMIT on a disconnected handle');
+ }
+ elsif ($self->{transaction_depth} == 1) {
$self->debugobj->txn_commit()
if ($self->debug);
$self->_dbh_commit;
$self->svp_release
if $self->auto_savepoint;
}
+ elsif (! $self->_dbh->FETCH('AutoCommit') ) {
+
+ carp "Storage transaction_depth $self->{transaction_depth} does not match "
+ ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
+
+ $self->debugobj->txn_commit()
+ if ($self->debug);
+ $self->_dbh_commit;
+ $self->{transaction_depth} = 0
+ if $self->_dbh_autocommit;
+ }
else {
$self->throw_exception( 'Refusing to commit without a started transaction' );
}
foreach my $data (@data) {
my $ref = ref $data;
- $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
- $sth->bind_param($placeholder_index, $data, $attributes);
- $placeholder_index++;
+ if ($ref and overload::Method($data, '""') ) {
+ $data = "$data";
+ }
+ elsif ($ref eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts
+ $sth->bind_param_inout(
+ $placeholder_index++,
+ $data,
+ $self->_max_column_bytesize($ident, $column_name),
+ $attributes
+ );
+ next;
+ }
+
+ $sth->bind_param($placeholder_index++, $data, $attributes);
}
}
$self->dbh_do('_dbh_execute', @_); # retry over disconnects
}
-sub insert {
+sub _prefetch_autovalues {
my ($self, $source, $to_insert) = @_;
my $colinfo = $source->columns_info;
- # mix with auto-nextval marked values (a bit of a speed hit, but
- # no saner way to handle this yet)
- my $auto_nextvals = {} ;
+ my %values;
for my $col (keys %$colinfo) {
if (
$colinfo->{$col}{auto_nextval}
ref $to_insert->{$col} eq 'SCALAR'
)
) {
- $auto_nextvals->{$col} = $self->_sequence_fetch(
- 'nextval',
+ $values{$col} = $self->_sequence_fetch(
+ 'NEXTVAL',
( $colinfo->{$col}{sequence} ||=
$self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
),
}
}
+ \%values;
+}
+
+sub insert {
+ my ($self, $source, $to_insert) = @_;
+
+ my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
+
# fuse the values
- $to_insert = { %$to_insert, %$auto_nextvals };
+ $to_insert = { %$to_insert, %$prefetched_values };
# list of primary keys we try to fetch from the database
# both not-exists and scalarrefs are considered
my %fetch_pks;
- %fetch_pks = ( map
- { $_ => scalar keys %fetch_pks } # so we can preserve order for prettyness
- grep
- { ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR' }
- $source->primary_columns
- );
+ for ($source->primary_columns) {
$fetch_pks{$_} = scalar keys %fetch_pks # so we can preserve order for prettiness
+ if ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR';
+ }
- my $sqla_opts;
+ my ($sqla_opts, @ir_container);
if ($self->_use_insert_returning) {
# retain order as declared in the resultsource
for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
push @{$sqla_opts->{returning}}, $_;
+ $sqla_opts->{returning_container} = \@ir_container
+ if $self->_use_insert_returning_bound;
}
}
my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
- my %returned_cols = %$auto_nextvals;
+ my %returned_cols;
if (my $retlist = $sqla_opts->{returning}) {
- my @ret_vals = try {
+ @ir_container = try {
local $SIG{__WARN__} = sub {};
my @r = $sth->fetchrow_array;
$sth->finish;
@r;
- };
+ } unless @ir_container;
- @returned_cols{@$retlist} = @ret_vals if @ret_vals;
+ @returned_cols{@$retlist} = @ir_container if @ir_container;
}
- return \%returned_cols;
+ return { %$prefetched_values, %returned_cols };
}
my ($sql, $bind) = $self->_prep_for_execute (
'insert', undef, $source, [\%colvalues]
);
- my @bind = @$bind;
- my $empty_bind = 1 if (not @bind) &&
- (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
+ if (! @$bind) {
+ # if the bindlist is empty - make sure all "values" are in fact
+ # literal scalarrefs. If not the case this means the storage ate
+ # them away (e.g. the NoBindVars component) and interpolated them
+ # directly into the SQL. This obviously can't be good for multi-inserts
- if ((not @bind) && (not $empty_bind)) {
- $self->throw_exception(
- 'Cannot insert_bulk without support for placeholders'
- );
+ $self->throw_exception('Cannot insert_bulk without support for placeholders')
+ if first { ref $_ ne 'SCALAR' } values %colvalues;
}
# neither _execute_array, nor _execute_inserts_with_no_binds are
# scope guard
my $guard = $self->txn_scope_guard;
- $self->_query_start( $sql, [ dummy => '__BULK_INSERT__' ] );
+ $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
my $sth = $self->sth($sql);
my $rv = do {
- if ($empty_bind) {
- # bind_param_array doesn't work if there are no binds
- $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+ if (@$bind) {
+ #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+ $self->_execute_array( $source, $sth, $bind, $cols, $data );
}
else {
-# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
- $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+ # bind_param_array doesn't work if there are no binds
+ $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
}
};
- $self->_query_end( $sql, [ dummy => '__BULK_INSERT__' ] );
+ $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
$guard->commit;
- return (wantarray ? ($rv, $sth, @bind) : $rv);
+ return (wantarray ? ($rv, $sth, @$bind) : $rv);
}
sub _execute_array {
$err = shift;
};
+ # Not all DBDs are created equal. Some throw on error, some return
+ # an undef $rv, and some set $sth->err - try whatever we can
+ $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
+ ! defined $err
+ and
+ ( !defined $rv or $sth->err )
+ );
+
# Statement must finish even if there was an exception.
try {
$sth->finish
$err = shift unless defined $err
};
- $err = $sth->errstr
- if (! defined $err and $sth->err);
-
if (defined $err) {
my $i = 0;
++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
}
catch {
$err = shift;
+ };
+
+ # Make sure statement is finished even if there was an exception.
+ try {
+ $sth->finish
}
- finally {
- # Make sure statement is finished even if there was an exception.
- try {
- $sth->finish
- }
- catch {
- $err = shift unless defined $err;
- };
+ catch {
+ $err = shift unless defined $err;
};
$self->throw_exception($err) if defined $err;
from => $ident,
where => $where,
$rs_alias && $alias2source->{$rs_alias}
- ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle )
+ ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
: ()
,
};
&&
@{$attrs->{group_by}}
&&
- $attrs->{_prefetch_select}
- &&
- @{$attrs->{_prefetch_select}}
+ $attrs->{_prefetch_selector_range}
)
) {
($ident, $select, $where, $attrs)
);
my @ret;
- my $wa = wantarray;
- if ($wa) {
+ if (wantarray) {
@ret = $tr->translate;
}
else {
$self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
unless (@ret && defined $ret[0]);
- return $wa ? @ret : $ret[0];
+ return wantarray ? @ret : $ret[0];
}
sub deploy {
return $alias;
}
+# The size in bytes to use for DBI's ->bind_param_inout, this is the generic
+# version and it may be necessary to amend or override it for a specific storage
+# if such binds are necessary.
+sub _max_column_bytesize {
+ my ($self, $source, $col) = @_;
+
+ my $inf = $source->column_info($col);
+ return $inf->{_max_bytesize} ||= do {
+
+ my $max_size;
+
+ if (my $data_type = $inf->{data_type}) {
+ $data_type = lc($data_type);
+
+ # String/sized-binary types
+ if ($data_type =~ /^(?:l?(?:var)?char(?:acter)?(?:\s*varying)?
+ |(?:var)?binary(?:\s*varying)?|raw)\b/x
+ ) {
+ $max_size = $inf->{size};
+ }
+ # Other charset/unicode types, assume scale of 4
+ elsif ($data_type =~ /^(?:national\s*character(?:\s*varying)?|nchar
+ |univarchar
+ |nvarchar)\b/x
+ ) {
+ $max_size = $inf->{size} * 4 if $inf->{size};
+ }
+ # Blob types
+ elsif ($self->_is_lob_type($data_type)) {
+ # default to longreadlen
+ }
+ else {
+ $max_size = 100; # for all other (numeric?) datatypes
+ }
+ }
+
+ $max_size ||= $self->_get_dbh->{LongReadLen} || 8000;
+ };
+}
+
+# Determine if a data_type is some type of BLOB
+sub _is_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /(?:lob|bfile|text|image|bytea|memo)/i
+ || $data_type =~ /^long(?:\s*(?:raw|bit\s*varying|varbit|binary
+ |varchar|character\s*varying|nvarchar
+ |national\s*character\s*varying))?$/xi);
+}
+
1;
=head1 USAGE NOTES