X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=6b6f7dd55218ee9e7282d429af04c57ce146fe5a;hb=07cda1c5a7df6656772dfd65c488c19c15126168;hp=5818e53faa0391a20a7a001137de356952005f84;hpb=6cad253bcdfd6e265fdde937408564030d0f77a3;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 5818e53..6b6f7dd 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -10,8 +10,8 @@ use mro 'c3';
 use Carp::Clan qw/^DBIx::Class|^Try::Tiny/;
 use DBI;
 use DBIx::Class::Storage::DBI::Cursor;
-use DBIx::Class::Storage::Statistics;
 use Scalar::Util qw/refaddr weaken reftype blessed/;
+use List::Util qw/first/;
 use Data::Dumper::Concise 'Dumper';
 use Sub::Name 'subname';
 use Try::Tiny;
@@ -27,7 +27,7 @@ __PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
 __PACKAGE__->mk_group_accessors('simple' => qw/
   _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
-  _dbh _dbh_details _conn_pid _conn_tid _sql_maker _sql_maker_opts
+  _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
   transaction_depth _dbh_autocommit savepoints
 /);
 
@@ -166,25 +166,38 @@ sub new {
 # of a fork()ed child to kill the parent's shared DBI handle,
 # *before perl reaches the DESTROY in this package*
 # Yes, it is ugly and effective.
+# Additionally this registry is used by the CLONE method to
+# make sure no handles are shared between threads
 {
   my %seek_and_destroy;
 
   sub _arm_global_destructor {
     my $self = shift;
 
-    my $key = Scalar::Util::refaddr ($self);
+    my $key = refaddr ($self);
     $seek_and_destroy{$key} = $self;
-    Scalar::Util::weaken ($seek_and_destroy{$key});
+    weaken ($seek_and_destroy{$key});
   }
 
   END {
     local $?; # just in case the DBI destructor changes it somehow
 
     # destroy just the object if not native to this process/thread
-    $_->_preserve_foreign_dbh for (grep
+    $_->_verify_pid for (grep
       { defined $_ }
       values %seek_and_destroy
     );
   }
+
+  sub CLONE {
+    # As per DBI's recommendation, DBIC disconnects all handles as
+    # soon as possible (DBIC will reconnect only on demand from within
+    # the thread)
+    for (values %seek_and_destroy) {
+      next unless $_;
+      $_->{_dbh_gen}++;  # so that existing cursors will drop as well
+      $_->_dbh(undef);
+    }
+  }
 }
 
 sub DESTROY {
@@ -193,53 +206,28 @@ sub DESTROY {
   # some databases spew warnings on implicit disconnect
   local $SIG{__WARN__} = sub {};
   $self->_dbh(undef);
-}
-
-sub _preserve_foreign_dbh {
-  my $self = shift;
-
-  return unless $self->_dbh;
-
-  $self->_verify_tid;
-
-  return unless $self->_dbh;
-
-  $self->_verify_pid;
+  # this op is necessary, since the very last perl runtime statement
+  # triggers a global destruction shootout, and the $SIG localization
+  # may very well be destroyed before perl actually gets to do the
+  # $dbh undef
+  1;
 }
 
 # handle pid changes correctly - do not destroy parent's connection
 sub _verify_pid {
   my $self = shift;
 
-  return if ( defined $self->_conn_pid and $self->_conn_pid == $$ );
-
-  $self->_dbh->{InactiveDestroy} = 1;
-  $self->_dbh(undef);
-  $self->{_dbh_gen}++;
-
-  return;
-}
-
-# very similar to above, but seems to FAIL if I set InactiveDestroy
-sub _verify_tid {
-  my $self = shift;
-
-  if ( ! defined $self->_conn_tid ) {
-    return; # no threads
-  }
-  elsif ( $self->_conn_tid == threads->tid ) {
-    return; # same thread
+  my $pid = $self->_conn_pid;
+  if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+    $dbh->{InactiveDestroy} = 1;
+    $self->{_dbh_gen}++;
+    $self->_dbh(undef);
   }
 
-  #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...?
-  $self->_dbh(undef);
-  $self->{_dbh_gen}++;
-
   return;
 }
 
-
 =head2 connect_info
 
 This method is normally called by L<DBIx::Class::Schema/connection>, which
@@ -921,7 +909,7 @@ sub connected {
 sub _seems_connected {
   my $self = shift;
 
-  $self->_preserve_foreign_dbh;
+  $self->_verify_pid;
 
   my $dbh = $self->_dbh
     or return 0;
@@ -969,7 +957,7 @@ sub dbh {
 # this is the internal "get dbh or connect (don't check)" method
 sub _get_dbh {
   my $self = shift;
-  $self->_preserve_foreign_dbh;
+  $self->_verify_pid;
   $self->_populate_dbh unless $self->_dbh;
   return $self->_dbh;
 }
@@ -1023,8 +1011,7 @@ sub _populate_dbh {
 
   $self->_dbh($self->_connect(@info));
 
-  $self->_conn_pid($$);
-  $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
+  $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
 
   $self->_determine_driver;
 
@@ -1590,41 +1577,68 @@ sub _execute {
   $self->dbh_do('_dbh_execute', @_);    # retry over disconnects
 }
 
-sub _prefetch_insert_auto_nextvals {
+sub _prefetch_autovalues {
   my ($self, $source, $to_insert) = @_;
 
-  my $upd = {};
-
-  foreach my $col ( $source->columns ) {
-    if ( !defined $to_insert->{$col} ) {
-      my $col_info = $source->column_info($col);
-
-      if ( $col_info->{auto_nextval} ) {
-        $upd->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
-          'nextval',
-          $col_info->{sequence} ||=
+  my $colinfo = $source->columns_info;
+
+  my %values;
+  for my $col (keys %$colinfo) {
+    if (
+      $colinfo->{$col}{auto_nextval}
+      and
+      (
+        ! exists $to_insert->{$col}
+        or
+        ref $to_insert->{$col} eq 'SCALAR'
+      )
+    ) {
+      $values{$col} = $self->_sequence_fetch(
+        'NEXTVAL',
+        ( $colinfo->{$col}{sequence} ||=
           $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
-        );
-      }
+        ),
+      );
     }
   }
 
-  return $upd;
+  \%values;
 }
 
 sub insert {
-  my $self = shift;
-  my ($source, $to_insert, $opts) = @_;
+  my ($self, $source, $to_insert) = @_;
+
+  my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
 
-  my $updated_cols = $self->_prefetch_insert_auto_nextvals (@_);
+  # fuse the values
+  $to_insert = { %$to_insert, %$prefetched_values };
+
+  # list of primary keys we try to fetch from the database
+  # both not-exists and scalarrefs are considered
+  my %fetch_pks;
+  %fetch_pks = ( map
    { $_ => scalar keys %fetch_pks } # so we can preserve order for prettiness
+    grep
+      { ! exists $to_insert->{$_} or ref $to_insert->{$_} eq 'SCALAR' }
+      $source->primary_columns
+  );
+
+  my $sqla_opts;
+  if ($self->_use_insert_returning) {
+
+    # retain order as declared in the resultsource
+    for (sort { $fetch_pks{$a} <=> $fetch_pks{$b} } keys %fetch_pks ) {
+      push @{$sqla_opts->{returning}}, $_;
+    }
+  }
 
   my $bind_attributes = $self->source_bind_attributes($source);
 
-  my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $opts);
+  my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $sqla_opts);
 
-  if ($opts->{returning}) {
-    my @ret_cols = @{$opts->{returning}};
+  my %returned_cols;
+  if (my $retlist = $sqla_opts->{returning}) {
     my @ret_vals = try {
       local $SIG{__WARN__} = sub {};
       my @r = $sth->fetchrow_array;
@@ -1632,18 +1646,13 @@ sub insert {
       @r;
     };
 
-    my %ret;
-    @ret{@ret_cols} = @ret_vals if (@ret_vals);
-
-    $updated_cols = {
-      %$updated_cols,
-      %ret,
-    };
+    @returned_cols{@$retlist} = @ret_vals if @ret_vals;
   }
 
-  return $updated_cols;
+  return { %$prefetched_values, %returned_cols };
 }
 
+
 ## Currently it is assumed that all values passed will be "normal", i.e. not
 ## scalar refs, or at least, all the same type as the first set, the statement is
 ## only prepped once.
@@ -1712,15 +1721,15 @@ sub insert_bulk {
   my ($sql, $bind) = $self->_prep_for_execute (
     'insert', undef, $source, [\%colvalues]
   );
-  my @bind = @$bind;
 
-  my $empty_bind = 1 if (not @bind) &&
-    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
+  if (! @$bind) {
+    # if the bindlist is empty - make sure all "values" are in fact
+    # literal scalarrefs. If not the case this means the storage ate
+    # them away (e.g. the NoBindVars component) and interpolated them
+    # directly into the SQL. This obviously can't be good for multi-inserts
 
-  if ((not @bind) && (not $empty_bind)) {
-    $self->throw_exception(
-      'Cannot insert_bulk without support for placeholders'
-    );
+    $self->throw_exception('Cannot insert_bulk without support for placeholders')
+      if first { ref $_ ne 'SCALAR' } values %colvalues;
   }
 
   # neither _execute_array, nor _execute_inserts_with_no_binds are
   # atomic (even if _execute_array is a single call). Thus a safety
   # scope guard
   my $guard = $self->txn_scope_guard;
 
-  $self->_query_start( $sql, [ dummy => '__BULK_INSERT__' ] );
+  $self->_query_start( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
 
   my $sth = $self->sth($sql);
   my $rv = do {
-    if ($empty_bind) {
-      # bind_param_array doesn't work if there are no binds
-      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    if (@$bind) {
+      #@bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
+      $self->_execute_array( $source, $sth, $bind, $cols, $data );
     }
     else {
-#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
-      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
    }
  };
 
-  $self->_query_end( $sql, [ dummy => '__BULK_INSERT__' ] );
+  $self->_query_end( $sql, @$bind ? [ dummy => '__BULK_INSERT__' ] : () );
 
   $guard->commit;
 
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
+  return (wantarray ? ($rv, $sth, @$bind) : $rv);
 }
 
 sub _execute_array {
@@ -1834,15 +1843,14 @@ sub _dbh_execute_inserts_with_no_binds {
   }
   catch {
     $err = shift;
+  };
+
+  # Make sure statement is finished even if there was an exception.
+  try {
+    $sth->finish
   }
-  finally {
-    # Make sure statement is finished even if there was an exception.
- try { - $sth->finish - } - catch { - $err = shift unless defined $err; - }; + catch { + $err = shift unless defined $err; }; $self->throw_exception($err) if defined $err; @@ -2103,11 +2111,13 @@ sub source_bind_attributes { my ($self, $source) = @_; my $bind_attributes; - foreach my $column ($source->columns) { - my $data_type = $source->column_info($column)->{data_type} || ''; - $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type) - if $data_type; + my $colinfo = $source->columns_info; + + for my $col (keys %$colinfo) { + if (my $dt = $colinfo->{$col}{data_type} ) { + $bind_attributes->{$col} = $self->bind_attribute_by_data_type($dt) + } } return $bind_attributes;
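
For illustration (this is not part of the patch): the fork/thread handling the hunks above converge on is a process-global registry of weakened object references that both END and CLONE can walk. The stand-alone Perl sketch below is a hypothetical analogue; the package name Demo::HandleOwner, the plain "handle" field and the verify_pid method are invented for the example and are not DBIC's real API. It only demonstrates registering weakened refs, refusing to tear down a parent's handle after a fork, and severing shared handles in CLONE.

package Demo::HandleOwner;

use strict;
use warnings;
use Scalar::Util qw(refaddr weaken);

# Process-global registry of live objects, keyed by refaddr and weakened
# so the registry itself never keeps an object alive.
my %seek_and_destroy;

sub new {
  my ($class, %args) = @_;
  my $self = bless { handle => $args{handle}, owner_pid => $$ }, $class;

  my $key = refaddr $self;
  $seek_and_destroy{$key} = $self;
  weaken $seek_and_destroy{$key};

  return $self;
}

# Analogue of _verify_pid: an object created in another process forgets
# its handle instead of tearing down the parent's resource.
sub verify_pid {
  my $self = shift;
  $self->{handle} = undef
    if defined $self->{owner_pid} and $self->{owner_pid} != $$;
  return $self;
}

# Analogue of the END block: visit whatever is still registered at
# interpreter shutdown so nothing foreign gets destroyed by accident.
END {
  defined $_ and $_->verify_pid
    for values %seek_and_destroy;
}

# Analogue of CLONE: a freshly spawned ithread must not share handles
# with its parent, so drop them all and re-create on demand later.
sub CLONE {
  for my $obj (values %seek_and_destroy) {
    next unless defined $obj;
    $obj->{handle} = undef;
  }
}

package main;

my $owner = Demo::HandleOwner->new( handle => 'pretend-dbh' );
print defined $owner->{handle} ? "handle present\n" : "handle dropped\n";

Weakening each registry slot keeps the registry from extending object lifetimes, while still giving END and CLONE a complete list of live instances to visit.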