X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=d46522b307f19d0e7954ebbae24769fd942d7910;hb=8c21eef750db35dbc8f363ffa7218ef71073d656;hp=cc59419bad1f0c247352c586257a8bc9342efee5;hpb=ad7c50fc26e1304855438776d88f4dd074d2fe05;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index cc59419..d46522b 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -8,7 +8,6 @@ use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
 use mro 'c3';
 
 use DBIx::Class::Carp;
-use DBIx::Class::Exception;
 use Scalar::Util qw/refaddr weaken reftype blessed/;
 use List::Util qw/first/;
 use Sub::Name 'subname';
@@ -87,7 +86,6 @@ sub _determine_supports_join_optimizer { 1 };
 # class, as _use_X may be hardcoded class-wide, and _supports_X calls
 # _determine_supports_X which obv. needs a correct driver as well
 my @rdbms_specific_methods = qw/
-  deployment_statements
   sqlt_type
   sql_maker
   build_datetime_parser
@@ -795,11 +793,25 @@ sub dbh_do {
   return $self->$run_target($self->_get_dbh, @_)
     if $self->{_in_do_block} or $self->transaction_depth;
 
-  my $args = \@_;
+  my $cref = (ref $run_target eq 'CODE')
+    ? $run_target
+    : $self->can($run_target) || $self->throw_exception(sprintf (
+      'Can\'t locate object method "%s" via package "%s"',
+      $run_target,
+      (ref $self || $self),
+    ))
+  ;
+
+  # take a ref instead of a copy, to preserve @_ aliasing
+  # semantics within the coderef, but only if needed
+  # (pseudoforking doesn't like this trick much)
+  my $args = @_ ? \@_ : [];
+  unshift @$args, $self, $self->_get_dbh;
 
   DBIx::Class::Storage::BlockRunner->new(
     storage => $self,
-    run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+    run_code => $cref,
+    run_args => $args,
     wrap_txn => 0,
     retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
   )->run;
@@ -1387,10 +1399,17 @@ sub _connect {
       $dbh = DBI->connect(@info);
     }
 
-    if (!$dbh) {
-      die $DBI::errstr;
-    }
+    die $DBI::errstr unless $dbh;
+
+    die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. "
+      . 'This handle is disconnected as far as DBIC is concerned, and we can '
+      . 'not continue',
+      ref $info[0] eq 'CODE'
+        ? "Connection coderef $info[0] returned a"
+        : 'DBI->connect($schema->storage->connect_info) resulted in a'
+    ) unless $dbh->FETCH('Active');
+
 
     # sanity checks unless asked otherwise
     unless ($self->unsafe) {
       $self->throw_exception(
@@ -1560,10 +1579,13 @@ sub _prep_for_execute {
 sub _gen_sql_bind {
   my ($self, $op, $ident, $args) = @_;
 
-  my ($sql, @bind) = $self->sql_maker->$op(
-    blessed($ident) ? $ident->from : $ident,
-    @$args,
-  );
+  my ($colinfos, $from);
+  if ( blessed($ident) ) {
+    $from = $ident->from;
+    $colinfos = $ident->columns_info;
+  }
+
+  my ($sql, @bind) = $self->sql_maker->$op( ($from || $ident), @$args );
 
   if (
     ! $ENV{DBIC_DT_SEARCH_OK}
@@ -1580,7 +1602,7 @@ sub _gen_sql_bind {
   }
 
   return( $sql, $self->_resolve_bindattrs(
-    $ident, [ @{$args->[2]{bind}||[]}, @bind ]
+    $ident, [ @{$args->[2]{bind}||[]}, @bind ], $colinfos
   ));
 }
 
@@ -1743,9 +1765,7 @@ sub _dbh_execute {
 }
 
 sub _prefetch_autovalues {
-  my ($self, $source, $to_insert) = @_;
-
-  my $colinfo = $source->columns_info;
+  my ($self, $source, $colinfo, $to_insert) = @_;
 
   my %values;
   for my $col (keys %$colinfo) {
@@ -1775,7 +1795,9 @@ sub insert {
   my ($self, $source, $to_insert) = @_;
 
-  my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
+  my $col_infos = $source->columns_info;
+
+  my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert);
 
   # fuse the values, but keep a separate list of prefetched_values so that
   # they can be fused once again with the final return
@@ -1783,7 +1805,6 @@ sub insert {
   # FIXME - we seem to assume undef values as non-supplied. This is wrong.
   # Investigate what does it take to s/defined/exists/
 
-  my $col_infos = $source->columns_info;
   my %pcols = map { $_ => 1 } $source->primary_columns;
   my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
   for my $col ($source->columns) {
@@ -1917,7 +1938,7 @@ sub insert_bulk {
   # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
   # can be later matched up by address), because we want to supply a real
   # value on which perhaps e.g. datatype checks will be performed
-  my ($proto_data, $value_type_idx);
+  my ($proto_data, $value_type_by_col_idx);
   for my $i (@col_range) {
     my $colname = $cols->[$i];
     if (ref $data->[0][$i] eq 'SCALAR') {
@@ -1936,18 +1957,18 @@ sub insert_bulk {
       # store value-less (attrs only) bind info - we will be comparing all
       # supplied binds against this for sanity
-      $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+      $value_type_by_col_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
 
       $proto_data->{$colname} = \[ $sql, map { [
         # inject slice order to use for $proto_bind construction
-          { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+          { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i, _literal_bind_subindex => $_+1 }
             =>
           $resolved_bind->[$_][1]
         ] } (0 .. $#bind)
       ];
     }
     else {
-      $value_type_idx->{$i} = 0;
+      $value_type_by_col_idx->{$i} = undef;
 
       $proto_data->{$colname} = \[ '?', [
         { dbic_colname => $colname, _bind_data_slice_idx => $i }
           =>
@@ -1963,7 +1984,7 @@ sub insert_bulk {
     [ $proto_data ],
   );
 
-  if (! @$proto_bind and keys %$value_type_idx) {
+  if (! @$proto_bind and keys %$value_type_by_col_idx) {
     # if the bindlist is empty and we had some dynamic binds, this means the
     # storage ate them away (e.g. the NoBindVars component) and interpolated
     # them directly into the SQL. This obviously can't be good for multi-inserts
@@ -1997,7 +2018,7 @@ sub insert_bulk {
     for my $row_idx (1..$#$data) {  # we are comparing against what we got from [0] above, hence start from 1
       my $val = $data->[$row_idx][$col_idx];
 
-      if (! exists $value_type_idx->{$col_idx}) { # literal no binds
+      if (! exists $value_type_by_col_idx->{$col_idx}) { # literal no binds
        if (ref $val ne 'SCALAR') {
          $bad_slice_report_cref->(
            "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
@@ -2013,7 +2034,7 @@ sub insert_bulk {
          );
        }
      }
-      elsif (! $value_type_idx->{$col_idx} ) {  # regular non-literal value
+      elsif (! defined $value_type_by_col_idx->{$col_idx} ) {  # regular non-literal value
        if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
          $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
        }
@@ -2042,7 +2063,7 @@ sub insert_bulk {
 
       # need to check the bind attrs - a bind will happen only once for
       # the entire dataset, so any changes further down will be ignored.
       elsif (! Data::Compare::Compare(
-        $value_type_idx->{$col_idx},
+        $value_type_by_col_idx->{$col_idx},
         [ map
           { $_->[0] }
@@ -2119,23 +2140,17 @@ sub _dbh_execute_for_fetch {
   # alphabetical ordering by colname). We actually do want to
   # preserve this behavior so that prepare_cached has a better
   # chance of matching on unrelated calls
-  my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
 
   my $fetch_row_idx = -1; # saner loop this way
   my $fetch_tuple = sub {
     return undef if ++$fetch_row_idx > $#$data;
 
-    return [ map
-      { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
-        ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
-        : $_
-      }
-      map
-        { $data->[$fetch_row_idx][$_] }
-        sort
-          { $data_reorder{$a} <=> $data_reorder{$b} }
-          keys %data_reorder
-    ];
+    return [ map { defined $_->{_literal_bind_subindex}
+      ? ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}
+         ->[ $_->{_literal_bind_subindex} ]
+          ->[1]
+      : $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]
+    } map { $_->[0] } @$proto_bind];
   };
 
   my $tuple_status = [];
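
For reference, here is a self-contained Perl sketch (not part of the patch) of how the _bind_data_slice_idx / _literal_bind_subindex metadata introduced above lets the new $fetch_tuple closure slice each data row into a flat bind tuple without the old %data_reorder sort. The toy shapes of $data and $proto_bind below are assumptions for illustration only, not verbatim DBIC internals.

  use strict;
  use warnings;

  # Toy structures shaped like insert_bulk's $data and $proto_bind (assumed
  # shapes): column 0 is a plain bind value, column 1 is a literal SQL
  # snippet ( \[ $sql, @binds ] ) contributing two sub-binds per row.
  my $data = [
    [ 'r0c0', \[ 'LOWER(?) || UPPER(?)', [ {} => 'r0a' ], [ {} => 'r0b' ] ] ],
    [ 'r1c0', \[ 'LOWER(?) || UPPER(?)', [ {} => 'r1a' ], [ {} => 'r1b' ] ] ],
  ];

  # One attrs-hash per placeholder in the final SQL. _bind_data_slice_idx
  # points at the source column; _literal_bind_subindex (1-based, matching
  # the $_+1 in the patch) indexes past the SQL string into the literal's
  # own bind list.
  my $proto_bind = [
    [ { _bind_data_slice_idx => 0 }                              => 'r0c0' ],
    [ { _bind_data_slice_idx => 1, _literal_bind_subindex => 1 } => 'r0a'  ],
    [ { _bind_data_slice_idx => 1, _literal_bind_subindex => 2 } => 'r0b'  ],
  ];

  my $fetch_row_idx = -1;
  my $fetch_tuple = sub {
    return undef if ++$fetch_row_idx > $#$data;
    return [ map {
      defined $_->{_literal_bind_subindex}
        # deref the \[ $sql, @binds ], index past the SQL string, then take
        # the value half of the [ $attrs => $value ] pair
        ? ${ $data->[$fetch_row_idx]->[ $_->{_bind_data_slice_idx} ] }
            ->[ $_->{_literal_bind_subindex} ][1]
        : $data->[$fetch_row_idx]->[ $_->{_bind_data_slice_idx} ]
    } map { $_->[0] } @$proto_bind ];
  };

  while (my $tuple = $fetch_tuple->()) {
    print join(', ', @$tuple), "\n";   # r0c0, r0a, r0b -- then r1c0, r1a, r1b
  }

Because every proto-bind entry carries its own slice coordinates, the same $proto_bind maps every row of $data to the placeholder order of the prepared statement, which is what execute_for_fetch requires.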