diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 1c7ea76..8abcc6e 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -14,6 +14,7 @@ use List::Util qw/first/;
 use Sub::Name 'subname';
 use Try::Tiny;
 use overload ();
+use Data::Compare (); # no imports!!! guard against insane architecture
 use namespace::clean;

 # default cursor class, overridable in connect_info attributes
@@ -32,8 +33,7 @@ __PACKAGE__->sql_name_sep('.');
 __PACKAGE__->mk_group_accessors('simple' => qw/
   _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
-  _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts
-  transaction_depth _dbh_autocommit savepoints
+  _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
 /);

 # the values for these accessors are picked out (and deleted) from
@@ -62,8 +62,12 @@ __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 my @capabilities = (qw/
   insert_returning
   insert_returning_bound
+
+  multicolumn_in
+
   placeholders
   typeless_placeholders
+
   join_optimizer
 /);
 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
@@ -86,12 +90,14 @@ my @rdbms_specific_methods = qw/
   build_datetime_parser
   datetime_parser_type

+  txn_begin
   insert
   insert_bulk
   update
   delete
   select
   select_single
+  with_deferred_fk_checks

   get_use_dbms_capability
   get_dbms_capability
@@ -131,7 +137,6 @@ for my $meth (@rdbms_specific_methods) {
   };
 }

-
 =head1 NAME

 DBIx::Class::Storage::DBI - DBI storage handler
@@ -167,11 +172,9 @@ documents DBI-specific methods and behaviors.
 sub new {
   my $new = shift->next::method(@_);

-  $new->transaction_depth(0);
   $new->_sql_maker_opts({});
   $new->_dbh_details({});
-  $new->{savepoints} = [];
-  $new->{_in_dbh_do} = 0;
+  $new->{_in_do_block} = 0;
   $new->{_dbh_gen} = 0;

   # read below to see what this does
@@ -216,6 +219,9 @@ sub new {
       next unless $_;
       $_->{_dbh_gen}++; # so that existing cursors will drop as well
       $_->_dbh(undef);
+
+      $_->transaction_depth(0);
+      $_->savepoints([]);
     }
   }
 }
@@ -243,6 +249,8 @@ sub _verify_pid {
     $dbh->{InactiveDestroy} = 1;
     $self->{_dbh_gen}++;
     $self->_dbh(undef);
+    $self->transaction_depth(0);
+    $self->savepoints([]);
   }

   return;
@@ -322,8 +330,8 @@ for most DBDs. See L for details.

 =head3 DBIx::Class specific connection attributes

-In addition to the standard L
-L attributes, DBIx::Class recognizes
+In addition to the standard L
+L attributes, DBIx::Class recognizes
 the following connection options. These options can be mixed in with your other
 L connection attributes, or placed in a separate hashref
 (C<\%extra_attributes>) as shown above.
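As a quick illustration of the two call styles the POD above describes (a minimal sketch for illustration only; the schema class, DSN and option values are hypothetical and not part of this patch):

    # DBIx::Class-specific options mixed in with the standard DBI attributes
    my $schema = My::Schema->connect(
      'dbi:SQLite:dbname=app.db', '', '',
      {
        RaiseError    => 1,                              # standard DBI attribute
        quote_char    => '"',                            # DBIx::Class-specific
        on_connect_do => [ 'PRAGMA foreign_keys = ON' ], # DBIx::Class-specific
      },
    );

    # ... or with the DBIx::Class options in a separate trailing hashref
    my $schema_alt = My::Schema->connect(
      'dbi:SQLite:dbname=app.db', '', '',
      { RaiseError => 1 },
      {
        quote_char    => '"',
        on_connect_do => [ 'PRAGMA foreign_keys = ON' ],
      },
    );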
@@ -775,98 +783,33 @@ sub dbh_do {
   my $dbh = $self->_get_dbh;

   return $self->$code($dbh, @_)
-    if ( $self->{_in_dbh_do} || $self->{transaction_depth} );
+    if ( $self->{_in_do_block} || $self->{transaction_depth} );

-  local $self->{_in_dbh_do} = 1;
+  local $self->{_in_do_block} = 1;

   # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
   my $args = \@_;
-  return try {
+
+  try {
     $self->$code ($dbh, @$args);
   } catch {
     $self->throw_exception($_) if $self->connected;

     # We were not connected - reconnect and retry, but let any
     # exception fall right through this time
-    carp "Retrying $code after catching disconnected exception: $_"
-      if $ENV{DBIC_DBIRETRY_DEBUG};
+    carp "Retrying dbh_do($code) after catching disconnected exception: $_"
+      if $ENV{DBIC_STORAGE_RETRY_DEBUG};

     $self->_populate_dbh;
     $self->$code($self->_dbh, @$args);
   };
 }

-# This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
-# It also informs dbh_do to bypass itself while under the direction of txn_do,
-# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
 sub txn_do {
-  my $self = shift;
-  my $coderef = shift;
-
-  ref $coderef eq 'CODE' or $self->throw_exception
-    ('$coderef must be a CODE reference');
-
-  local $self->{_in_dbh_do} = 1;
-
-  my @result;
-  my $want = wantarray;
-
-  my $tried = 0;
-  while(1) {
-    my $exception;
-
-    # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
-    my $args = \@_;
-
-    try {
-      $self->txn_begin;
-      my $txn_start_depth = $self->transaction_depth;
-      if($want) {
-        @result = $coderef->(@$args);
-      }
-      elsif(defined $want) {
-        $result[0] = $coderef->(@$args);
-      }
-      else {
-        $coderef->(@$args);
-      }
-
-      my $delta_txn = $txn_start_depth - $self->transaction_depth;
-      if ($delta_txn == 0) {
-        $self->txn_commit;
-      }
-      elsif ($delta_txn != 1) {
-        # an off-by-one would mean we fired a rollback
-        carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef";
-      }
-    } catch {
-      $exception = $_;
-    };
-
-    if(! defined $exception) { return wantarray ? @result : $result[0] }
-
-    if($self->transaction_depth > 1 || $tried++ || $self->connected) {
-      my $rollback_exception;
-      try { $self->txn_rollback } catch { $rollback_exception = shift };
-      if(defined $rollback_exception) {
-        my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
-        $self->throw_exception($exception)  # propagate nested rollback
-          if $rollback_exception =~ /$exception_class/;
-
-        $self->throw_exception(
-          "Transaction aborted: ${exception}. "
"Rollback failed: ${rollback_exception}" - ); - } - $self->throw_exception($exception) - } - - # We were not connected, and was first try - reconnect and retry - # via the while loop - carp "Retrying $coderef after catching disconnected exception: $exception" - if $ENV{DBIC_TXNRETRY_DEBUG}; - $self->_populate_dbh; - } + # connects or reconnects on pid change, necessary to grab correct txn_depth + $_[0]->_get_dbh; + local $_[0]->{_in_do_block} = 1; + shift->next::method(@_); } =head2 disconnect @@ -887,7 +830,8 @@ sub disconnect { $self->_do_connection_actions(disconnect_call_ => $_) for @actions; - $self->_dbh_rollback unless $self->_dbh_autocommit; + # stops the "implicit rollback on disconnect" warning + $self->_exec_txn_rollback unless $self->_dbh_autocommit; %{ $self->_dbh->{CachedKids} } = (); $self->_dbh->disconnect; @@ -1370,118 +1314,23 @@ sub _connect { $dbh; } -sub svp_begin { - my ($self, $name) = @_; - - $name = $self->_svp_generate_name - unless defined $name; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_begin'); - - push @{ $self->{savepoints} }, $name; - - $self->debugobj->svp_begin($name) if $self->debug; - - return $self->_svp_begin($name); -} - -sub svp_release { - my ($self, $name) = @_; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_release'); - - if (defined $name) { - $self->throw_exception ("Savepoint '$name' does not exist") - unless grep { $_ eq $name } @{ $self->{savepoints} }; - - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. - my $svp; - - do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name; - } else { - $name = pop @{ $self->{savepoints} }; - } - - $self->debugobj->svp_release($name) if $self->debug; - - return $self->_svp_release($name); -} - -sub svp_rollback { - my ($self, $name) = @_; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_rollback'); - - if (defined $name) { - # If they passed us a name, verify that it exists in the stack - unless(grep({ $_ eq $name } @{ $self->{savepoints} })) { - $self->throw_exception("Savepoint '$name' does not exist!"); - } - - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. - while(my $s = pop(@{ $self->{savepoints} })) { - last if($s eq $name); - } - # Add the savepoint back to the stack, as a rollback doesn't remove the - # named savepoint, only everything after it. - push(@{ $self->{savepoints} }, $name); - } else { - # We'll assume they want to rollback to the last savepoint - $name = $self->{savepoints}->[-1]; - } - - $self->debugobj->svp_rollback($name) if $self->debug; - - return $self->_svp_rollback($name); -} - -sub _svp_generate_name { - my ($self) = @_; - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); -} - sub txn_begin { my $self = shift; # this means we have not yet connected and do not know the AC status - # (e.g. coderef $dbh) + # (e.g. coderef $dbh), need a full-fledged connection check if (! 
   if (! defined $self->_dbh_autocommit) {
     $self->ensure_connected;
   }
-  # otherwise re-connect on pid changes, so
-  # that the txn_depth is adjusted properly
-  # the lightweight _get_dbh is good enoug here
-  # (only superficial handle check, no pings)
+  # Otherwise simply connect or re-connect on pid changes
   else {
     $self->_get_dbh;
   }

-  if($self->transaction_depth == 0) {
-    $self->debugobj->txn_begin()
-      if $self->debug;
-    $self->_dbh_begin_work;
-  }
-  elsif ($self->auto_savepoint) {
-    $self->svp_begin;
-  }
-  $self->{transaction_depth}++;
+  $self->next::method(@_);
 }

-sub _dbh_begin_work {
+sub _exec_txn_begin {
   my $self = shift;

   # if the user is utilizing txn_do - good for him, otherwise we need to
@@ -1489,7 +1338,7 @@ sub _dbh_begin_work {
   # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
   # will be replaced by a failure of begin_work itself (which will be
   # then retried on reconnect)
-  if ($self->{_in_dbh_do}) {
+  if ($self->{_in_do_block}) {
     $self->_dbh->begin_work;
   }
   else {
     $self->dbh_do(sub { $_[1]->begin_work });
@@ -1498,89 +1347,87 @@ sub txn_commit {
   my $self = shift;

-  if (! $self->_dbh) {
-    $self->throw_exception('cannot COMMIT on a disconnected handle');
-  }
-  elsif ($self->{transaction_depth} == 1) {
-    $self->debugobj->txn_commit()
-      if ($self->debug);
-    $self->_dbh_commit;
-    $self->{transaction_depth} = 0
-      if $self->_dbh_autocommit;
-  }
-  elsif($self->{transaction_depth} > 1) {
-    $self->{transaction_depth}--;
-    $self->svp_release
-      if $self->auto_savepoint;
-  }
-  elsif (! $self->_dbh->FETCH('AutoCommit') ) {
-    carp "Storage transaction_depth $self->{transaction_depth} does not match "
-        ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
+  $self->_verify_pid if $self->_dbh;
+  $self->throw_exception("Unable to txn_commit() on a disconnected storage")
+    unless $self->_dbh;

-    $self->debugobj->txn_commit()
-      if ($self->debug);
-    $self->_dbh_commit;
-    $self->{transaction_depth} = 0
-      if $self->_dbh_autocommit;
-  }
-  else {
-    $self->throw_exception( 'Refusing to commit without a started transaction' );
+  # esoteric case for folks using external $dbh handles
+  if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+    carp "Storage transaction_depth 0 does not match "
+        ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
+    $self->transaction_depth(1);
   }
+
+  $self->next::method(@_);
+
+  # if AutoCommit is disabled txn_depth never goes to 0
+  # as a new txn is started immediately on commit
+  $self->transaction_depth(1) if (
+    !$self->transaction_depth
+      and
+    defined $self->_dbh_autocommit
+      and
+    ! $self->_dbh_autocommit
+  );
 }

-sub _dbh_commit {
-  my $self = shift;
-  my $dbh = $self->_dbh
-    or $self->throw_exception('cannot COMMIT on a disconnected handle');
-  $dbh->commit;
+sub _exec_txn_commit {
+  shift->_dbh->commit;
 }

 sub txn_rollback {
   my $self = shift;
-  my $dbh = $self->_dbh;
-  try {
-    if ($self->{transaction_depth} == 1) {
-      $self->debugobj->txn_rollback()
-        if ($self->debug);
-      $self->{transaction_depth} = 0
-        if $self->_dbh_autocommit;
-      $self->_dbh_rollback;
-    }
-    elsif($self->{transaction_depth} > 1) {
-      $self->{transaction_depth}--;
-      if ($self->auto_savepoint) {
-        $self->svp_rollback;
-        $self->svp_release;
-      }
-    }
-    else {
-      die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
-    }
+
+  $self->_verify_pid if $self->_dbh;
+  $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
+    unless $self->_dbh;
+
+  # esoteric case for folks using external $dbh handles
+  if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+    carp "Storage transaction_depth 0 does not match "
+        ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway";
+    $self->transaction_depth(1);
   }
-  catch {
-    my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
-
-    if ($_ !~ /$exception_class/) {
-      # ensure that a failed rollback resets the transaction depth
-      $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
-    }
+  $self->next::method(@_);

-    $self->throw_exception($_)
-  };
+  # if AutoCommit is disabled txn_depth never goes to 0
+  # as a new txn is started immediately on commit
+  $self->transaction_depth(1) if (
+    !$self->transaction_depth
+      and
+    defined $self->_dbh_autocommit
+      and
+    ! $self->_dbh_autocommit
+  );
 }

-sub _dbh_rollback {
-  my $self = shift;
-  my $dbh = $self->_dbh
-    or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
-  $dbh->rollback;
+sub _exec_txn_rollback {
+  shift->_dbh->rollback;
+}
+
+# generate some identical methods
+for my $meth (qw/svp_begin svp_release svp_rollback/) {
+  no strict qw/refs/;
+  *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
+    my $self = shift;
+    $self->_verify_pid if $self->_dbh;
+    $self->throw_exception("Unable to $meth() on a disconnected storage")
+      unless $self->_dbh;
+    $self->next::method(@_);
+  };
 }

 # This used to be the top-half of _execute. It was split out to make it
 # easier to override in NoBindVars without duping the rest. It takes up
 # all of _execute's args, and emits $sql, @bind.
 sub _prep_for_execute {
+  #my ($self, $op, $ident, $args) = @_;
+  return shift->_gen_sql_bind(@_)
+}
+
+sub _gen_sql_bind {
   my ($self, $op, $ident, $args) = @_;

   my ($sql, @bind) = $self->sql_maker->$op(
@@ -1588,42 +1435,70 @@ sub _prep_for_execute {
     @$args,
   );

-  my (@final_bind, $colinfos);
+  if (
+    ! $ENV{DBIC_DT_SEARCH_OK}
+      and
+    $op eq 'select'
+      and
+    first { blessed($_->[1]) && $_->[1]->isa('DateTime') } @bind
+  ) {
+    carp_unique 'DateTime objects passed to search() are not supported '
+      . 'properly (InflateColumn::DateTime formats and settings are not '
+      . 'respected.) See "Formatting DateTime objects in queries" in '
+      . 'DBIx::Class::Manual::Cookbook. To disable this warning for good '
+      . 'set $ENV{DBIC_DT_SEARCH_OK} to true'
+  }
+
+  return( $sql, $self->_resolve_bindattrs(
+    $ident, [ @{$args->[2]{bind}||[]}, @bind ]
+  ));
+}
+
+sub _resolve_bindattrs {
+  my ($self, $ident, $bind, $colinfos) = @_;
+
+  $colinfos ||= {};
+
   my $resolve_bindinfo = sub {
-    $colinfos ||= $self->_resolve_column_info($ident);
-    if (my $col = $_[1]->{dbic_colname}) {
-      $_[1]->{sqlt_datatype} ||= $colinfos->{$col}{data_type}
+    #my $infohash = shift;
+
+    %$colinfos = %{ $self->_resolve_column_info($ident) }
+      unless keys %$colinfos;
+
+    my $ret;
+    if (my $col = $_[0]->{dbic_colname}) {
+      $ret = { %{$_[0]} };
+
+      $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type}
        if $colinfos->{$col}{data_type};
-      $_[1]->{sqlt_size} ||= $colinfos->{$col}{size}
+
+      $ret->{sqlt_size} ||= $colinfos->{$col}{size}
        if $colinfos->{$col}{size};
    }
-    $_[1];
-  };

-  for my $e (@{$args->[2]{bind}||[]}, @bind) {
-    push @final_bind, [ do {
-      if (ref $e ne 'ARRAY') {
-        ({}, $e)
-      }
-      elsif (! defined $e->[0]) {
-        ({}, $e->[1])
-      }
-      elsif (ref $e->[0] eq 'HASH') {
-        (
-          (first { $e->[0]{$_} } qw/dbd_attrs sqlt_datatype/)
-            ? $e->[0]
-            : $self->$resolve_bindinfo($e->[0]),
-          $e->[1]
-        )
-      }
-      elsif (ref $e->[0] eq 'SCALAR') {
-        ( { sqlt_datatype => ${$e->[0]} }, $e->[1] )
-      }
-      else {
-        ( $self->$resolve_bindinfo({ dbic_colname => $e->[0] }), $e->[1] )
-      }
-    }];
-  }
+    $ret || $_[0];
+  };

-  ($sql, \@final_bind);
+  return [ map {
+    if (ref $_ ne 'ARRAY') {
+      [{}, $_]
+    }
+    elsif (! defined $_->[0]) {
+      [{}, $_->[1]]
+    }
+    elsif (ref $_->[0] eq 'HASH') {
+      [
+        ($_->[0]{dbd_attrs} or $_->[0]{sqlt_datatype}) ? $_->[0] : $resolve_bindinfo->($_->[0]),
+        $_->[1]
+      ]
+    }
+    elsif (ref $_->[0] eq 'SCALAR') {
+      [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ]
+    }
+    else {
+      [ $resolve_bindinfo->({ dbic_colname => $_->[0] }), $_->[1] ]
+    }
+  } @$bind ];
 }

 sub _format_for_trace {
@@ -1683,11 +1558,17 @@ sub _dbi_attrs_for_bind {

   for (map { $_->[0] } @$bind) {
     push @attrs, do {
-      if ($_->{dbd_attrs}) {
+      if (exists $_->{dbd_attrs}) {
         $_->{dbd_attrs}
       }
       elsif($_->{sqlt_datatype}) {
-        $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef;
+        # cache the result in the dbh_details hash, as it can not change unless
+        # we connect to something else
+        my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {};
+        if (not exists $cache->{$_->{sqlt_datatype}}) {
+          $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef;
+        }
+        $cache->{$_->{sqlt_datatype}};
       }
       elsif ($sba_attrs and $_->{dbic_colname}) {
         $sba_attrs->{$_->{dbic_colname}} || undef;
@@ -1875,9 +1756,12 @@ sub insert {

 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;

+  my @col_range = (0..$#$cols);
+
   # FIXME - perhaps this is not even needed? does DBI stringify?
   #
   # forcibly stringify whatever is stringifiable
+  # ResultSet::populate() hands us a copy - safe to mangle
   for my $r (0 .. $#$data) {
     for my $c (0 .. $#{$data->[$r]}) {
       $data->[$r][$c] = "$data->[$r][$c]"
     }
   }

-  # check the data for consistency
-  # report a sensible error on bad data
+  my $colinfo_cache = {}; # since we will run _resolve_bindattrs on the same $source a lot
+
+  # get a slice type index based on first row of data
+  # a "column" in this context may refer to more than one bind value
+  # e.g. \[ '?, ?', [...], [...] ]
+  #
+  # construct the value type index - a description of value types for every
+  # per-column slice of $data:
+  #
+  #   nonexistent - nonbind literal
+  #   0 - regular value
+  #   [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
   #
-  # also create a list of dynamic binds (ones that will be changing
-  # for each row)
-  my $dyn_bind_idx;
-  for my $col_idx (0..$#$cols) {
+  # also construct the column hash to pass to the SQL generator. For plain
+  # (non literal) values - convert the members of the first row into a
+  # literal+bind combo, with extra positional info in the bind attr hashref.
+  # This will allow us to match the order properly, and is so contrived
+  # because a user-supplied literal/bind (or something else specific to a
+  # resultsource and/or storage driver) can inject extra binds along the
+  # way, so one can't rely on "shift positions" ordering at all. Also we
+  # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+  # can be later matched up by address), because we want to supply a real
+  # value on which perhaps e.g. datatype checks will be performed
+  my ($proto_data, $value_type_idx);
+  for my $i (@col_range) {
+    my $colname = $cols->[$i];
+    if (ref $data->[0][$i] eq 'SCALAR') {
+      # no bind value at all - no type
+
+      $proto_data->{$colname} = $data->[0][$i];
+    }
+    elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+      # repack, so we don't end up mangling the original \[]
+      my ($sql, @bind) = @${$data->[0][$i]};

-    # the first "row" is used as a point of reference
-    my $reference_val = $data->[0][$col_idx];
-    my $is_literal = ref $reference_val eq 'SCALAR';
-    my $is_literal_bind = ( !$is_literal and (
-      ref $reference_val eq 'REF'
-        and
-      ref $$reference_val eq 'ARRAY'
-    ) );
-
-    $dyn_bind_idx->{$col_idx} = 1
-      if (!$is_literal and !$is_literal_bind);
-
-    # use a closure for convenience (less to pass)
-    my $bad_slice = sub {
-      my ($msg, $slice_idx) = @_;
-      $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
-        $msg,
-        $cols->[$col_idx],
-        do {
-          require Data::Dumper::Concise;
-          local $Data::Dumper::Maxdepth = 2;
-          Data::Dumper::Concise::Dumper ({
-            map { $cols->[$_] =>
-              $data->[$slice_idx][$_]
-            } (0 .. $#$cols)
-          }),
-        }
+      # normalization of user supplied stuff
+      my $resolved_bind = $self->_resolve_bindattrs(
+        $source, \@bind, $colinfo_cache,
       );
-    };
+
+      # store value-less (attrs only) bind info - we will be comparing all
+      # supplied binds against this for sanity
+      $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+
+      $proto_data->{$colname} = \[ $sql, map { [
+        # inject slice order to use for $proto_bind construction
+        { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+          =>
+        $resolved_bind->[$_][1]
+      ] } (0 .. $#bind)
+      ];
+    }
+    else {
+      $value_type_idx->{$i} = 0;
+
+      $proto_data->{$colname} = \[ '?', [
+        { dbic_colname => $colname, _bind_data_slice_idx => $i }
+          =>
+        $data->[0][$i]
+      ] ];
+    }
+  }
+
+  my ($sql, $proto_bind) = $self->_prep_for_execute (
+    'insert',
+    $source,
+    [ $proto_data ],
+  );
+
+  if (! @$proto_bind and keys %$value_type_idx) {
+    # if the bindlist is empty and we had some dynamic binds, this means the
+    # storage ate them away (e.g. the NoBindVars component) and interpolated
+    # them directly into the SQL. This obviously can't be good for multi-inserts
+    $self->throw_exception('Cannot insert_bulk without support for placeholders');
+  }
+
+  # sanity checks
+  # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+  #
+  # use an error reporting closure for convenience (less to pass)
+  my $bad_slice_report_cref = sub {
+    my ($msg, $r_idx, $c_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$c_idx],
+      do {
+        require Data::Dumper::Concise;
+        local $Data::Dumper::Maxdepth = 5;
+        Data::Dumper::Concise::Dumper ({
+          map { $cols->[$_] =>
+            $data->[$r_idx][$_]
+          } @col_range
+        }),
+      }
+    );
+  };
+
+  for my $col_idx (@col_range) {
+    my $reference_val = $data->[0][$col_idx];

     for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
       my $val = $data->[$row_idx][$col_idx];

-      if ($is_literal) { # literal no binds
+      if (! exists $value_type_idx->{$col_idx}) { # literal no binds
         if (ref $val ne 'SCALAR') {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
         elsif ($$val ne $$reference_val) {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Inconsistent literal SQL value (expecting \\'$$reference_val')",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
       }
-      elsif ($is_literal_bind) {
+      elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+        if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
+          $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
+        }
+      }
+      else { # binds from a \[], compare type and attrs
         if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
-            $row_idx
-          );
-        }
-        elsif (${$val}->[0] ne ${$reference_val}->[0]) {
-          $bad_slice->(
-            "Inconsistent literal SQL-bind value (expecting \\['${$reference_val}->[0]', ... ])",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
-      }
-      elsif (ref $val) {
-        if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
-          $bad_slice->("Literal SQL found where a plain bind value is expected", $row_idx);
-        }
-        else {
-          $bad_slice->("$val reference found where bind expected", $row_idx);
+        # start drilling down and bail out early on identical refs
+        elsif (
+          $reference_val != $val
+            or
+          $$reference_val != $$val
+        ) {
+          if (${$val}->[0] ne ${$reference_val}->[0]) {
+            $bad_slice_report_cref->(
+              "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+              $row_idx,
+              $col_idx,
+            );
+          }
+          # need to check the bind attrs - a bind will happen only once for
+          # the entire dataset, so any changes further down will be ignored.
+          elsif (! Data::Compare::Compare(
+            $value_type_idx->{$col_idx},
+            [
+              map
+                { $_->[0] }
+                @{$self->_resolve_bindattrs(
+                  $source, [ @{$$val}[1 .. $#$$val] ], $colinfo_cache,
+                )}
+            ],
+          )) {
+            $bad_slice_report_cref->(
+              'Differing bind attributes on literal/bind values not supported',
+              $row_idx,
+              $col_idx,
+            );
+          }
         }
       }
     }
   }

-  # Get the sql with bind values interpolated where necessary. For dynamic
-  # binds convert the values of the first row into a literal+bind combo, with
-  # extra positional info in the bind attr hashref. This will allow us to match
-  # the order properly, and is so contrived because a user-supplied literal
-  # bind (or something else specific to a resultsource and/or storage driver)
-  # can inject extra binds along the way, so one can't rely on "shift
-  # positions" ordering at all. Also we can't just hand SQLA a set of some
-  # known "values" (e.g. hashrefs that can be later matched up by address),
-  # because we want to supply a real value on which perhaps e.g. datatype
-  # checks will be performed
-  my ($sql, $proto_bind) = $self->_prep_for_execute (
-    'insert',
-    $source,
-    [ { map { $cols->[$_] => $dyn_bind_idx->{$_}
-      ? \[ '?', [
-          { dbic_colname => $cols->[$_], _bind_data_slice_idx => $_ }
-            =>
-          $data->[0][$_]
-        ] ]
-      : $data->[0][$_]
-    } (0..$#$cols) } ],
-  );
-
-  if (! @$proto_bind and keys %$dyn_bind_idx) {
-    # if the bindlist is empty and we had some dynamic binds, this means the
-    # storage ate them away (e.g. the NoBindVars component) and interpolated
-    # them directly into the SQL. This obviosly can't be good for multi-inserts
-    $self->throw_exception('Cannot insert_bulk without support for placeholders');
-  }
-
-  # neither _execute_array, nor _execute_inserts_with_no_binds are
-  # atomic (even if _execute _array is a single call). Thus a safety
+  # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
+  # are atomic (even if execute_for_fetch is a single call). Thus a safety
   # scope guard
   my $guard = $self->txn_scope_guard;

@@ -2006,7 +1948,7 @@ sub insert_bulk {

   if (@$proto_bind) {
     # proto bind contains the information on which pieces of $data to pull
     # $cols is passed in only for prettier error-reporting
-    $self->_execute_array( $source, $sth, $proto_bind, $cols, $data );
+    $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data );
   }
   else {
     # bind_param_array doesn't work if there are no binds
@@ -2021,37 +1963,63 @@ sub insert_bulk {

   return (wantarray ? ($rv, $sth, @$proto_bind) : $rv);
 }

-sub _execute_array {
-  my ($self, $source, $sth, $proto_bind, $cols, $data, @extra) = @_;
+# execute_for_fetch is capable of returning data just fine (it means it
+# can be used for INSERT...RETURNING and UPDATE...RETURNING). Since this
+# is the void-populate fast-path we will just ignore this altogether
+# for the time being.
+sub _dbh_execute_for_fetch {
+  my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;

-  ## This must be an arrayref, else nothing works!
-  my $tuple_status = [];
+  my @idx_range = ( 0 .. $#$proto_bind );

-  my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);
+  # If we have any bind attributes to take care of, we will bind the
+  # proto-bind data (which will never be used by execute_for_fetch)
+  # However since column bindtypes are "sticky", this is sufficient
+  # to get the DBD to apply the bindtype to all values later on

-  # Bind the values by column slices
-  for my $i (0 .. $#$proto_bind) {
-    my $data_slice_idx = (
-      ref $proto_bind->[$i][0] eq 'HASH'
-        and
-      exists $proto_bind->[$i][0]{_bind_data_slice_idx}
-    ) ? $proto_bind->[$i][0]{_bind_data_slice_idx} : undef;
+  my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);

-    $sth->bind_param_array(
+  for my $i (@idx_range) {
+    $sth->bind_param (
       $i+1, # DBI bind indexes are 1-based
-      defined $data_slice_idx
-        # either get a "column" of dynamic values, or just repeat the same
-        # bind over and over
-        ? [ map { $_->[$data_slice_idx] } @$data ]
-        : [ ($proto_bind->[$i][1]) x @$data ]
-      ,
-      defined $bind_attrs->[$i] ? $bind_attrs->[$i] : (), # some DBDs throw up when given an undef
-    );
-  }
+      $proto_bind->[$i][1],
+      $bind_attrs->[$i],
+    ) if defined $bind_attrs->[$i];
+  }
+
+  # At this point $data slots named in the _bind_data_slice_idx of
+  # each piece of $proto_bind are either \[]s or plain values to be
+  # passed in. Construct the dispensing coderef. *NOTE* the order
+  # of $data will differ from that of the ?s in the SQL (due to
+  # alphabetical ordering by colname). We actually do want to
+  # preserve this behavior so that prepare_cached has a better
+  # chance of matching on unrelated calls
+  my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
+
+  my $fetch_row_idx = -1; # saner loop this way
+  my $fetch_tuple = sub {
+    return undef if ++$fetch_row_idx > $#$data;
+
+    return [ map
+      { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
+        ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
+        : $_
+      }
+      map
+        { $data->[$fetch_row_idx][$_] }
+      sort
+        { $data_reorder{$a} <=> $data_reorder{$b} }
+      keys %data_reorder
+    ];
+  };

+  my $tuple_status = [];
   my ($rv, $err);
   try {
-    $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra);
+    $rv = $sth->execute_for_fetch(
+      $fetch_tuple,
+      $tuple_status,
+    );
   }
   catch {
     $err = shift;
@@ -2081,7 +2049,7 @@ sub _execute_array {
       if ($i > $#$tuple_status);

     require Data::Dumper::Concise;
-    $self->throw_exception(sprintf "execute_array() aborted with '%s' at populate slice:\n%s",
+    $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
       ($tuple_status->[$i][1] || $err),
       Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
     );
   }

   return $rv;
 }

-sub _dbh_execute_array {
-  #my ($self, $sth, $tuple_status, @extra) = @_;
-  return $_[1]->execute_array({ArrayTupleStatus => $_[2]});
-}
-
 sub _dbh_execute_inserts_with_no_binds {
   my ($self, $sth, $count) = @_;

@@ -2134,103 +2097,6 @@ sub delete {
   shift->_execute('delete', @_);
 }

-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-
-  # quick check if we got a sane rs on our hands
-  my @pcols = $rsrc->_pri_cols;
-
-  my $sel = $rs->_resolved_attrs->{select};
-  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
-  if (
-    join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
-      ne
-    join ("\x00", sort @$sel )
-  ) {
-    $self->throw_exception (
-      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
-    );
-  }
-
-  if (@pcols == 1) {
-    return $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      { $pcols[0] => { -in => $rs->as_query } },
-    );
-  }
-
-  else {
-    return $self->_multipk_update_delete (@_);
-  }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
-  return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->_pri_cols;
-
-  my $guard = $self->txn_scope_guard;
-
-  # emulate the return value of $sth->execute for non-selects
-  my $row_cnt = '0E0';
-
-  my $subrs_cur = $rs->cursor;
-  my @all_pk = $subrs_cur->all;
-  for my $pks ( @all_pk) {
-
-    my $cond;
-    for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks->[$i];
-    }
-
-    $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      $cond,
-    );
-
-    $row_cnt++;
-  }
-
-  $guard->commit;
-
-  return $row_cnt;
-}
-
 sub _select {
   my $self = shift;
   $self->_execute($self->_select_args(@_));
@@ -2244,8 +2110,8 @@ sub _select_args_to_query {

   my ($op, $ident, @args) = $self->_select_args(@_);

-  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
-  my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $ident, \@args);
+  # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
+  my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, \@args);
   $prepared_bind ||= [];

   return wantarray
@@ -2385,16 +2251,6 @@ storage driver. Can be overridden by supplying an explicit
 L to L. For a list of available limit dialects
 see L.

-=head2 sth
-
-=over 4
-
-=item Arguments: $sql
-
-=back
-
-Returns a L sth (statement handle) for the supplied SQL.
-
 =cut

 sub _dbh_sth {
@@ -3031,7 +2887,7 @@ sub _max_column_bytesize {
     if ($data_type =~ /^(?:
       l? (?:var)? char(?:acter)? (?:\s*varying)?
         |
-      (?:var)? binary (?:\s*varying)?
+      (?:var)? binary (?:\s*varying)? | raw
     )\b/x
@@ -3091,7 +2947,8 @@ sub _is_text_lob_type {

 DBIx::Class can do some wonderful magic with handling exceptions,
 disconnections, and transactions when you use C<< AutoCommit => 1 >>
-(the default) combined with C for transaction support.
+(the default) combined with L for
+transaction support.

 If you set C<< AutoCommit => 0 >> in your connect info, then you are
 always in an assumed transaction between commits, and you're telling us you'd