use Sub::Name 'subname';
use Try::Tiny;
use overload ();
+use Data::Compare (); # no imports!!! guard against insane architecture
use namespace::clean;
# default cursor class, overridable in connect_info attributes
my @capabilities = (qw/
insert_returning
insert_returning_bound
+
+ multicolumn_in
+
placeholders
typeless_placeholders
+
join_optimizer
/);
__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
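# (the map above yields one accessor per capability, named along the lines of
#  _supports_placeholders, _supports_insert_returning, etc.)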
my $self = shift;
# some databases spew warnings on implicit disconnect
+ $self->_verify_pid;
local $SIG{__WARN__} = sub {};
$self->_dbh(undef);
sub dbh_do {
my $self = shift;
- my $code = shift;
+ my $run_target = shift;
- my $dbh = $self->_get_dbh;
-
- return $self->$code($dbh, @_)
- if ( $self->{_in_do_block} || $self->{transaction_depth} );
-
- local $self->{_in_do_block} = 1;
+ # short circuit when we know there is no need for a runner
+ #
+ # FIXME - assumption may be wrong
+ # the rationale for the txn_depth check is that if this block is a part
+ # of a larger transaction, everything up to that point is screwed anyway
+ return $self->$run_target($self->_get_dbh, @_)
+ if $self->{_in_do_block} or $self->transaction_depth;
- # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
my $args = \@_;
- try {
- $self->$code ($dbh, @$args);
- } catch {
- $self->throw_exception($_) if $self->connected;
-
- # We were not connected - reconnect and retry, but let any
- # exception fall right through this time
- carp "Retrying dbh_do($code) after catching disconnected exception: $_"
- if $ENV{DBIC_STORAGE_RETRY_DEBUG};
-
- $self->_populate_dbh;
- $self->$code($self->_dbh, @$args);
- };
+ DBIx::Class::Storage::BlockRunner->new(
+ storage => $self,
+ run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+ wrap_txn => 0,
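+ # retry at most once, and only when the storage no longer appears connected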
+ retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
+ )->run;
}
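# Illustrative usage only: the coderef (or method name) handed to dbh_do
# receives the storage object and a live $dbh as its first two arguments, e.g.
#
#   $schema->storage->dbh_do(sub {
#     my ($storage, $dbh, @args) = @_;
#     $dbh->selectall_arrayref('SELECT 1');
#   });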
sub txn_do {
- # connects or reconnects on pid change, necessary to grab correct txn_depth
- $_[0]->_get_dbh;
- local $_[0]->{_in_do_block} = 1;
+ $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth
shift->next::method(@_);
}
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
+ my @col_range = (0..$#$cols);
+
# FIXME - perhaps this is not even needed? does DBI stringify?
#
# forcibly stringify whatever is stringifiable
+ # ResultSet::populate() hands us a copy - safe to mangle
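+ # (string-overloaded objects, e.g. DateTime instances, are collapsed to their
+ # plain string form here)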
for my $r (0 .. $#$data) {
for my $c (0 .. $#{$data->[$r]}) {
$data->[$r][$c] = "$data->[$r][$c]"
}
}
- # check the data for consistency
- # report a sensible error on bad data
+ my $colinfo_cache = {}; # since we will run _resolve_bindattrs on the same $source a lot
+
+ # get a slice type index based on first row of data
+ # a "column" in this context may refer to more than one bind value
+ # e.g. \[ '?, ?', [...], [...] ]
+ #
+ # construct the value type index - a description of value types for every
+ # per-column slice of $data:
#
- # also create a list of dynamic binds (ones that will be changing
- # for each row)
- my $dyn_bind_idx;
- for my $col_idx (0..$#$cols) {
+ # nonexistent - nonbind literal
+ # 0 - regular value
+ # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
+ #
+ # also construct the column hash to pass to the SQL generator. For plain
+ # (non-literal) values - convert the members of the first row into a
+ # literal+bind combo, with extra positional info in the bind attr hashref.
+ # This will allow us to match the order properly, and is so contrived
+ # because a user-supplied literal/bind (or something else specific to a
+ # resultsource and/or storage driver) can inject extra binds along the
+ # way, so one can't rely on "shift positions" ordering at all. Also we
+ # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+ # can be later matched up by address), because we want to supply a real
+ # value on which perhaps e.g. datatype checks will be performed
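+ #
+ # purely illustrative example (hypothetical columns/values): given a first row
+ # of ( 42, \'DEFAULT', \[ 'LOWER(?)', [ {} => 'foo' ] ] ) the index ends up
+ # roughly as { 0 => 0, 2 => [ { ...resolved attrs... } ] } - column 1 gets no
+ # entry at all, being a bind-less literal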
+ my ($proto_data, $value_type_idx);
+ for my $i (@col_range) {
+ my $colname = $cols->[$i];
+ if (ref $data->[0][$i] eq 'SCALAR') {
+ # no bind value at all - no type
+
+ $proto_data->{$colname} = $data->[0][$i];
+ }
+ elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+ # repack, so we don't end up mangling the original \[]
+ my ($sql, @bind) = @${$data->[0][$i]};
- # the first "row" is used as a point of reference
- my $reference_val = $data->[0][$col_idx];
- my $is_literal = ref $reference_val eq 'SCALAR';
- my $is_literal_bind = ( !$is_literal and (
- ref $reference_val eq 'REF'
- and
- ref $$reference_val eq 'ARRAY'
- ) );
-
- $dyn_bind_idx->{$col_idx} = 1
- if (!$is_literal and !$is_literal_bind);
-
- # use a closure for convenience (less to pass)
- my $bad_slice = sub {
- my ($msg, $slice_idx) = @_;
- $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
- $msg,
- $cols->[$col_idx],
- do {
- require Data::Dumper::Concise;
- local $Data::Dumper::Maxdepth = 2;
- Data::Dumper::Concise::Dumper ({
- map { $cols->[$_] =>
- $data->[$slice_idx][$_]
- } (0 .. $#$cols)
- }),
- }
+ # normalize the user-supplied bind values/attributes
+ my $resolved_bind = $self->_resolve_bindattrs(
+ $source, \@bind, $colinfo_cache,
);
- };
+
+ # store value-less (attrs only) bind info - we will be comparing all
+ # supplied binds against this for sanity
+ $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+
+ $proto_data->{$colname} = \[ $sql, map { [
+ # inject slice order to use for $proto_bind construction
+ { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+ =>
+ $resolved_bind->[$_][1]
+ ] } (0 .. $#bind)
+ ];
+ }
+ else {
+ $value_type_idx->{$i} = 0;
+
+ $proto_data->{$colname} = \[ '?', [
+ { dbic_colname => $colname, _bind_data_slice_idx => $i }
+ =>
+ $data->[0][$i]
+ ] ];
+ }
+ }
+
+ my ($sql, $proto_bind) = $self->_prep_for_execute (
+ 'insert',
+ $source,
+ [ $proto_data ],
+ );
+
+ if (! @$proto_bind and keys %$value_type_idx) {
+ # if the bindlist is empty and we had some dynamic binds, this means the
+ # storage ate them away (e.g. the NoBindVars component) and interpolated
+ # them directly into the SQL. This obviously can't be good for multi-inserts
+ $self->throw_exception('Cannot insert_bulk without support for placeholders');
+ }
+
+ # sanity checks
+ # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+ #
+ # use an error reporting closure for convenience (less to pass)
+ my $bad_slice_report_cref = sub {
+ my ($msg, $r_idx, $c_idx) = @_;
+ $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+ $msg,
+ $cols->[$c_idx],
+ do {
+ require Data::Dumper::Concise;
+ local $Data::Dumper::Maxdepth = 5;
+ Data::Dumper::Concise::Dumper ({
+ map { $cols->[$_] =>
+ $data->[$r_idx][$_]
+ } @col_range
+ }),
+ }
+ );
+ };
+
+ for my $col_idx (@col_range) {
+ my $reference_val = $data->[0][$col_idx];
for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
my $val = $data->[$row_idx][$col_idx];
- if ($is_literal) {
+ if (! exists $value_type_idx->{$col_idx}) { # literal SQL, no binds
if (ref $val ne 'SCALAR') {
- $bad_slice->(
+ $bad_slice_report_cref->(
"Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
- $row_idx
+ $row_idx,
+ $col_idx,
);
}
elsif ($$val ne $$reference_val) {
- $bad_slice->(
+ $bad_slice_report_cref->(
"Inconsistent literal SQL value (expecting \\'$$reference_val')",
- $row_idx
+ $row_idx,
+ $col_idx,
);
}
}
- elsif ($is_literal_bind) {
+ elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+ if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
+ $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
+ }
+ }
+ else { # binds from a \[], compare type and attrs
if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
- $bad_slice->(
+ $bad_slice_report_cref->(
"Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
- $row_idx
+ $row_idx,
+ $col_idx,
);
}
- elsif (${$val}->[0] ne ${$reference_val}->[0]) {
- $bad_slice->(
- "Inconsistent literal SQL-bind value (expecting \\['${$reference_val}->[0]', ... ])",
- $row_idx
- );
- }
- }
- elsif (ref $val) {
- if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
- $bad_slice->("Literal SQL found where a plain bind value is expected", $row_idx);
- }
- else {
- $bad_slice->("$val reference found where bind expected", $row_idx);
+ # start drilling down and bail out early on identical refs
+ elsif (
+ $reference_val != $val
+ or
+ $$reference_val != $$val
+ ) {
+ if (${$val}->[0] ne ${$reference_val}->[0]) {
+ $bad_slice_report_cref->(
+ "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+ $row_idx,
+ $col_idx,
+ );
+ }
+ # need to check the bind attrs - a bind will happen only once for
+ # the entire dataset, so any changes further down will be ignored.
+ elsif (! Data::Compare::Compare(
+ $value_type_idx->{$col_idx},
+ [
+ map
+ { $_->[0] }
+ @{$self->_resolve_bindattrs(
+ $source, [ @{$$val}[1 .. $#$$val] ], $colinfo_cache,
+ )}
+ ],
+ )) {
+ $bad_slice_report_cref->(
+ 'Differing bind attributes on literal/bind values not supported',
+ $row_idx,
+ $col_idx,
+ );
+ }
}
}
}
}
- # Get the sql with bind values interpolated where necessary. For dynamic
- # binds convert the values of the first row into a literal+bind combo, with
- # extra positional info in the bind attr hashref. This will allow us to match
- # the order properly, and is so contrived because a user-supplied literal
- # bind (or something else specific to a resultsource and/or storage driver)
- # can inject extra binds along the way, so one can't rely on "shift
- # positions" ordering at all. Also we can't just hand SQLA a set of some
- # known "values" (e.g. hashrefs that can be later matched up by address),
- # because we want to supply a real value on which perhaps e.g. datatype
- # checks will be performed
- my ($sql, $proto_bind) = $self->_prep_for_execute (
- 'insert',
- $source,
- [ { map { $cols->[$_] => $dyn_bind_idx->{$_}
- ? \[ '?', [
- { dbic_colname => $cols->[$_], _bind_data_slice_idx => $_ }
- =>
- $data->[0][$_]
- ] ]
- : $data->[0][$_]
- } (0..$#$cols) } ],
- );
-
- if (! @$proto_bind and keys %$dyn_bind_idx) {
- # if the bindlist is empty and we had some dynamic binds, this means the
- # storage ate them away (e.g. the NoBindVars component) and interpolated
- # them directly into the SQL. This obviosly can't be good for multi-inserts
- $self->throw_exception('Cannot insert_bulk without support for placeholders');
- }
-
# neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
# are atomic (even if execute_for_fetch is a single call). Thus a safety
# scope guard
) if defined $bind_attrs->[$i];
}
- my $data_slice_idx = [ map {
- (
- ref $proto_bind->[$_][0] eq 'HASH'
- and
- exists $proto_bind->[$_][0]{_bind_data_slice_idx}
- ) ? $proto_bind->[$_][0]{_bind_data_slice_idx} : undef;
- } @idx_range ];
+ # At this point $data slots named in the _bind_data_slice_idx of
+ # each piece of $proto_bind are either \[]s or plain values to be
+ # passed in. Construct the dispensing coderef. *NOTE* the order
+ # of $data will differ from that of the ?s in the SQL (due to
+ # alphabetical ordering by colname). We actually do want to
+ # preserve this behavior so that prepare_cached has a better
+ # chance of matching on unrelated calls
+ my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
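+ # (e.g. for a hypothetical bind layout this could come out as
+ #  ( 3 => 0, 0 => 1, 5 => 2 ) - data slice index mapped to bind position)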
my $fetch_row_idx = -1; # saner loop this way
my $fetch_tuple = sub {
return undef if ++$fetch_row_idx > $#$data;
- return [ map {
- defined $data_slice_idx->[$_]
- ? $data->[$fetch_row_idx][$data_slice_idx->[$_]]
- : $proto_bind->[$_][1]
- } @idx_range ];
+ return [ map
+ { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
+ ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
+ : $_
+ }
+ map
+ { $data->[$fetch_row_idx][$_]}
+ sort
+ { $data_reorder{$a} <=> $data_reorder{$b} }
+ keys %data_reorder
+ ];
};
my $tuple_status = [];
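# $fetch_tuple and $tuple_status are then (presumably) handed to DBI's bulk
# interface, i.e. something like $sth->execute_for_fetch($fetch_tuple, $tuple_status)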
shift->_execute('delete', @_);
}
-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
-
- # quick check if we got a sane rs on our hands
- my @pcols = $rsrc->_pri_cols;
-
- my $sel = $rs->_resolved_attrs->{select};
- $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
- if (
- join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
- ne
- join ("\x00", sort @$sel )
- ) {
- $self->throw_exception (
- '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
- );
- }
-
- if (@pcols == 1) {
- return $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- { $pcols[0] => { -in => $rs->as_query } },
- );
- }
-
- else {
- return $self->_multipk_update_delete (@_);
- }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
- return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
- my @pcols = $rsrc->_pri_cols;
-
- my $guard = $self->txn_scope_guard;
-
- # emulate the return value of $sth->execute for non-selects
- my $row_cnt = '0E0';
-
- my $subrs_cur = $rs->cursor;
- my @all_pk = $subrs_cur->all;
- for my $pks ( @all_pk) {
-
- my $cond;
- for my $i (0.. $#pcols) {
- $cond->{$pcols[$i]} = $pks->[$i];
- }
-
- $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- $cond,
- );
-
- $row_cnt++;
- }
-
- $guard->commit;
-
- return $row_cnt;
-}
-
sub _select {
my $self = shift;
$self->_execute($self->_select_args(@_));