X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=fdfd16e60235b6510f203df26ba18be7b967749a;hb=0c11ad0ee5c8407f6b87d6e15c62a1b445076dc0;hp=607b1ef17d1fcaf14ac92904031e621b8bcfc39c;hpb=52cef7e30a43620553dc38ce52a10946b76a814c;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 607b1ef..fdfd16e 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -12,8 +12,11 @@ use DBIx::Class::Exception;
 use Scalar::Util qw/refaddr weaken reftype blessed/;
 use List::Util qw/first/;
 use Sub::Name 'subname';
+use Context::Preserve 'preserve_context';
 use Try::Tiny;
 use overload ();
+use Data::Compare (); # no imports!!! guard against insane architecture
+use DBI::Const::GetInfoType (); # no import of retarded global hash
 use namespace::clean;

 # default cursor class, overridable in connect_info attributes
@@ -33,6 +36,7 @@ __PACKAGE__->sql_name_sep('.');
 __PACKAGE__->mk_group_accessors('simple' => qw/
   _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
   _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+  _perform_autoinc_retrieval _autoinc_supplied_for_op
 /);

 # the values for these accessors are picked out (and deleted) from
@@ -61,8 +65,12 @@ __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 my @capabilities = (qw/
   insert_returning
   insert_returning_bound
+
+  multicolumn_in
+
   placeholders
   typeless_placeholders
+
   join_optimizer
 /);
 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
@@ -190,16 +198,15 @@ sub new {
   my %seek_and_destroy;

   sub _arm_global_destructor {
-    my $self = shift;
-    my $key = refaddr ($self);
-    $seek_and_destroy{$key} = $self;
-    weaken ($seek_and_destroy{$key});
+    weaken (
+      $seek_and_destroy{ refaddr($_[0]) } = $_[0]
+    );
   }

   END {
     local $?; # just in case the DBI destructor changes it somehow

-    # destroy just the object if not native to this process/thread
+    # destroy just the object if not native to this process
     $_->_verify_pid for (grep
       { defined $_ }
       values %seek_and_destroy
@@ -210,14 +217,18 @@ sub new {
     # As per DBI's recommendation, DBIC disconnects all handles as
     # soon as possible (DBIC will reconnect only on demand from within
     # the thread)
-    for (values %seek_and_destroy) {
-      next unless $_;
+    my @instances = grep { defined $_ } values %seek_and_destroy;
+    for (@instances) {
       $_->{_dbh_gen}++;  # so that existing cursors will drop as well
       $_->_dbh(undef);

       $_->transaction_depth(0);
       $_->savepoints([]);
     }
+
+    # properly renumber all existing refs
+    %seek_and_destroy = ();
+    $_->_arm_global_destructor for @instances;
   }
 }

@@ -225,6 +236,7 @@ sub DESTROY {
   my $self = shift;

   # some databases spew warnings on implicit disconnect
+  $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
   local $SIG{__WARN__} = sub {};
   $self->_dbh(undef);
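The reworked `_arm_global_destructor` above registers each storage in a process-global hash while `weaken()`-ing the stored reference, so registration never keeps a storage alive, and the END block only ever sees objects still owned elsewhere. A minimal standalone sketch of the same weak-registry idiom (the `register`/`cleanup` names are illustrative, not DBIC API):

    use strict;
    use warnings;
    use Scalar::Util qw( refaddr weaken );

    my %registry;

    sub register {
      # store under the refaddr, then weaken the slot: the registry never
      # extends the object's lifetime, and the slot turns into undef as
      # soon as the last real reference elsewhere is gone
      weaken( $registry{ refaddr $_[0] } = $_[0] );
    }

    END {
      # only objects still alive at interpreter shutdown remain defined
      defined $_ and $_->cleanup for values %registry;
    }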
@@ -773,37 +785,28 @@ Example:

 sub dbh_do {
   my $self = shift;
-  my $code = shift;
+  my $run_target = shift;

-  my $dbh = $self->_get_dbh;
-
-  return $self->$code($dbh, @_)
-    if ( $self->{_in_do_block} || $self->{transaction_depth} );
-
-  local $self->{_in_do_block} = 1;
+  # short circuit when we know there is no need for a runner
+  #
+  # FIXME - assumption may be wrong
+  # the rationale for the txn_depth check is that if this block is a part
+  # of a larger transaction, everything up to that point is screwed anyway
+  return $self->$run_target($self->_get_dbh, @_)
+    if $self->{_in_do_block} or $self->transaction_depth;

-  # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
   my $args = \@_;

-  try {
-    $self->$code ($dbh, @$args);
-  } catch {
-    $self->throw_exception($_) if $self->connected;
-
-    # We were not connected - reconnect and retry, but let any
-    # exception fall right through this time
-    carp "Retrying dbh_do($code) after catching disconnected exception: $_"
-      if $ENV{DBIC_STORAGE_RETRY_DEBUG};
-
-    $self->_populate_dbh;
-    $self->$code($self->_dbh, @$args);
-  };
+  DBIx::Class::Storage::BlockRunner->new(
+    storage => $self,
+    run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+    wrap_txn => 0,
+    retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
+  )->run;
 }

 sub txn_do {
-  # connects or reconnects on pid change, necessary to grab correct txn_depth
-  $_[0]->_get_dbh;
-  local $_[0]->{_in_do_block} = 1;
+  $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth
   shift->next::method(@_);
 }
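The switch to DBIx::Class::Storage::BlockRunner leaves dbh_do's documented calling convention untouched; only the retry plumbing moves out. A short usage sketch from the caller's side (schema and table names are illustrative):

    my ($count) = $schema->storage->dbh_do(
      sub {
        my ($storage, $dbh, @args) = @_;
        # runs with a guaranteed-live $dbh; if the connection turns out
        # to be stale, the retry_handler above permits exactly one
        # reconnect-and-retry before any exception propagates
        $dbh->selectrow_array('SELECT COUNT(*) FROM artist');
      },
    );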
@@ -885,7 +888,7 @@ sub connected {
 sub _seems_connected {
   my $self = shift;

-  $self->_verify_pid;
+  $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;

   my $dbh = $self->_dbh
     or return 0;
@@ -933,7 +936,7 @@ sub dbh {
 # this is the internal "get dbh or connect (don't check)" method
 sub _get_dbh {
   my $self = shift;
-  $self->_verify_pid;
+  $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
   $self->_populate_dbh unless $self->_dbh;
   return $self->_dbh;
 }
@@ -1007,7 +1010,7 @@ sub _populate_dbh {

   $self->_dbh($self->_connect(@info));

-  $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads
+  $self->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads

   $self->_determine_driver;

@@ -1075,7 +1078,16 @@ sub _server_info {

     $info = {};

-    my $server_version = try { $self->_get_server_version };
+    my $server_version = try {
+      $self->_get_server_version
+    } catch {
+      # driver determination *may* use this codepath
+      # in which case we must rethrow
+      $self->throw_exception($_) if $self->{_in_determine_driver};
+
+      # $server_version on failure
+      undef;
+    };

     if (defined $server_version) {
       $info->{dbms_version} = $server_version;
@@ -1107,13 +1119,76 @@ sub _server_info {
 }

 sub _get_server_version {
-  shift->_dbh_get_info(18);
+  shift->_dbh_get_info('SQL_DBMS_VER');
 }

 sub _dbh_get_info {
   my ($self, $info) = @_;

-  return try { $self->_get_dbh->get_info($info) } || undef;
+  if ($info =~ /[^0-9]/) {
+    $info = $DBI::Const::GetInfoType::GetInfoType{$info};
+    $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType")
+      unless defined $info;
+  }
+
+  return $self->_get_dbh->get_info($info);
+}
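The symbolic-name support added to `_dbh_get_info` mirrors how `get_info` is normally used with stock DBI, where the name-to-number map lives in `%DBI::Const::GetInfoType::GetInfoType`. A minimal sketch against a bare DBI handle (the SQLite DSN is chosen only for the example; not every driver implements every info type, hence the `//` fallbacks):

    use strict;
    use warnings;
    use DBI;
    use DBI::Const::GetInfoType ();  # empty import list, as in the patch

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 });

    # get_info() itself only understands numeric codes; the hash maps the
    # ODBC-style symbolic names onto them
    my %t = %DBI::Const::GetInfoType::GetInfoType;

    printf "%s %s\n",
      $dbh->get_info( $t{SQL_DBMS_NAME} ) // 'unknown',
      $dbh->get_info( $t{SQL_DBMS_VER}  ) // 'unknown';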
+
+sub _describe_connection {
+  require DBI::Const::GetInfoReturn;
+
+  my $self = shift;
+  $self->ensure_connected;
+
+  my $res = {
+    DBIC_DSN => $self->_dbi_connect_info->[0],
+    DBI_VER => DBI->VERSION,
+    DBIC_VER => DBIx::Class->VERSION,
+    DBIC_DRIVER => ref $self,
+  };
+
+  for my $inf (
+    #keys %DBI::Const::GetInfoType::GetInfoType,
+    qw/
+      SQL_CURSOR_COMMIT_BEHAVIOR
+      SQL_CURSOR_ROLLBACK_BEHAVIOR
+      SQL_CURSOR_SENSITIVITY
+      SQL_DATA_SOURCE_NAME
+      SQL_DBMS_NAME
+      SQL_DBMS_VER
+      SQL_DEFAULT_TXN_ISOLATION
+      SQL_DM_VER
+      SQL_DRIVER_NAME
+      SQL_DRIVER_ODBC_VER
+      SQL_DRIVER_VER
+      SQL_EXPRESSIONS_IN_ORDERBY
+      SQL_GROUP_BY
+      SQL_IDENTIFIER_CASE
+      SQL_IDENTIFIER_QUOTE_CHAR
+      SQL_MAX_CATALOG_NAME_LEN
+      SQL_MAX_COLUMN_NAME_LEN
+      SQL_MAX_IDENTIFIER_LEN
+      SQL_MAX_TABLE_NAME_LEN
+      SQL_MULTIPLE_ACTIVE_TXN
+      SQL_MULT_RESULT_SETS
+      SQL_NEED_LONG_DATA_LEN
+      SQL_NON_NULLABLE_COLUMNS
+      SQL_ODBC_VER
+      SQL_QUALIFIER_NAME_SEPARATOR
+      SQL_QUOTED_IDENTIFIER_CASE
+      SQL_TXN_CAPABLE
+      SQL_TXN_ISOLATION_OPTION
+    /
+  ) {
+    my $v = $self->_dbh_get_info($inf);
+    next unless defined $v;
+
+    #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} );
+    my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v);
+    $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? " ($expl)" : '' );
+  }
+
+  $res;
+}

 sub _determine_driver {
@@ -1128,7 +1203,8 @@ sub _determine_driver {
       if ($self->_dbh) { # we are connected
         $driver = $self->_dbh->{Driver}{Name};
         $started_connected = 1;
-      } else {
+      }
+      else {
         # if connect_info is a CODEREF, we have no choice but to connect
         if (ref $self->_dbi_connect_info->[0] &&
             reftype $self->_dbi_connect_info->[0] eq 'CODE') {
@@ -1152,6 +1228,18 @@ sub _determine_driver {
           bless $self, $storage_class;
           $self->_rebless();
         }
+        else {
+          $self->_warn_undetermined_driver(
+            'This version of DBIC does not yet seem to supply a driver for '
+          . "your particular RDBMS and/or connection method ('$driver')."
+          );
+        }
+      }
+      else {
+        $self->_warn_undetermined_driver(
+          'Unable to extract a driver name from connect info - this '
+        . 'should not have happened.'
+        );
       }
     }
@@ -1166,6 +1254,48 @@ sub _determine_driver {
   }
 }

+sub _determine_connector_driver {
+  my ($self, $conn) = @_;
+
+  my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');
+
+  if (not $dbtype) {
+    $self->_warn_undetermined_driver(
+      'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your '
+    . "$conn connector - this should not have happened."
+    );
+    return;
+  }
+
+  $dbtype =~ s/\W/_/gi;
+
+  my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}";
+  return if $self->isa($subclass);
+
+  if ($self->load_optional_class($subclass)) {
+    bless $self, $subclass;
+    $self->_rebless;
+  }
+  else {
+    $self->_warn_undetermined_driver(
+      'This version of DBIC does not yet seem to supply a driver for '
+    . "your particular RDBMS and/or connection method ('$conn/$dbtype')."
+    );
+  }
+}
+
+sub _warn_undetermined_driver {
+  my ($self, $msg) = @_;
+
+  require Data::Dumper::Concise;
+
+  carp_once ($msg . ' While we will attempt to continue anyway, the results '
+  . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
+  . "does not go away, file a bug report including the following info:\n"
+  . Data::Dumper::Concise::Dumper($self->_describe_connection)
+  );
+}
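`_determine_connector_driver` is another instance of DBIC's detect-and-rebless pattern: compute the most specific storage subclass, load it only if it is actually installed, and re-bless the live object into it. A condensed sketch of just that pattern, using the same helper DBIC's own `load_optional_class` comes from (package and method names here are invented for illustration):

    use strict;
    use warnings;
    use Class::C3::Componentised ();

    sub specialize {
      my ($self, $dbtype) = @_;

      $dbtype =~ s/\W/_/g;                        # e.g. 'Microsoft SQL Server'
      my $subclass = ( ref $self ) . "::$dbtype"; # -> My::Storage::Microsoft_SQL_Server

      # load_optional_class returns false (rather than dying) when the
      # class simply does not exist, so a failed detection stays soft
      if ( Class::C3::Componentised->load_optional_class($subclass) ) {
        bless $self, $subclass;
      }

      return $self;
    }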
+
 sub _do_connection_actions {
   my $self = shift;
   my $method_prefix = shift;
@@ -1219,9 +1349,11 @@ sub _do_query {
     my $attrs = shift @do_args;
     my @bind = map { [ undef, $_ ] } @do_args;

-    $self->_query_start($sql, \@bind);
-    $self->_get_dbh->do($sql, $attrs, @do_args);
-    $self->_query_end($sql, \@bind);
+    $self->dbh_do(sub {
+      $_[0]->_query_start($sql, \@bind);
+      $_[1]->do($sql, $attrs, @do_args);
+      $_[0]->_query_end($sql, \@bind);
+    });
   }

   return $self;
@@ -1235,10 +1367,7 @@ sub _connect {

   my ($old_connect_via, $dbh);

-  if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
-    $old_connect_via = $DBI::connect_via;
-    $DBI::connect_via = 'connect';
-  }
+  local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};

   try {
     if(ref $info[0] eq 'CODE') {
@@ -1300,9 +1429,6 @@ sub _connect {
   }
   catch {
     $self->throw_exception("DBI Connection failed: $_")
-  }
-  finally {
-    $DBI::connect_via = $old_connect_via if $old_connect_via;
   };

   $self->_dbh_autocommit($dbh->{AutoCommit});
@@ -1343,7 +1469,7 @@ sub _exec_txn_begin {
 sub txn_commit {
   my $self = shift;

-  $self->_verify_pid if $self->_dbh;
+  $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
   $self->throw_exception("Unable to txn_commit() on a disconnected storage")
     unless $self->_dbh;
@@ -1374,7 +1500,7 @@ sub _exec_txn_commit {
 sub txn_rollback {
   my $self = shift;

-  $self->_verify_pid if $self->_dbh;
+  $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
   $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
     unless $self->_dbh;
@@ -1407,7 +1533,7 @@ for my $meth (qw/svp_begin svp_release svp_rollback/) {
   no strict qw/refs/;
   *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
     my $self = shift;
-    $self->_verify_pid if $self->_dbh;
+    $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
     $self->throw_exception("Unable to $meth() on a disconnected storage")
       unless $self->_dbh;
     $self->next::method(@_);
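The `_connect` change above trades the explicit save/restore (and its try/finally) for Perl's conditional `local`: with a statement modifier, localization only happens when the condition holds, and the previous value is restored automatically on scope exit whether or not an exception is thrown. A tiny self-contained illustration:

    use strict;
    use warnings;

    our $mode = 'production';

    sub run_step {
      my ($debugging) = @_;

      # localized only when $debugging is true; restored on return or
      # on die, with no manual bookkeeping either way
      local $mode = 'debug' if $debugging;

      return $mode;
    }

    print run_step(1), "\n";  # debug
    print run_step(0), "\n";  # production
    print $mode, "\n";        # production - untouched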
@@ -1667,10 +1793,17 @@ sub insert {
   # they can be fused once again with the final return
   $to_insert = { %$to_insert, %$prefetched_values };

+  # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+  # Investigate what does it take to s/defined/exists/
   my $col_infos = $source->columns_info;
   my %pcols = map { $_ => 1 } $source->primary_columns;
-  my %retrieve_cols;
+  my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
   for my $col ($source->columns) {
+    if ($col_infos->{$col}{is_auto_increment}) {
+      $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+      $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+    }
+
     # nothing to retrieve when explicit values are supplied
     next if (defined $to_insert->{$col} and ! (
       ref $to_insert->{$col} eq 'SCALAR'
@@ -1686,6 +1819,9 @@ sub insert {
     );
   };

+  local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+  local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
+
   my ($sqla_opts, @ir_container);
   if (%retrieve_cols and $self->_use_insert_returning) {
     $sqla_opts->{returning_container} = \@ir_container
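The two `local`-ized flags give the rest of the insert path (and storage subclasses) a per-operation signal about auto-increment handling. From the public API side the two cases look roughly like this (an `Artist` source with an auto-increment PK `artistid` is assumed purely for illustration):

    # PK not supplied: _perform_autoinc_retrieval is set, and the storage
    # arranges to fetch the generated value back (INSERT ... RETURNING
    # where supported, a last_insert_id-style mechanism otherwise)
    my $artist = $schema->resultset('Artist')->create({ name => 'Dead Salmon' });
    my $new_id = $artist->id;

    # PK supplied explicitly: _autoinc_supplied_for_op is set instead,
    # letting drivers that need it react, e.g. by wrapping the statement
    # in SET IDENTITY_INSERT on RDBMS that require it
    $schema->resultset('Artist')->create({ artistid => 42, name => 'Fixed ID' });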
@@ -1751,9 +1887,12 @@ sub insert {
 sub insert_bulk {
   my ($self, $source, $cols, $data) = @_;

+  my @col_range = (0..$#$cols);
+
   # FIXME - perhaps this is not even needed? does DBI stringify?
   #
   # forcibly stringify whatever is stringifiable
+  # ResultSet::populate() hands us a copy - safe to mangle
   for my $r (0 .. $#$data) {
     for my $c (0 .. $#{$data->[$r]}) {
       $data->[$r][$c] = "$data->[$r][$c]"
@@ -1761,116 +1900,180 @@ sub insert_bulk {
     }
   }

-  # check the data for consistency
-  # report a sensible error on bad data
+  my $colinfos = $source->columns_info($cols);
+
+  local $self->{_autoinc_supplied_for_op} =
+    (first { $_->{is_auto_increment} } values %$colinfos)
+      ? 1
+      : 0
+  ;
+
+  # get a slice type index based on first row of data
+  # a "column" in this context may refer to more than one bind value
+  # e.g. \[ '?, ?', [...], [...] ]
+  #
+  # construct the value type index - a description of values types for every
+  # per-column slice of $data:
   #
-  # also create a list of dynamic binds (ones that will be changing
-  # for each row)
-  my $dyn_bind_idx;
-  for my $col_idx (0..$#$cols) {
+  # nonexistent - nonbind literal
+  # 0 - regular value
+  # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
+  #
+  # also construct the column hash to pass to the SQL generator. For plain
+  # (non literal) values - convert the members of the first row into a
+  # literal+bind combo, with extra positional info in the bind attr hashref.
+  # This will allow us to match the order properly, and is so contrived
+  # because a user-supplied literal/bind (or something else specific to a
+  # resultsource and/or storage driver) can inject extra binds along the
+  # way, so one can't rely on "shift positions" ordering at all. Also we
+  # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+  # can be later matched up by address), because we want to supply a real
+  # value on which perhaps e.g. datatype checks will be performed
+  my ($proto_data, $value_type_idx);
+  for my $i (@col_range) {
+    my $colname = $cols->[$i];
+    if (ref $data->[0][$i] eq 'SCALAR') {
+      # no bind value at all - no type
+
+      $proto_data->{$colname} = $data->[0][$i];
+    }
+    elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+      # repack, so we don't end up mangling the original \[]
+      my ($sql, @bind) = @${$data->[0][$i]};

-    # the first "row" is used as a point of reference
-    my $reference_val = $data->[0][$col_idx];
-    my $is_literal = ref $reference_val eq 'SCALAR';
-    my $is_literal_bind = ( !$is_literal and (
-      ref $reference_val eq 'REF'
-      and
-      ref $$reference_val eq 'ARRAY'
-    ) );
-
-    $dyn_bind_idx->{$col_idx} = 1
-      if (!$is_literal and !$is_literal_bind);
-
-    # use a closure for convenience (less to pass)
-    my $bad_slice = sub {
-      my ($msg, $slice_idx) = @_;
-      $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
-        $msg,
-        $cols->[$col_idx],
-        do {
-          require Data::Dumper::Concise;
-          local $Data::Dumper::Maxdepth = 2;
-          Data::Dumper::Concise::Dumper ({
-            map { $cols->[$_] =>
-              $data->[$slice_idx][$_]
-            } (0 .. $#$cols)
-          }),
-        }
+      # normalization of user supplied stuff
+      my $resolved_bind = $self->_resolve_bindattrs(
+        $source, \@bind, $colinfos,
       );
-    };
+
+      # store value-less (attrs only) bind info - we will be comparing all
+      # supplied binds against this for sanity
+      $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+
+      $proto_data->{$colname} = \[ $sql, map { [
+        # inject slice order to use for $proto_bind construction
+          { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+            =>
+          $resolved_bind->[$_][1]
+        ] } (0 .. $#bind)
+      ];
+    }
+    else {
+      $value_type_idx->{$i} = 0;
+
+      $proto_data->{$colname} = \[ '?', [
+        { dbic_colname => $colname, _bind_data_slice_idx => $i }
+          =>
+        $data->[0][$i]
+      ] ];
+    }
+  }
+
+  my ($sql, $proto_bind) = $self->_prep_for_execute (
+    'insert',
+    $source,
+    [ $proto_data ],
+  );
+
+  if (! @$proto_bind and keys %$value_type_idx) {
+    # if the bindlist is empty and we had some dynamic binds, this means the
+    # storage ate them away (e.g. the NoBindVars component) and interpolated
+    # them directly into the SQL. This obviously can't be good for multi-inserts
+    $self->throw_exception('Cannot insert_bulk without support for placeholders');
+  }
+
+  # sanity checks
+  # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+  #
+  # use an error reporting closure for convenience (less to pass)
+  my $bad_slice_report_cref = sub {
+    my ($msg, $r_idx, $c_idx) = @_;
+    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
+      $msg,
+      $cols->[$c_idx],
+      do {
+        require Data::Dumper::Concise;
+        local $Data::Dumper::Maxdepth = 5;
+        Data::Dumper::Concise::Dumper ({
+          map { $cols->[$_] =>
            $data->[$r_idx][$_]
+          } @col_range
+        }),
+      }
+    );
+  };
+
+  for my $col_idx (@col_range) {
+    my $reference_val = $data->[0][$col_idx];

     for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
       my $val = $data->[$row_idx][$col_idx];

-      if ($is_literal) {
+      if (! exists $value_type_idx->{$col_idx}) { # literal no binds
         if (ref $val ne 'SCALAR') {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
         elsif ($$val ne $$reference_val) {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Inconsistent literal SQL value (expecting \\'$$reference_val')",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
       }
-      elsif ($is_literal_bind) {
+      elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+        if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
+          $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
+        }
+      }
+      else { # binds from a \[], compare type and attrs
         if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
-          $bad_slice->(
+          $bad_slice_report_cref->(
             "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
-            $row_idx
+            $row_idx,
+            $col_idx,
           );
         }
-        elsif (${$val}->[0] ne ${$reference_val}->[0]) {
-          $bad_slice->(
-            "Inconsistent literal SQL-bind value (expecting \\['${$reference_val}->[0]', ... ])",
-            $row_idx
-          );
-        }
-      }
-      elsif (ref $val) {
-        if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
-          $bad_slice->("Literal SQL found where a plain bind value is expected", $row_idx);
-        }
-        else {
-          $bad_slice->("$val reference found where bind expected", $row_idx);
+        # start drilling down and bail out early on identical refs
+        elsif (
+          $reference_val != $val
+            or
+          $$reference_val != $$val
+        ) {
+          if (${$val}->[0] ne ${$reference_val}->[0]) {
+            $bad_slice_report_cref->(
+              "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+              $row_idx,
+              $col_idx,
+            );
+          }
+          # need to check the bind attrs - a bind will happen only once for
+          # the entire dataset, so any changes further down will be ignored.
+          elsif (! Data::Compare::Compare(
+            $value_type_idx->{$col_idx},
+            [
+              map
+                { $_->[0] }
+                @{$self->_resolve_bindattrs(
+                  $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
+                )}
+            ],
+          )) {
+            $bad_slice_report_cref->(
+              'Differing bind attributes on literal/bind values not supported',
+              $row_idx,
+              $col_idx,
+            );
+          }
         }
       }
     }
   }

-  # Get the sql with bind values interpolated where necessary. For dynamic
-  # binds convert the values of the first row into a literal+bind combo, with
-  # extra positional info in the bind attr hashref. This will allow us to match
-  # the order properly, and is so contrived because a user-supplied literal
-  # bind (or something else specific to a resultsource and/or storage driver)
-  # can inject extra binds along the way, so one can't rely on "shift
-  # positions" ordering at all. Also we can't just hand SQLA a set of some
-  # known "values" (e.g. hashrefs that can be later matched up by address),
-  # because we want to supply a real value on which perhaps e.g. datatype
-  # checks will be performed
-  my ($sql, $proto_bind) = $self->_prep_for_execute (
-    'insert',
-    $source,
-    [ { map { $cols->[$_] => $dyn_bind_idx->{$_}
-      ? \[ '?', [
-          { dbic_colname => $cols->[$_], _bind_data_slice_idx => $_ }
-            =>
-          $data->[0][$_]
-        ] ]
-      : $data->[0][$_]
-    } (0..$#$cols) } ],
-  );
-
-  if (! @$proto_bind and keys %$dyn_bind_idx) {
-    # if the bindlist is empty and we had some dynamic binds, this means the
-    # storage ate them away (e.g. the NoBindVars component) and interpolated
-    # them directly into the SQL. This obviosly can't be good for multi-inserts
-    $self->throw_exception('Cannot insert_bulk without support for placeholders');
-  }
-
   # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
   # are atomic (even if execute_for_fetch is a single call). Thus a safety
   # scope guard
@@ -1894,7 +2097,7 @@ sub insert_bulk {

   $guard->commit;

-  return (wantarray ? ($rv, $sth, @$proto_bind) : $rv);
+  return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
 }
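Translated to the public API, the slice-type rules enforced above mean every row of a `populate()` call must keep the same shape per column: plain values may vary freely, while literal SQL and literal+bind structures must stay structurally identical from row to row. A sketch (the column names come from the DBIC test schema and are used only for illustration):

    $schema->resultset('Artist')->populate([
      [ qw( name rank charfield ) ],

      # name:      plain value -> a real per-row bind
      # rank:      \'...'      -> literal SQL, must be identical in all rows
      # charfield: \[ ... ]    -> literal SQL + bind; same SQL and same bind
      #                           attributes required in every row
      [ 'A', \'0', \[ 'UPPER(?)', 'foo' ] ],
      [ 'B', \'0', \[ 'UPPER(?)', 'bar' ] ],
    ]);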

 # execute_for_fetch is capable of returning data just fine (it means it
@@ -1921,23 +2124,30 @@ sub _dbh_execute_for_fetch {
     ) if defined $bind_attrs->[$i];
   }

-  my $data_slice_idx = [ map {
-    (
-      ref $proto_bind->[$_][0] eq 'HASH'
-        and
-      exists $proto_bind->[$_][0]{_bind_data_slice_idx}
-    ) ? $proto_bind->[$_][0]{_bind_data_slice_idx} : undef;
-  } @idx_range ];
+  # At this point $data slots named in the _bind_data_slice_idx of
+  # each piece of $proto_bind are either \[]s or plain values to be
+  # passed in. Construct the dispensing coderef. *NOTE* the order
+  # of $data will differ from that of the ?s in the SQL (due to
+  # alphabetical ordering by colname). We actually do want to
+  # preserve this behavior so that prepare_cached has a better
+  # chance of matching on unrelated calls
+  my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;

   my $fetch_row_idx = -1; # saner loop this way
   my $fetch_tuple = sub {
     return undef if ++$fetch_row_idx > $#$data;

-    return [ map {
-      defined $data_slice_idx->[$_]
-        ? $data->[$fetch_row_idx][$data_slice_idx->[$_]]
-        : $proto_bind->[$_][1]
-    } @idx_range ];
+    return [ map
+      { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
+        ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
+        : $_
+      }
+      map
+        { $data->[$fetch_row_idx][$_] }
+        sort
+          { $data_reorder{$a} <=> $data_reorder{$b} }
+          keys %data_reorder
+    ];
   };

   my $tuple_status = [];
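`_dbh_execute_for_fetch` ultimately hands the tuple dispenser to DBI's own bulk interface, where drivers with native array binding (e.g. DBD::Oracle) can collapse the whole loop into a single round trip. The underlying DBI call, reduced to its bare shape (the SQLite DSN is purely illustrative):

    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', { RaiseError => 1 });
    $dbh->do('CREATE TABLE artist (name TEXT)');

    my $sth  = $dbh->prepare('INSERT INTO artist (name) VALUES (?)');
    my @rows = ( [ 'foo' ], [ 'bar' ] );

    # the dispenser returns one arrayref of bind values per call, then
    # undef to signal end-of-data - exactly the contract of the
    # $fetch_tuple coderef constructed above
    my @tuple_status;
    my $inserted = $sth->execute_for_fetch( sub { shift @rows }, \@tuple_status );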
@@ -2024,103 +2234,6 @@ sub delete {
   shift->_execute('delete', @_);
 }

-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-
-  # quick check if we got a sane rs on our hands
-  my @pcols = $rsrc->_pri_cols;
-
-  my $sel = $rs->_resolved_attrs->{select};
-  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
-  if (
-    join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
-      ne
-    join ("\x00", sort @$sel )
-  ) {
-    $self->throw_exception (
-      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
-    );
-  }
-
-  if (@pcols == 1) {
-    return $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      { $pcols[0] => { -in => $rs->as_query } },
-    );
-  }
-
-  else {
-    return $self->_multipk_update_delete (@_);
-  }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
-  return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->_pri_cols;
-
-  my $guard = $self->txn_scope_guard;
-
-  # emulate the return value of $sth->execute for non-selects
-  my $row_cnt = '0E0';
-
-  my $subrs_cur = $rs->cursor;
-  my @all_pk = $subrs_cur->all;
-  for my $pks ( @all_pk) {
-
-    my $cond;
-    for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks->[$i];
-    }
-
-    $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      $cond,
-    );
-
-    $row_cnt++;
-  }
-
-  $guard->commit;
-
-  return $row_cnt;
-}
-
 sub _select {
   my $self = shift;
   $self->_execute($self->_select_args(@_));
@@ -2129,6 +2242,10 @@ sub _select {
 sub _select_args_to_query {
   my $self = shift;

+  $self->throw_exception(
+    "Unable to generate limited query representation with 'software_limit' enabled"
+  ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
+
   # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
   my ($op, $ident, @args) =
@@ -2204,7 +2321,15 @@ sub _select_args {
   }

   # try to simplify the joinmap further (prune unreferenced type-single joins)
-  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+  if (
+    ref $ident
+      and
+    reftype $ident eq 'ARRAY'
+      and
+    @$ident != 1
+  ) {
+    $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+  }

 ###
   # This would be the point to deflate anything found in $where
@@ -2761,18 +2886,12 @@ sub deployment_statements {
     data => $schema,
   );

-  my @ret;
-  if (wantarray) {
-    @ret = $tr->translate;
-  }
-  else {
-    $ret[0] = $tr->translate;
-  }
-
-  $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
-    unless (@ret && defined $ret[0]);
-
-  return wantarray ? @ret : $ret[0];
+  return preserve_context {
+    $tr->translate
+  } after => sub {
+    $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+      unless defined $_[0];
+  };
 }

 # FIXME deploy() currently does not accurately report sql errors
@@ -2963,6 +3082,13 @@ sub _is_text_lob_type {
     |national\s*character\s*varying))\z/xi);
 }

+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($self->_is_binary_lob_type($data_type)
+    || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
+}
+
 1;

 =head1 USAGE NOTES

@@ -2984,11 +3110,9 @@ cases if you choose the C<< AutoCommit => 0 >> path, just as you would
 be with raw DBI.


-=head1 AUTHORS
-
-Matt S. Trout
+=head1 AUTHOR AND CONTRIBUTORS

-Andy Grundman
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class

 =head1 LICENSE
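A closing note on the deployment_statements() hunk: Context::Preserve runs the main block in the caller's original list/scalar context, hands the result(s) to the `after` block for inspection, and then returns them unchanged, which is what lets the wantarray juggling be deleted. A minimal sketch of the same shape (the `$translator` object stands in for the SQL::Translator instance):

    use strict;
    use warnings;
    use Context::Preserve 'preserve_context';

    sub translate_or_die {
      my ($translator) = @_;

      # ->translate is invoked in whatever context our own caller
      # supplied; the after-block sees the result(s) in @_ and can veto
      # by dying, mirroring the exception thrown in the patch
      return preserve_context { $translator->translate }
        after => sub { die "Unable to produce deployment statements\n" unless defined $_[0] };
    }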