X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=71c57daf7b1cae22710459922469065bec270df7;hb=87b1255103d7b8873b225416cb381c50011f4c06;hp=1288c4785f687532192d98ffe667ff97a47b2bc7;hpb=e92e86369d8d655c5d62940e9cbd4ed93e4c7985;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 1288c47..71c57da 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -7,24 +7,38 @@ use warnings; use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; use mro 'c3'; -use Carp::Clan qw/^DBIx::Class/; -use DBI; -use DBIx::Class::Storage::DBI::Cursor; -use DBIx::Class::Storage::Statistics; -use Scalar::Util(); -use List::Util(); -use Data::Dumper::Concise(); -use Sub::Name (); - -# what version of sqlt do we require if deploy() without a ddl_dir is invoked -# when changing also adjust the corresponding author_require in Makefile.PL -my $minimum_sqlt_version = '0.11002'; - - -__PACKAGE__->mk_group_accessors('simple' => - qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid - _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/ +use DBIx::Class::Carp; +use Scalar::Util qw/refaddr weaken reftype blessed/; +use Context::Preserve 'preserve_context'; +use Try::Tiny; +use SQL::Abstract qw(is_plain_value is_literal_value); +use DBIx::Class::_Util qw( + quote_sub perlstring serialize dump_value + dbic_internal_try + detected_reinvoked_destructor scope_guard + mkdir_p ); +use namespace::clean; + +# default cursor class, overridable in connect_info attributes +__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); + +__PACKAGE__->mk_group_accessors('inherited' => qw/ + sql_limit_dialect sql_quote_char sql_name_sep +/); + +__PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/); + +__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker'); +__PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default + +__PACKAGE__->sql_name_sep('.'); + +__PACKAGE__->mk_group_accessors('simple' => qw/ + _connect_info _dbic_connect_attributes _driver_determined + _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit + _perform_autoinc_retrieval _autoinc_supplied_for_op +/); # the values for these accessors are picked out (and deleted) from # the attribute hashref passed to connect_info @@ -35,44 +49,115 @@ my @storage_options = qw/ __PACKAGE__->mk_group_accessors('simple' => @storage_options); -# default cursor class, overridable in connect_info attributes -__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); +# capability definitions, using a 2-tiered accessor system +# The rationale is: +# +# A driver/user may define _use_X, which blindly without any checks says: +# "(do not) use this capability", (use_dbms_capability is an "inherited" +# type accessor) +# +# If _use_X is undef, _supports_X is then queried. This is a "simple" style +# accessor, which in turn calls _determine_supports_X, and stores the return +# in a special slot on the storage object, which is wiped every time a $dbh +# reconnection takes place (it is not guaranteed that upon reconnection we +# will get the same rdbms version). _determine_supports_X does not need to +# exist on a driver, as we ->can for it before calling. 
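For illustration, a minimal sketch (not part of the patch) of how a driver subclass
interacts with the two tiers described above; the driver name and the version
threshold here are invented:

  package DBIx::Class::Storage::DBI::SomeHypotheticalDriver;
  use base 'DBIx::Class::Storage::DBI';

  # tier 1: a blind override - no probing ever happens for this driver
  __PACKAGE__->_use_insert_returning(0);

  # tier 2: consulted only while _use_insert_returning remains undef;
  # the result is cached on the storage object until the next reconnect
  sub _determine_supports_insert_returning {
    my $self = shift;
    # invented version cutoff, purely for illustration
    ($self->_server_info->{normalized_dbms_version} || 0) >= 8.002 ? 1 : 0;
  }

A caller asking get_use_dbms_capability('_use_insert_returning') receives the
hard override when one is set, and otherwise falls through to
get_dbms_capability('_supports_insert_returning'), both of which are defined
further down in this patch.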
-__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); -__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); +my @capabilities = (qw/ + insert_returning + insert_returning_bound + multicolumn_in + + placeholders + typeless_placeholders + + join_optimizer +/); +__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities ); +__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) ); + +# on by default, not strictly a capability (pending rewrite) +__PACKAGE__->_use_join_optimizer (1); +sub _determine_supports_join_optimizer { 1 }; # Each of these methods need _determine_driver called before itself -# in order to function reliably. This is a purely DRY optimization -my @rdbms_specific_methods = qw/ +# in order to function reliably. We also need to separate accessors +# from plain old method calls, since an accessor called as a setter +# does *not* need the driver determination loop fired (and in fact +# can produce hard to find bugs, like e.g. losing on_connect_* +# semantics on fresh connections) +# +# The construct below is simply a parameterized around() +my $storage_accessor_idx = { map { $_ => 1 } qw( sqlt_type - build_datetime_parser datetime_parser_type + sql_maker + cursor_class +)}; +for my $meth (keys %$storage_accessor_idx, qw( + deployment_statements + + build_datetime_parser + + txn_begin + insert - insert_bulk update delete select select_single -/; -for my $meth (@rdbms_specific_methods) { + _insert_bulk - my $orig = __PACKAGE__->can ($meth) - or next; + with_deferred_fk_checks + + get_use_dbms_capability + get_dbms_capability - no strict qw/refs/; - no warnings qw/redefine/; - *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub { - if (not $_[0]->_driver_determined) { + _server_info + _get_server_version +)) { + + my $orig = __PACKAGE__->can ($meth) + or die "$meth is not a ::Storage::DBI method!"; + + my $possibly_a_setter = $storage_accessor_idx->{$meth} ? 1 : 0; + + quote_sub + __PACKAGE__ ."::$meth", sprintf( <<'EOC', $possibly_a_setter, perlstring $meth ), { '$orig' => \$orig }; + + if ( + # if this is an actual *setter* - just set it, no need to connect + # and determine the driver + !( %1$s and @_ > 1 ) + and + # only fire when invoked on an instance, a valid class-based invocation + # would e.g. be setting a default for an inherited accessor + ref $_[0] + and + ! $_[0]->{_driver_determined} + and + ! $_[0]->{_in_determine_driver} + and + # Only try to determine stuff if we have *something* that either is or can + # provide a DSN. Allows for bare $schema's generated with a plain ->connect() + # to still be marginally useful + $_[0]->_dbi_connect_info->[0] + ) { $_[0]->_determine_driver; - goto $_[0]->can($meth); + + # work around http://rt.perl.org/rt3/Public/Bug/Display.html?id=35878 + goto $_[0]->can(%2$s) unless DBIx::Class::_ENV_::BROKEN_GOTO; + + my $cref = $_[0]->can(%2$s); + goto $cref; } - $orig->(@_); - }; -} + goto $orig; +EOC +} =head1 NAME @@ -93,7 +178,7 @@ DBIx::Class::Storage::DBI - DBI storage handler ); $schema->resultset('Book')->search({ - written_on => $schema->storage->datetime_parser(DateTime->now) + written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now) }); =head1 DESCRIPTION @@ -109,15 +194,120 @@ documents DBI-specific methods and behaviors. 
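As a rough hand-expanded sketch of what the parameterized around() earlier in
this hunk compiles for e.g. select() (omitting the setter shortcut and the
BROKEN_GOTO workaround; $orig holds the original method, captured via the
'$orig' closure passed to quote_sub):

  sub select {
    if (
      ref $_[0]                        # only instance calls get the detour
        and
      ! $_[0]->{_driver_determined}
        and
      ! $_[0]->{_in_determine_driver}
        and
      $_[0]->_dbi_connect_info->[0]    # something DSN-like is available
    ) {
      $_[0]->_determine_driver;        # may rebless $_[0] into a driver subclass
      goto $_[0]->can('select');       # restart dispatch on the (possibly new) class
    }

    goto $orig;                        # no detour needed - plain passthrough
  }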
sub new { my $new = shift->next::method(@_); - $new->transaction_depth(0); $new->_sql_maker_opts({}); - $new->{savepoints} = []; - $new->{_in_dbh_do} = 0; - $new->{_dbh_gen} = 0; + $new->_dbh_details({}); + $new->{_in_do_block} = 0; + + # read below to see what this does + $new->_arm_global_destructor; $new; } +# This is hack to work around perl shooting stuff in random +# order on exit(). If we do not walk the remaining storage +# objects in an END block, there is a *small but real* chance +# of a fork()ed child to kill the parent's shared DBI handle, +# *before perl reaches the DESTROY in this package* +# Yes, it is ugly and effective. +# Additionally this registry is used by the CLONE method to +# make sure no handles are shared between threads +{ + my %seek_and_destroy; + + sub _arm_global_destructor { + + # quick "garbage collection" pass - prevents the registry + # from slowly growing with a bunch of undef-valued keys + defined $seek_and_destroy{$_} or delete $seek_and_destroy{$_} + for keys %seek_and_destroy; + + weaken ( + $seek_and_destroy{ refaddr($_[0]) } = $_[0] + ); + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; + } + + END { + + if( + ! DBIx::Class::_ENV_::BROKEN_FORK + and + my @instances = grep { defined $_ } values %seek_and_destroy + ) { + local $?; # just in case the DBI destructor changes it somehow + + # disarm the handle if not native to this process (see comment on top) + $_->_verify_pid for @instances; + } + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; + } + + sub CLONE { + # As per DBI's recommendation, DBIC disconnects all handles as + # soon as possible (DBIC will reconnect only on demand from within + # the thread) + my @instances = grep { defined $_ } values %seek_and_destroy; + %seek_and_destroy = (); + + for (@instances) { + $_->_dbh(undef); + $_->disconnect; + + # properly renumber existing refs + $_->_arm_global_destructor + } + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; + } +} + +sub DESTROY { + return if &detected_reinvoked_destructor; + + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; + + # some databases spew warnings on implicit disconnect + return unless defined $_[0]->_dbh; + + local $SIG{__WARN__} = sub {}; + $_[0]->_dbh(undef); + # not calling ->disconnect here - we are being destroyed - nothing to reset + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; +} + +# handle pid changes correctly - do not destroy parent's connection +sub _verify_pid { + + my $pid = $_[0]->_conn_pid; + + if( defined $pid and $pid != $$ and my $dbh = $_[0]->_dbh ) { + $dbh->{InactiveDestroy} = 1; + $_[0]->_dbh(undef); + $_[0]->disconnect; + } + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. 
Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; +} + =head2 connect_info This method is normally called by L, which @@ -192,10 +382,10 @@ for most DBDs. See L for details. =head3 DBIx::Class specific connection attributes -In addition to the standard L -L attributes, DBIx::Class recognizes +In addition to the standard L +L attributes, DBIx::Class recognizes the following connection options. These options can be mixed in with your other -L connection attributes, or placed in a seperate hashref +L connection attributes, or placed in a separate hashref (C<\%extra_attributes>) as shown above. Every time C is invoked, any previous settings for @@ -328,14 +518,19 @@ statement handles via L. =item limit_dialect -Sets the limit dialect. This is useful for JDBC-bridge among others -where the remote SQL-dialect cannot be determined by the name of the -driver alone. See also L. +Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the +default L setting of the storage (if any). For a list +of available limit dialects see L. + +=item quote_names + +When true automatically sets L and L to the characters +appropriate for your particular RDBMS. This option is preferred over specifying +L directly. =item quote_char -Specifies what characters to use to quote table and column names. If -you use this you will want to specify L as well. +Specifies what characters to use to quote table and column names. C expects either a single character, in which case is it is placed on either side of the table/column name, or an arrayref of length @@ -346,14 +541,9 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with C, and is used to -specify the charecter that seperates elements (schemas, tables, columns) from -each other. In most cases this is simply a C<.>. - -The consequences of not supplying this value is that L -will assume DBIx::Class' uses of aliases to be complete column -names. The output will look like I<"me.name"> when it should actually -be I<"me"."name">. +This parameter is only useful in conjunction with C, and is used to +specify the character that separates elements (schemas, tables, columns) from +each other. If unspecified it defaults to the most commonly used C<.>. =item unsafe @@ -406,7 +596,7 @@ L 'postgres', 'my_pg_password', { AutoCommit => 1 }, - { quote_char => q{"}, name_sep => q{.} }, + { quote_char => q{"} }, ] ); @@ -451,13 +641,90 @@ L =cut sub connect_info { - my ($self, $info_arg) = @_; + my ($self, $info) = @_; - return $self->_connect_info if !$info_arg; + return $self->_connect_info if !$info; + + $self->_connect_info($info); # copy for _connect_info + + $info = $self->_normalize_connect_info($info) + if ref $info eq 'ARRAY'; + + my %attrs = ( + %{ $self->_default_dbi_connect_attributes || {} }, + %{ $info->{attributes} || {} }, + ); + + my @args = @{ $info->{arguments} }; + + if (keys %attrs and ref $args[0] ne 'CODE') { + carp_unique ( + 'You provided explicit AutoCommit => 0 in your connection_info. ' + . 'This is almost universally a bad idea (see the footnotes of ' + . 'DBIx::Class::Storage::DBI for more info). If you still want to ' + . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable ' + . 'this warning.' + ) if ! $attrs{AutoCommit} and ! 
$ENV{DBIC_UNSAFE_AUTOCOMMIT_OK}; + + push @args, \%attrs if keys %attrs; + } + + # this is the authoritative "always an arrayref" thing fed to DBI->connect + # OR a single-element coderef-based $dbh factory + $self->_dbi_connect_info(\@args); + + # extract the individual storage options + for my $storage_opt (keys %{ $info->{storage_options} }) { + my $value = $info->{storage_options}{$storage_opt}; + + $self->$storage_opt($value); + } + + # Extract the individual sqlmaker options + # + # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only + # the new set of options + $self->_sql_maker(undef); + $self->_sql_maker_opts({}); + + for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) { + my $value = $info->{sql_maker_options}{$sql_maker_opt}; + + $self->_sql_maker_opts->{$sql_maker_opt} = $value; + } + + # FIXME - dirty: + # save attributes in a separate accessor so they are always + # introspectable, even in case of a CODE $dbhmaker + $self->_dbic_connect_attributes (\%attrs); + + return $self->_connect_info; +} + +sub _dbi_connect_info { + my $self = shift; + + return $self->{_dbi_connect_info} = $_[0] + if @_; + + my $conninfo = $self->{_dbi_connect_info} || []; + + # last ditch effort to grab a DSN + if ( ! defined $conninfo->[0] and $ENV{DBI_DSN} ) { + my @new_conninfo = @$conninfo; + $new_conninfo[0] = $ENV{DBI_DSN}; + $conninfo = \@new_conninfo; + } + + return $conninfo; +} - my @args = @$info_arg; # take a shallow copy for further mutilation - $self->_connect_info([@args]); # copy for _connect_info +sub _normalize_connect_info { + my ($self, $info_arg) = @_; + my %info; + + my @args = @$info_arg; # take a shallow copy for further mutilation # combine/pre-parse arguments depending on invocation style @@ -494,43 +761,31 @@ sub connect_info { @args = @args[0,1,2]; } - # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only - # the new set of options - $self->_sql_maker(undef); - $self->_sql_maker_opts({}); + $info{arguments} = \@args; - if(keys %attrs) { - for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module - if(my $value = delete $attrs{$storage_opt}) { - $self->$storage_opt($value); - } - } - for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) { - if(my $opt_val = delete $attrs{$sql_maker_opt}) { - $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val; - } - } - } + my @storage_opts = grep exists $attrs{$_}, + @storage_options, 'cursor_class'; - if (ref $args[0] eq 'CODE') { - # _connect() never looks past $args[0] in this case - %attrs = () - } else { - %attrs = ( - %{ $self->_default_dbi_connect_attributes || {} }, - %attrs, - ); - } + @{ $info{storage_options} }{@storage_opts} = + delete @attrs{@storage_opts} if @storage_opts; - $self->_dbi_connect_info([@args, keys %attrs ? 
\%attrs : ()]); - $self->_connect_info; + my @sql_maker_opts = grep exists $attrs{$_}, + qw/limit_dialect quote_char name_sep quote_names/; + + @{ $info{sql_maker_options} }{@sql_maker_opts} = + delete @attrs{@sql_maker_opts} if @sql_maker_opts; + + $info{attributes} = \%attrs if %attrs; + + return \%info; } -sub _default_dbi_connect_attributes { - return { +sub _default_dbi_connect_attributes () { + +{ AutoCommit => 1, - RaiseError => 1, PrintError => 0, + RaiseError => 1, + ShowErrorStatement => 1, }; } @@ -600,106 +855,37 @@ Example: sub dbh_do { my $self = shift; - my $code = shift; - - my $dbh = $self->_get_dbh; - - return $self->$code($dbh, @_) if $self->{_in_dbh_do} - || $self->{transaction_depth}; - - local $self->{_in_dbh_do} = 1; + my $run_target = shift; # either a coderef or a method name - my @result; - my $want_array = wantarray; - - eval { - - if($want_array) { - @result = $self->$code($dbh, @_); - } - elsif(defined $want_array) { - $result[0] = $self->$code($dbh, @_); - } - else { - $self->$code($dbh, @_); - } - }; - - # ->connected might unset $@ - copy - my $exception = $@; - if(!$exception) { return $want_array ? @result : $result[0] } - - $self->throw_exception($exception) if $self->connected; - - # We were not connected - reconnect and retry, but let any - # exception fall right through this time - carp "Retrying $code after catching disconnected exception: $exception" - if $ENV{DBIC_DBIRETRY_DEBUG}; - $self->_populate_dbh; - $self->$code($self->_dbh, @_); + # short circuit when we know there is no need for a runner + # + # FIXME - assumption may be wrong + # the rationale for the txn_depth check is that if this block is a part + # of a larger transaction, everything up to that point is screwed anyway + return $self->$run_target($self->_get_dbh, @_) + if $self->{_in_do_block} or $self->transaction_depth; + + # take a ref instead of a copy, to preserve @_ aliasing + # semantics within the coderef, but only if needed + # (pseudoforking doesn't like this trick much) + my $args = @_ ? \@_ : []; + + DBIx::Class::Storage::BlockRunner->new( + storage => $self, + wrap_txn => 0, + retry_handler => sub { + $_[0]->failed_attempt_count == 1 + and + ! $_[0]->storage->connected + }, + )->run(sub { + $self->$run_target ($self->_get_dbh, @$args ) + }); } -# This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. -# It also informs dbh_do to bypass itself while under the direction of txn_do, -# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc) sub txn_do { - my $self = shift; - my $coderef = shift; - - ref $coderef eq 'CODE' or $self->throw_exception - ('$coderef must be a CODE reference'); - - return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint; - - local $self->{_in_dbh_do} = 1; - - my @result; - my $want_array = wantarray; - - my $tried = 0; - while(1) { - eval { - $self->_get_dbh; - - $self->txn_begin; - if($want_array) { - @result = $coderef->(@_); - } - elsif(defined $want_array) { - $result[0] = $coderef->(@_); - } - else { - $coderef->(@_); - } - $self->txn_commit; - }; - - # ->connected might unset $@ - copy - my $exception = $@; - if(!$exception) { return $want_array ? 
@result : $result[0] } - - if($tried++ || $self->connected) { - eval { $self->txn_rollback }; - my $rollback_exception = $@; - if($rollback_exception) { - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - $self->throw_exception($exception) # propagate nested rollback - if $rollback_exception =~ /$exception_class/; - - $self->throw_exception( - "Transaction aborted: ${exception}. " - . "Rollback failed: ${rollback_exception}" - ); - } - $self->throw_exception($exception) - } - - # We were not connected, and was first try - reconnect and retry - # via the while loop - carp "Retrying $coderef after catching disconnected exception: $exception" - if $ENV{DBIC_DBIRETRY_DEBUG}; - $self->_populate_dbh; - } + $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth + shift->next::method(@_); } =head2 disconnect @@ -710,22 +896,41 @@ database is not in C mode. =cut sub disconnect { - my ($self) = @_; - - if( $self->_dbh ) { - my @actions; + my $self = shift; - push @actions, ( $self->on_disconnect_call || () ); - push @actions, $self->_parse_connect_do ('on_disconnect_do'); + # this physical disconnect below might very well throw + # in order to unambiguously reset the state - do the cleanup in guard - $self->_do_connection_actions(disconnect_call_ => $_) for @actions; + my $g = scope_guard { - $self->_dbh_rollback unless $self->_dbh_autocommit; + defined( $self->_dbh ) + and dbic_internal_try { $self->_dbh->disconnect }; - $self->_dbh->disconnect; $self->_dbh(undef); - $self->{_dbh_gen}++; + $self->_dbh_details({}); + $self->transaction_depth(undef); + $self->_dbh_autocommit(undef); + $self->savepoints([]); + + # FIXME - this needs reenabling with the proper "no reset on same DSN" check + #$self->_sql_maker(undef); # this may also end up being different + }; + + if( $self->_dbh ) { + + $self->_do_connection_actions(disconnect_call_ => $_) for ( + ( $self->on_disconnect_call || () ), + $self->_parse_connect_do ('on_disconnect_do') + ); + + # stops the "implicit rollback on disconnect" warning + $self->_exec_txn_rollback unless $self->_dbh_autocommit; } + + # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage + # collected before leaving this scope. Depending on the code above, this + # may very well be just a preventive measure guarding future modifications + undef; } =head2 with_deferred_fk_checks @@ -745,8 +950,8 @@ in MySQL's case disabled entirely. # Storage subclasses should override this sub with_deferred_fk_checks { - my ($self, $sub) = @_; - $sub->(); + #my ($self, $sub) = @_; + $_[1]->(); } =head2 connected @@ -759,69 +964,41 @@ sub with_deferred_fk_checks { =back -Verifies that the the current database handle is active and ready to execute -an SQL statement (i.e. the connection did not get stale, server is still +Verifies that the current database handle is active and ready to execute +an SQL statement (e.g. the connection did not get stale, server is still answering, etc.) This method is used internally by L. 
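For example (a usage sketch, assuming $schema is any connected schema instance):

  my $storage = $schema->storage;

  # note that this pings the server and can be slow on some engines
  if( $storage->connected ) {
    ...
  }

  # or simply demand a healthy handle, reconnecting if necessary
  $storage->ensure_connected;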
=cut sub connected { - my $self = shift; - return 0 unless $self->_seems_connected; + return 0 unless $_[0]->_seems_connected; #be on the safe side - local $self->_dbh->{RaiseError} = 1; + local $_[0]->_dbh->{RaiseError} = 1; - return $self->_ping; + return $_[0]->_ping; } sub _seems_connected { - my $self = shift; + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; - my $dbh = $self->_dbh - or return 0; - - if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) { - $self->_dbh(undef); - $self->{_dbh_gen}++; - return 0; - } - else { - $self->_verify_pid; - return 0 if !$self->_dbh; - } + $_[0]->_dbh + and + $_[0]->_dbh->FETCH('Active') + and + return 1; - return $dbh->FETCH('Active'); + # explicitly reset all state + $_[0]->disconnect; + return 0; } sub _ping { - my $self = shift; - - my $dbh = $self->_dbh or return 0; - - return $dbh->ping; -} - -# handle pid changes correctly -# NOTE: assumes $self->_dbh is a valid $dbh -sub _verify_pid { - my ($self) = @_; - - return if defined $self->_conn_pid && $self->_conn_pid == $$; - - $self->_dbh->{InactiveDestroy} = 1; - $self->_dbh(undef); - $self->{_dbh_gen}++; - - return; + ($_[0]->_dbh || return 0)->ping; } sub ensure_connected { - my ($self) = @_; - - unless ($self->connected) { - $self->_populate_dbh; - } + $_[0]->connected || ( $_[0]->_populate_dbh && 1 ); } =head2 dbh @@ -829,47 +1006,82 @@ sub ensure_connected { Returns a C<$dbh> - a data base handle of class L. The returned handle is guaranteed to be healthy by implicitly calling L, and if necessary performing a reconnection before returning. Keep in mind that this -is very B on some database engines. Consider using L +is very B on some database engines. Consider using L instead. =cut sub dbh { - my ($self) = @_; - - if (not $self->_dbh) { - $self->_populate_dbh; - } else { - $self->ensure_connected; - } - return $self->_dbh; + # maybe save a ping call + $_[0]->_dbh + ? ( $_[0]->ensure_connected and $_[0]->_dbh ) + : $_[0]->_populate_dbh + ; } # this is the internal "get dbh or connect (don't check)" method sub _get_dbh { - my $self = shift; - $self->_verify_pid if $self->_dbh; - $self->_populate_dbh unless $self->_dbh; - return $self->_dbh; + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; + $_[0]->_dbh || $_[0]->_populate_dbh; } -sub _sql_maker_args { - my ($self) = @_; +# *DELIBERATELY* not a setter (for the time being) +# Too intertwined with everything else for any kind of sanity +sub sql_maker { + my $self = shift; - return ( - bindtype=>'columns', - array_datatypes => 1, - limit_dialect => $self->_get_dbh, - %{$self->_sql_maker_opts} - ); -} + $self->throw_exception('sql_maker() is not a setter method') if @_; -sub sql_maker { - my ($self) = @_; unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; - $self->ensure_class_loaded ($sql_maker_class); - $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args )); + + my %opts = %{$self->_sql_maker_opts||{}}; + my $dialect = + $opts{limit_dialect} + || + $self->sql_limit_dialect + || + do { + my $s_class = (ref $self) || $self; + carp_unique ( + "Your storage class ($s_class) does not set sql_limit_dialect and you " + . 'have not supplied an explicit limit_dialect in your connection_info. ' + . 'DBIC will attempt to use the GenericSubQ dialect, which works on most ' + . 'databases but can be (and often is) painfully slow. ' + . 
"Please file an RT ticket against '$s_class'" + ) if $self->_dbi_connect_info->[0]; + + 'GenericSubQ'; + } + ; + + my ($quote_char, $name_sep); + + if ($opts{quote_names}) { + $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do { + my $s_class = (ref $self) || $self; + carp_unique ( + "You requested 'quote_names' but your storage class ($s_class) does " + . 'not explicitly define a default sql_quote_char and you have not ' + . 'supplied a quote_char as part of your connection_info. DBIC will ' + .q{default to the ANSI SQL standard quote '"', which works most of } + . "the time. Please file an RT ticket against '$s_class'." + ); + + '"'; # RV + }; + + $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep; + } + + $self->_sql_maker($sql_maker_class->new( + bindtype=>'columns', + array_datatypes => 1, + limit_dialect => $dialect, + ($quote_char ? (quote_char => $quote_char) : ()), + name_sep => ($name_sep || '.'), + %opts, + )); } return $self->_sql_maker; } @@ -879,32 +1091,212 @@ sub _rebless {} sub _init {} sub _populate_dbh { - my ($self) = @_; - my @info = @{$self->_dbi_connect_info || []}; - $self->_dbh(undef); # in case ->connected failed we might get sent here - $self->_dbh($self->_connect(@info)); + # reset internal states + # also in case ->connected failed we might get sent here + $_[0]->disconnect; - $self->_conn_pid($$); - $self->_conn_tid(threads->tid) if $INC{'threads.pm'}; + $_[0]->_dbh($_[0]->_connect); - $self->_determine_driver; + $_[0]->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads + + $_[0]->_determine_driver; # Always set the transaction depth on connect, since # there is no transaction in progress by definition - $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; + $_[0]->transaction_depth( $_[0]->_dbh_autocommit ? 0 : 1 ); + + $_[0]->_run_connection_actions unless $_[0]->{_in_determine_driver}; - $self->_run_connection_actions unless $self->{_in_determine_driver}; + $_[0]->_dbh; } sub _run_connection_actions { + + $_[0]->_do_connection_actions(connect_call_ => $_) for ( + ( $_[0]->on_connect_call || () ), + $_[0]->_parse_connect_do ('on_connect_do'), + ); +} + + + +sub set_use_dbms_capability { + $_[0]->set_inherited ($_[1], $_[2]); +} + +sub get_use_dbms_capability { + my ($self, $capname) = @_; + + my $use = $self->get_inherited ($capname); + return defined $use + ? $use + : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) } + ; +} + +sub set_dbms_capability { + $_[0]->_dbh_details->{capability}{$_[1]} = $_[2]; +} + +sub get_dbms_capability { + my ($self, $capname) = @_; + + my $cap = $self->_dbh_details->{capability}{$capname}; + + unless (defined $cap) { + if (my $meth = $self->can ("_determine$capname")) { + $cap = $self->$meth ? 1 : 0; + } + else { + $cap = 0; + } + + $self->set_dbms_capability ($capname, $cap); + } + + return $cap; +} + +sub _server_info { my $self = shift; - my @actions; - push @actions, ( $self->on_connect_call || () ); - push @actions, $self->_parse_connect_do ('on_connect_do'); + # FIXME - ideally this needs to be an ||= assignment, and the final + # assignment at the end of this do{} should be gone entirely. 
However + # this confuses CXSA: https://rt.cpan.org/Ticket/Display.html?id=103296 + $self->_dbh_details->{info} || do { + + # this guarantees that problematic conninfo won't be hidden + # by the try{} below + $self->ensure_connected; + + my $info = {}; + + my $server_version = dbic_internal_try { + $self->_get_server_version + } catch { + # driver determination *may* use this codepath + # in which case we must rethrow + $self->throw_exception($_) if $self->{_in_determine_driver}; + + # $server_version on failure + undef; + }; + + if (defined $server_version) { + $info->{dbms_version} = $server_version; + + my ($numeric_version) = $server_version =~ /^([\d\.]+)/; + my @verparts = split (/\./, $numeric_version); + if ( + @verparts + && + $verparts[0] <= 999 + ) { + # consider only up to 3 version parts, iff not more than 3 digits + my @use_parts; + while (@verparts && @use_parts < 3) { + my $p = shift @verparts; + last if $p > 999; + push @use_parts, $p; + } + push @use_parts, 0 while @use_parts < 3; - $self->_do_connection_actions(connect_call_ => $_) for @actions; + $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts; + } + } + + $self->_dbh_details->{info} = $info; + }; +} + +sub _get_server_version { + shift->_dbh_get_info('SQL_DBMS_VER'); +} + +sub _dbh_get_info { + my ($self, $info) = @_; + + if ($info =~ /[^0-9]/) { + require DBI::Const::GetInfoType; + $info = $DBI::Const::GetInfoType::GetInfoType{$info}; + $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType") + unless defined $info; + } + + $self->_get_dbh->get_info($info); +} + +sub _describe_connection { + require DBI::Const::GetInfoReturn; + + my $self = shift; + + my $drv; + dbic_internal_try { + $drv = $self->_extract_driver_from_connect_info; + $self->ensure_connected; + }; + + $drv = "DBD::$drv" if $drv; + + my $res = { + DBIC_DSN => $self->_dbi_connect_info->[0], + DBI_VER => DBI->VERSION, + DBIC_VER => DBIx::Class->VERSION, + DBIC_DRIVER => ref $self, + $drv ? ( + DBD => $drv, + DBD_VER => dbic_internal_try { $drv->VERSION }, + ) : (), + }; + + # try to grab data even if we never managed to connect + # will cover us in cases of an oddly broken half-connect + for my $inf ( + #keys %DBI::Const::GetInfoType::GetInfoType, + qw/ + SQL_CURSOR_COMMIT_BEHAVIOR + SQL_CURSOR_ROLLBACK_BEHAVIOR + SQL_CURSOR_SENSITIVITY + SQL_DATA_SOURCE_NAME + SQL_DBMS_NAME + SQL_DBMS_VER + SQL_DEFAULT_TXN_ISOLATION + SQL_DM_VER + SQL_DRIVER_NAME + SQL_DRIVER_ODBC_VER + SQL_DRIVER_VER + SQL_EXPRESSIONS_IN_ORDERBY + SQL_GROUP_BY + SQL_IDENTIFIER_CASE + SQL_IDENTIFIER_QUOTE_CHAR + SQL_MAX_CATALOG_NAME_LEN + SQL_MAX_COLUMN_NAME_LEN + SQL_MAX_IDENTIFIER_LEN + SQL_MAX_TABLE_NAME_LEN + SQL_MULTIPLE_ACTIVE_TXN + SQL_MULT_RESULT_SETS + SQL_NEED_LONG_DATA_LEN + SQL_NON_NULLABLE_COLUMNS + SQL_ODBC_VER + SQL_QUALIFIER_NAME_SEPARATOR + SQL_QUOTED_IDENTIFIER_CASE + SQL_TXN_CAPABLE + SQL_TXN_ISOLATION_OPTION + / + ) { + # some drivers barf on things they do not know about instead + # of returning undef + my $v = dbic_internal_try { $self->_dbh_get_info($inf) }; + next unless defined $v; + + #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} ); + my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v); + $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? 
" ($expl)" : '' ); + } + + $res; } sub _determine_driver { @@ -919,30 +1311,46 @@ sub _determine_driver { if ($self->_dbh) { # we are connected $driver = $self->_dbh->{Driver}{Name}; $started_connected = 1; - } else { - # if connect_info is a CODEREF, we have no choice but to connect - if (ref $self->_dbi_connect_info->[0] && - Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') { - $self->_populate_dbh; - $driver = $self->_dbh->{Driver}{Name}; + } + else { + $driver = $self->_extract_driver_from_connect_info; + } + + if ($driver) { + my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; + if ($self->load_optional_class($storage_class)) { + mro::set_mro($storage_class, 'c3'); + bless $self, $storage_class; + $self->_rebless(); } else { - # try to use dsn to not require being connected, the driver may still - # force a connection in _rebless to determine version - ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + $self->_warn_undetermined_driver( + 'This version of DBIC does not yet seem to supply a driver for ' + . "your particular RDBMS and/or connection method ('$driver')." + ); } } - - my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; - if ($self->load_optional_class($storage_class)) { - mro::set_mro($storage_class, 'c3'); - bless $self, $storage_class; - $self->_rebless(); + else { + $self->_warn_undetermined_driver( + 'Unable to extract a driver name from connect info - this ' + . 'should not have happened.' + ); } } $self->_driver_determined(1); + Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO; + + if ($self->can('source_bind_attributes')) { + $self->throw_exception( + "Your storage subclass @{[ ref $self ]} provides (or inherits) the method " + . 'source_bind_attributes() for which support has been removed as of Jan 2013. ' + . 'If you are not sure how to proceed please contact the development team via ' + . DBIx::Class::_ENV_::HELP_URL + ); + } + $self->_init; # run driver-specific initializations $self->_run_connection_actions @@ -950,25 +1358,118 @@ sub _determine_driver { } } +sub _extract_driver_from_connect_info { + my $self = shift; + + my $drv; + + # if connect_info is a CODEREF, we have no choice but to connect + if ( + ref $self->_dbi_connect_info->[0] + and + reftype $self->_dbi_connect_info->[0] eq 'CODE' + ) { + $self->_populate_dbh; + $drv = $self->_dbh->{Driver}{Name}; + } + else { + # try to use dsn to not require being connected, the driver may still + # force a connection later in _rebless to determine version + # (dsn may not be supplied at all if all we do is make a mock-schema) + # + # Use the same regex as the one used by DBI itself (even if the use of + # \w is odd given unicode): + # https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L621 + # + # DO NOT use https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L559-566 + # as there is a long-standing precedent of not loading DBI.pm until the + # very moment we are actually connecting + # + ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:(\w*)/i; + $drv ||= $ENV{DBI_DRIVER}; + } + + return $drv; +} + +sub _determine_connector_driver { + my ($self, $conn) = @_; + + my $dbtype = $self->_get_rdbms_name; + + if (not $dbtype) { + $self->_warn_undetermined_driver( + 'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your ' + . "$conn connector - this should not have happened." 
+    );
+    return;
+  }
+
+  $dbtype =~ s/\W/_/gi;
+
+  my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}";
+  return if $self->isa($subclass);
+
+  if ($self->load_optional_class($subclass)) {
+    bless $self, $subclass;
+    $self->_rebless;
+  }
+  else {
+    $self->_warn_undetermined_driver(
+      'This version of DBIC does not yet seem to supply a driver for '
+    . "your particular RDBMS and/or connection method ('$conn/$dbtype')."
+    );
+  }
+}
+
+sub _get_rdbms_name { shift->_dbh_get_info('SQL_DBMS_NAME') }
+
+sub _warn_undetermined_driver {
+  my ($self, $msg) = @_;
+
+  carp_once ($msg . ' While we will attempt to continue anyway, the results '
+  . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
+  . "does not go away, file a bugreport including the following info:\n"
+  . dump_value $self->_describe_connection
+  );
+}
+
 sub _do_connection_actions {
-  my $self = shift;
-  my $method_prefix = shift;
-  my $call = shift;
-
-  if (not ref($call)) {
-    my $method = $method_prefix . $call;
-    $self->$method(@_);
-  } elsif (ref($call) eq 'CODE') {
-    $self->$call(@_);
-  } elsif (ref($call) eq 'ARRAY') {
-    if (ref($call->[0]) ne 'ARRAY') {
-      $self->_do_connection_actions($method_prefix, $_) for @$call;
-    } else {
-      $self->_do_connection_actions($method_prefix, @$_) for @$call;
+  my ($self, $method_prefix, $call, @args) = @_;
+
+  dbic_internal_try {
+    if (not ref($call)) {
+      my $method = $method_prefix . $call;
+      $self->$method(@args);
+    }
+    elsif (ref($call) eq 'CODE') {
+      $self->$call(@args);
+    }
+    elsif (ref($call) eq 'ARRAY') {
+      if (ref($call->[0]) ne 'ARRAY') {
+        $self->_do_connection_actions($method_prefix, $_) for @$call;
+      }
+      else {
+        $self->_do_connection_actions($method_prefix, @$_) for @$call;
+      }
+    }
+    else {
+      $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
     }
-  } else {
-    $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) );
   }
+  catch {
+    if ( $method_prefix =~ /^connect/ ) {
+      # this is an on_connect cycle - we can't just throw while leaving
+      # a handle in an undefined state in our storage object
+      # kill it with fire and rethrow
+      $self->_dbh(undef);
+      $self->disconnect; # the $dbh is gone, but we still need to reset the rest
+      $self->throw_exception( $_[0] );
+    }
+    else {
+      carp "Disconnect action failed: $_[0]";
+    }
+  };

   return $self;
 }
@@ -983,7 +1484,19 @@ sub disconnect_call_do_sql {
   $self->_do_query(@_);
 }

-# override in db-specific backend when necessary
+=head2 connect_call_datetime_setup
+
+A no-op stub method, provided so that one can always safely supply the
+L
+
+  on_connect_call => 'datetime_setup'
+
+This way one does not need to know in advance whether the underlying
+storage requires any sort of hand-holding when dealing with calendar
+data.
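For instance (a sketch; the schema class and connection arguments are invented):

  my $schema = My::Schema->connect(
    $dsn, $user, $pass,
    { on_connect_call => 'datetime_setup' },
  );

Storages that do need calendar-related session setup override this method;
everything else inherits the no-op stub defined below.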
+ +=cut + sub connect_call_datetime_setup { 1 } sub _do_query { @@ -1003,163 +1516,120 @@ sub _do_query { my $attrs = shift @do_args; my @bind = map { [ undef, $_ ] } @do_args; - $self->_query_start($sql, @bind); - $self->_get_dbh->do($sql, $attrs, @do_args); - $self->_query_end($sql, @bind); + $self->dbh_do(sub { + $_[0]->_query_start($sql, \@bind); + $_[1]->do($sql, $attrs, @do_args); + $_[0]->_query_end($sql, \@bind); + }); } return $self; } sub _connect { - my ($self, @info) = @_; + my $self = shift; + + my $info = $self->_dbi_connect_info; - $self->throw_exception("You failed to provide any connection info") - if !@info; + $self->throw_exception("You did not provide any connection_info") + unless defined $info->[0]; my ($old_connect_via, $dbh); - if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) { - $old_connect_via = $DBI::connect_via; - $DBI::connect_via = 'connect'; - } + local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}; + + # this odd anonymous coderef dereference is in fact really + # necessary to avoid the unwanted effect described in perl5 + # RT#75792 + # + # in addition the coderef itself can't reside inside the try{} block below + # as it somehow triggers a leak under perl -d + my $dbh_error_handler_installer = sub { + weaken (my $weak_self = $_[0]); + + # the coderef is blessed so we can distinguish it from externally + # supplied handles (which must be preserved) + $_[1]->{HandleError} = bless sub { + if ($weak_self) { + $weak_self->throw_exception("DBI Exception: $_[0]"); + } + else { + # the handler may be invoked by something totally out of + # the scope of DBIC + DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); + } + }, '__DBIC__DBH__ERROR__HANDLER__'; + }; - eval { - if(ref $info[0] eq 'CODE') { - $dbh = &{$info[0]} + dbic_internal_try { + if(ref $info->[0] eq 'CODE') { + $dbh = $info->[0]->(); } else { - $dbh = DBI->connect(@info); - } - - if($dbh && !$self->unsafe) { - my $weak_self = $self; - Scalar::Util::weaken($weak_self); - $dbh->{HandleError} = sub { - if ($weak_self) { - $weak_self->throw_exception("DBI Exception: $_[0]"); - } - else { - # the handler may be invoked by something totally out of - # the scope of DBIC - croak ("DBI Exception: $_[0]"); - } - }; - $dbh->{ShowErrorStatement} = 1; - $dbh->{RaiseError} = 1; - $dbh->{PrintError} = 0; + require DBI; + $dbh = DBI->connect(@$info); } - }; - - $DBI::connect_via = $old_connect_via if $old_connect_via; - - $self->throw_exception("DBI Connection failed: " . 
($@||$DBI::errstr)) - if !$dbh || $@; - - $self->_dbh_autocommit($dbh->{AutoCommit}); - - $dbh; -} - -sub svp_begin { - my ($self, $name) = @_; - - $name = $self->_svp_generate_name - unless defined $name; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_begin'); - - push @{ $self->{savepoints} }, $name; - - $self->debugobj->svp_begin($name) if $self->debug; - - return $self->_svp_begin($name); -} - -sub svp_release { - my ($self, $name) = @_; - - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; - - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_release'); - if (defined $name) { - $self->throw_exception ("Savepoint '$name' does not exist") - unless grep { $_ eq $name } @{ $self->{savepoints} }; + die $DBI::errstr unless $dbh; - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. - my $svp; + die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. " + . 'This handle is disconnected as far as DBIC is concerned, and we can ' + . 'not continue', + ref $info->[0] eq 'CODE' + ? "Connection coderef $info->[0] returned a" + : 'DBI->connect($schema->storage->connect_info) resulted in a' + ) unless $dbh->FETCH('Active'); - do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name; - } else { - $name = pop @{ $self->{savepoints} }; - } + # sanity checks unless asked otherwise + unless ($self->unsafe) { - $self->debugobj->svp_release($name) if $self->debug; - - return $self->_svp_release($name); -} + $self->throw_exception( + 'Refusing clobbering of {HandleError} installed on externally supplied ' + ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute." + ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__'; -sub svp_rollback { - my ($self, $name) = @_; + # Default via _default_dbi_connect_attributes is 1, hence it was an explicit + # request, or an external handle. Complain and set anyway + unless ($dbh->{RaiseError}) { + carp( ref $info->[0] eq 'CODE' - $self->throw_exception ("You can't use savepoints outside a transaction") - if $self->{transaction_depth} == 0; + ? "The 'RaiseError' of the externally supplied DBI handle is set to false. " + ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect " + .'attribute has been supplied' - $self->throw_exception ("Your Storage implementation doesn't support savepoints") - unless $self->can('_svp_rollback'); + : 'RaiseError => 0 supplied in your connection_info, without an explicit ' + .'unsafe => 1. Toggling RaiseError back to true' + ); - if (defined $name) { - # If they passed us a name, verify that it exists in the stack - unless(grep({ $_ eq $name } @{ $self->{savepoints} })) { - $self->throw_exception("Savepoint '$name' does not exist!"); + $dbh->{RaiseError} = 1; } - # Dig through the stack until we find the one we are releasing. This keeps - # the stack up to date. - while(my $s = pop(@{ $self->{savepoints} })) { - last if($s eq $name); - } - # Add the savepoint back to the stack, as a rollback doesn't remove the - # named savepoint, only everything after it. 
- push(@{ $self->{savepoints} }, $name); - } else { - # We'll assume they want to rollback to the last savepoint - $name = $self->{savepoints}->[-1]; + $dbh_error_handler_installer->($self, $dbh); + } } + catch { + $self->throw_exception("DBI Connection failed: $_") + }; - $self->debugobj->svp_rollback($name) if $self->debug; - - return $self->_svp_rollback($name); -} - -sub _svp_generate_name { - my ($self) = @_; - - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); + $self->_dbh_autocommit($dbh->{AutoCommit}); + return $dbh; } sub txn_begin { - my $self = shift; - if($self->{transaction_depth} == 0) { - $self->debugobj->txn_begin() - if $self->debug; - $self->_dbh_begin_work; + # this means we have not yet connected and do not know the AC status + # (e.g. coderef $dbh), need a full-fledged connection check + if (! defined $_[0]->_dbh_autocommit) { + $_[0]->ensure_connected; } - elsif ($self->auto_savepoint) { - $self->svp_begin; + # Otherwise simply connect or re-connect on pid changes + else { + $_[0]->_get_dbh; } - $self->{transaction_depth}++; + + shift->next::method(@_); } -sub _dbh_begin_work { +sub _exec_txn_begin { my $self = shift; # if the user is utilizing txn_do - good for him, otherwise we need to @@ -1167,7 +1637,7 @@ sub _dbh_begin_work { # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping" # will be replaced by a failure of begin_work itself (which will be # then retried on reconnect) - if ($self->{_in_dbh_do}) { + if ($self->{_in_do_block}) { $self->_dbh->begin_work; } else { $self->dbh_do(sub { $_[1]->begin_work }); @@ -1176,629 +1646,952 @@ sub _dbh_begin_work { sub txn_commit { my $self = shift; - if ($self->{transaction_depth} == 1) { - $self->debugobj->txn_commit() - if ($self->debug); - $self->_dbh_commit; - $self->{transaction_depth} = 0 - if $self->_dbh_autocommit; - } - elsif($self->{transaction_depth} > 1) { - $self->{transaction_depth}--; - $self->svp_release - if $self->auto_savepoint; + + $self->throw_exception("Unable to txn_commit() on a disconnected storage") + unless $self->_seems_connected; + + # esoteric case for folks using external $dbh handles + if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { + carp "Storage transaction_depth 0 does not match " + ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway"; + $self->transaction_depth(1); } + + $self->next::method(@_); + + # if AutoCommit is disabled txn_depth never goes to 0 + # as a new txn is started immediately on commit + $self->transaction_depth(1) if ( + !$self->transaction_depth + and + defined $self->_dbh_autocommit + and + ! 
$self->_dbh_autocommit + ); } -sub _dbh_commit { - my $self = shift; - my $dbh = $self->_dbh - or $self->throw_exception('cannot COMMIT on a disconnected handle'); - $dbh->commit; +sub _exec_txn_commit { + shift->_dbh->commit; } sub txn_rollback { my $self = shift; - my $dbh = $self->_dbh; - eval { - if ($self->{transaction_depth} == 1) { - $self->debugobj->txn_rollback() - if ($self->debug); - $self->{transaction_depth} = 0 - if $self->_dbh_autocommit; - $self->_dbh_rollback; - } - elsif($self->{transaction_depth} > 1) { - $self->{transaction_depth}--; - if ($self->auto_savepoint) { - $self->svp_rollback; - $self->svp_release; - } - } - else { - die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new; - } - }; - if ($@) { - my $error = $@; - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - $error =~ /$exception_class/ and $self->throw_exception($error); - # ensure that a failed rollback resets the transaction depth - $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; - $self->throw_exception($error); + + # do a minimal connectivity check due to weird shit like + # https://rt.cpan.org/Public/Bug/Display.html?id=62370 + $self->throw_exception("lost connection to storage") + unless $self->_seems_connected; + + # esoteric case for folks using external $dbh handles + if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { + carp "Storage transaction_depth 0 does not match " + ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway"; + $self->transaction_depth(1); } + + $self->next::method(@_); + + # if AutoCommit is disabled txn_depth never goes to 0 + # as a new txn is started immediately on commit + $self->transaction_depth(1) if ( + !$self->transaction_depth + and + defined $self->_dbh_autocommit + and + ! $self->_dbh_autocommit + ); } -sub _dbh_rollback { - my $self = shift; - my $dbh = $self->_dbh - or $self->throw_exception('cannot ROLLBACK on a disconnected handle'); - $dbh->rollback; +sub _exec_txn_rollback { + shift->_dbh->rollback; } +# generate the DBI-specific stubs, which then fallback to ::Storage proper +quote_sub __PACKAGE__ . "::$_" => sprintf (<<'EOS', $_) for qw(svp_begin svp_release svp_rollback); + $_[0]->throw_exception('Unable to %s() on a disconnected storage') + unless $_[0]->_seems_connected; + shift->next::method(@_); +EOS + # This used to be the top-half of _execute. It was split out to make it # easier to override in NoBindVars without duping the rest. It takes up # all of _execute's args, and emits $sql, @bind. sub _prep_for_execute { - my ($self, $op, $extra_bind, $ident, $args) = @_; + #my ($self, $op, $ident, $args) = @_; + return shift->_gen_sql_bind(@_) +} - if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { - $ident = $ident->from(); +sub _gen_sql_bind { + my ($self, $op, $ident, $args) = @_; + + my ($colinfos, $from); + if ( blessed($ident) ) { + $from = $ident->from; + $colinfos = $ident->columns_info; } - my ($sql, @bind) = $self->sql_maker->$op($ident, @$args); + my ($sql, $bind); + ($sql, @$bind) = $self->sql_maker->$op( ($from || $ident), @$args ); + + $bind = $self->_resolve_bindattrs( + $ident, [ @{$args->[2]{bind}||[]}, @$bind ], $colinfos + ); + + if ( + ! $ENV{DBIC_DT_SEARCH_OK} + and + $op eq 'select' + and + grep { + length ref $_->[1] + and + blessed($_->[1]) + and + $_->[1]->isa('DateTime') + } @$bind + ) { + carp_unique 'DateTime objects passed to search() are not supported ' + . 'properly (InflateColumn::DateTime formats and settings are not ' + . 
'respected.) See ".. format a DateTime object for searching?" in ' + . 'DBIx::Class::Manual::FAQ. To disable this warning for good ' + . 'set $ENV{DBIC_DT_SEARCH_OK} to true' + } - unshift(@bind, - map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind) - if $extra_bind; - return ($sql, \@bind); + return( $sql, $bind ); } +sub _resolve_bindattrs { + my ($self, $ident, $bind, $colinfos) = @_; + + my $resolve_bindinfo = sub { + #my $infohash = shift; + + $colinfos ||= { %{ $self->_resolve_column_info($ident) } }; + + my $ret; + if (my $col = $_[0]->{dbic_colname}) { + $ret = { %{$_[0]} }; -sub _fix_bind_params { - my ($self, @bind) = @_; + $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type} + if $colinfos->{$col}{data_type}; + + $ret->{sqlt_size} ||= $colinfos->{$col}{size} + if $colinfos->{$col}{size}; + } + + $ret || $_[0]; + }; - ### Turn @bind from something like this: - ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] ) - ### to this: - ### ( "'1'", "'1'", "'3'" ) - return - map { - if ( defined( $_ && $_->[1] ) ) { - map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ]; - } - else { q{'NULL'}; } - } @bind; + return [ map { + ( ref $_ ne 'ARRAY' or @$_ != 2 ) ? [ {}, $_ ] + : ( ! defined $_->[0] ) ? [ {}, $_->[1] ] + : (ref $_->[0] eq 'HASH') ? [( + ! keys %{$_->[0]} + or + exists $_->[0]{dbd_attrs} + or + $_->[0]{sqlt_datatype} + ) ? $_->[0] + : $resolve_bindinfo->($_->[0]) + , $_->[1] + ] + : (ref $_->[0] eq 'SCALAR') ? [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ] + : [ $resolve_bindinfo->( + { dbic_colname => $_->[0] } + ), $_->[1] ] + } @$bind ]; +} + +sub _format_for_trace { + #my ($self, $bind) = @_; + + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) + ### to this: + ### ( "'1'", "'3'" ) + + map { + defined( $_ && $_->[1] ) + ? qq{'$_->[1]'} + : q{NULL} + } @{$_[1] || []}; } sub _query_start { - my ( $self, $sql, @bind ) = @_; + my ( $self, $sql, $bind ) = @_; + + $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) + if $self->debug; +} + +sub _query_end { + my ( $self, $sql, $bind ) = @_; + + $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) ) + if $self->debug; +} + +sub _dbi_attrs_for_bind { + #my ($self, $ident, $bind) = @_; + + return [ map { - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); + exists $_->{dbd_attrs} ? $_->{dbd_attrs} - $self->debugobj->query_start( $sql, @bind ); + : ! $_->{sqlt_datatype} ? undef + + : do { + + # cache the result in the dbh_details hash, as it (usually) can not change + # unless we connect to something else + # FIXME: for the time being Oracle is an exception, pending a rewrite of + # the LOB storage + my $cache = $_[0]->_dbh_details->{_datatype_map_cache} ||= {}; + + $cache->{$_->{sqlt_datatype}} = $_[0]->bind_attribute_by_data_type($_->{sqlt_datatype}) + if ! exists $cache->{$_->{sqlt_datatype}}; + + $cache->{$_->{sqlt_datatype}}; + + } } map { $_->[0] } @{$_[2]} ]; +} + +sub _execute { + my ($self, $op, $ident, @args) = @_; + + my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args); + + # not even a PID check - we do not care about the state of the _dbh. 
+ # All we need is to get the appropriate drivers loaded if they aren't + # already so that the assumption in ad7c50fc26e holds + $self->_populate_dbh unless $self->_dbh; + + $self->dbh_do( _dbh_execute => # retry over disconnects + $sql, + $bind, + $self->_dbi_attrs_for_bind($ident, $bind), + ); +} + +sub _dbh_execute { + my ($self, $dbh, $sql, $bind, $bind_attrs) = @_; + + $self->_query_start( $sql, $bind ); + + my $sth = $self->_bind_sth_params( + $self->_prepare_sth($dbh, $sql), + $bind, + $bind_attrs, + ); + + # Can this fail without throwing an exception anyways??? + my $rv = $sth->execute(); + $self->throw_exception( + $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' + ) if !$rv; + + $self->_query_end( $sql, $bind ); + + return (wantarray ? ($rv, $sth, @$bind) : $rv); +} + +sub _prepare_sth { + my ($self, $dbh, $sql) = @_; + + # 3 is the if_active parameter which avoids active sth re-use + my $sth = $self->disable_sth_caching + ? $dbh->prepare($sql) + : $dbh->prepare_cached($sql, {}, 3); + + # XXX You would think RaiseError would make this impossible, + # but apparently that's not true :( + $self->throw_exception( + $dbh->errstr + || + sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without " + .'an exception and/or setting $dbh->errstr', + length ($sql) > 20 + ? substr($sql, 0, 20) . '...' + : $sql + , + 'DBD::' . $dbh->{Driver}{Name}, + ) + ) if !$sth; + + $sth; +} + +sub _bind_sth_params { + my ($self, $sth, $bind, $bind_attrs) = @_; + + for my $i (0 .. $#$bind) { + if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts + $sth->bind_param_inout( + $i + 1, # bind params counts are 1-based + $bind->[$i][1], + $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size + $bind_attrs->[$i], + ); + } + else { + # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD + my $v = ( length ref $bind->[$i][1] and is_plain_value $bind->[$i][1] ) + ? "$bind->[$i][1]" + : $bind->[$i][1] + ; + + $sth->bind_param( + $i + 1, + # The temp-var is CRUCIAL - DO NOT REMOVE IT, breaks older DBD::SQLite RT#79576 + $v, + $bind_attrs->[$i], + ); } + } + + $sth; } -sub _query_end { - my ( $self, $sql, @bind ) = @_; +sub _prefetch_autovalues { + my ($self, $source, $colinfo, $to_insert) = @_; - if ( $self->debug ) { - @bind = $self->_fix_bind_params(@bind); - $self->debugobj->query_end( $sql, @bind ); + my %values; + for my $col (keys %$colinfo) { + if ( + $colinfo->{$col}{auto_nextval} + and + ( + ! exists $to_insert->{$col} + or + is_literal_value($to_insert->{$col}) + ) + ) { + $values{$col} = $self->_sequence_fetch( + 'NEXTVAL', + ( $colinfo->{$col}{sequence} ||= + $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col) + ), + ); } + } + + \%values; } -sub _dbh_execute { - my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_; +sub insert { + my ($self, $source, $to_insert) = @_; + + my $col_infos = $source->columns_info; + + my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert); + + # fuse the values, but keep a separate list of prefetched_values so that + # they can be fused once again with the final return + $to_insert = { %$to_insert, %$prefetched_values }; + + # FIXME - we seem to assume undef values as non-supplied. This is wrong. 
-sub _dbh_execute {
-  my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
+sub insert {
+  my ($self, $source, $to_insert) = @_;
+
+  my $col_infos = $source->columns_info;
+
+  my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert);
+
+  # fuse the values, but keep a separate list of prefetched_values so that
+  # they can be fused once again with the final return
+  $to_insert = { %$to_insert, %$prefetched_values };
+
+  # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+  # Investigate what does it take to s/defined/exists/
+  my %pcols = map { $_ => 1 } $source->primary_columns;
+  my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
+  for my $col ($source->columns) {
+    if ($col_infos->{$col}{is_auto_increment}) {
+      $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+      $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+    }
+
+    # nothing to retrieve when explicit values are supplied
+    next if (
+      defined $to_insert->{$col} and ! is_literal_value($to_insert->{$col})
+    );
+
+    # the 'scalar keys' is a trick to preserve the ->columns declaration order
+    $retrieve_cols{$col} = scalar keys %retrieve_cols if (
+      $pcols{$col}
+        or
+      $col_infos->{$col}{retrieve_on_insert}
+    );
+  };
+
+  local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+  local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
+
+  my ($sqla_opts, @ir_container);
+  if (%retrieve_cols and $self->_use_insert_returning) {
+    $sqla_opts->{returning_container} = \@ir_container
+      if $self->_use_insert_returning_bound;
+
+    $sqla_opts->{returning} = [
+      sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols
+    ];
+  }
+
+  my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts);
+
+  my %returned_cols = %$to_insert;
+  if (my $retlist = $sqla_opts->{returning}) {  # if IR is supported - we will get everything in one set
+
+    unless( @ir_container ) {
+      dbic_internal_try {
+
+        # FIXME - need to investigate why Caelum silenced this in 4d4dc518
+        local $SIG{__WARN__} = sub {};
+
+        @ir_container = $sth->fetchrow_array;
+        $sth->finish;
+
+      } catch {
+        # Evict the $sth from the cache in case we got here, since the finish()
+        # is crucial, at least on older Firebirds, possibly on other engines too
+        #
+        # It would be too complex to make this a proper subclass override,
+        # and besides we already take the try{} penalty, adding a catch that
+        # triggers infrequently is a no-brainer
+        #
+        if( my $kids = $self->_dbh->{CachedKids} ) {
+          $kids->{$_} == $sth and delete $kids->{$_}
+            for keys %$kids
+        }
+      };
+    }
+
+    @returned_cols{@$retlist} = @ir_container if @ir_container;
+  }
+  else {
+    # pull in PK if needed and then everything else
+    if (my @missing_pri = grep { $pcols{$_} } keys %retrieve_cols) {
+
+      $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
+        unless $self->can('last_insert_id');
 
-  my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
+      my @pri_values = $self->last_insert_id($source, @missing_pri);
 
-  $self->_query_start( $sql, @$bind );
+      $self->throw_exception( "Can't get last insert id" )
+        unless (@pri_values == @missing_pri);
 
-  my $sth = $self->sth($sql,$op);
+      @returned_cols{@missing_pri} = @pri_values;
+      delete @retrieve_cols{@missing_pri};
+    }
 
-  my $placeholder_index = 1;
+    # if there is more left to pull
+    if (%retrieve_cols) {
+      $self->throw_exception(
+        'Unable to retrieve additional columns without a Primary Key on ' . $source->source_name
+      ) unless %pcols;
 
-    foreach my $bound (@$bind) {
-      my $attributes = {};
-      my($column_name, @data) = @$bound;
+      my @left_to_fetch = sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols;
 
-      if ($bind_attributes) {
-        $attributes = $bind_attributes->{$column_name}
-          if defined $bind_attributes->{$column_name};
-      }
+      my $cur = DBIx::Class::ResultSet->new($source, {
+        where => { map { $_ => $returned_cols{$_} } (keys %pcols) },
+        select => \@left_to_fetch,
+      })->cursor;
 
-      foreach my $data (@data) {
-        my $ref = ref $data;
-        $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
+      @returned_cols{@left_to_fetch} = $cur->next;
 
-        $sth->bind_param($placeholder_index, $data, $attributes);
-        $placeholder_index++;
+      $self->throw_exception('Duplicate row returned for PK-search after fresh insert')
+        if scalar $cur->next;
     }
   }
 
-  # Can this fail without throwing an exception anyways???
-  my $rv = $sth->execute();
-  $self->throw_exception($sth->errstr) if !$rv;
+  return { %$prefetched_values, %returned_cols };
+}
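The C<%retrieve_cols> bookkeeping above is what powers C<retrieve_on_insert>; a result class opts in roughly like this (column name illustrative):

    __PACKAGE__->add_columns(
      created_on => {
        data_type          => 'timestamp',
        retrieve_on_insert => 1,  # read back via RETURNING when supported,
                                  # otherwise via a PK-based re-select
      },
    );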
+
+sub insert_bulk {
+  carp_unique(
+    'insert_bulk() should have never been exposed as a public method and '
+  . 'calling it is deprecated as of Aug 2014. If you believe having a genuine '
+  . 'use for this method please contact the development team via '
+  . DBIx::Class::_ENV_::HELP_URL
+  );
 
-  $self->_query_end( $sql, @$bind );
+  return '0E0' unless @{$_[3]||[]};
 
-  return (wantarray ? ($rv, $sth, @$bind) : $rv);
+  shift->_insert_bulk(@_);
 }
 
-sub _execute {
-  my $self = shift;
-  $self->dbh_do('_dbh_execute', @_); # retry over disconnects
-}
+sub _insert_bulk {
+  my ($self, $source, $cols, $data) = @_;
 
-sub insert {
-  my ($self, $source, $to_insert) = @_;
+  $self->throw_exception('Calling _insert_bulk without a dataset to process makes no sense')
+    unless @{$data||[]};
 
-  my $ident = $source->from;
-  my $bind_attributes = $self->source_bind_attributes($source);
+  my $colinfos = $source->columns_info($cols);
 
-  my $updated_cols = {};
+  local $self->{_autoinc_supplied_for_op} =
+    (grep { $_->{is_auto_increment} } values %$colinfos)
+      ? 1
+      : 0
+  ;
 
-  foreach my $col ( $source->columns ) {
-    if ( !defined $to_insert->{$col} ) {
-      my $col_info = $source->column_info($col);
+  # get a slice type index based on first row of data
+  # a "column" in this context may refer to more than one bind value
+  # e.g. \[ '?, ?', [...], [...] ]
+  #
+  # construct the value type index - a description of values types for every
+  # per-column slice of $data:
+  #
+  # nonexistent - nonbind literal
+  # 0 - regular value
+  # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
+  #
+  # also construct the column hash to pass to the SQL generator. For plain
+  # (non literal) values - convert the members of the first row into a
+  # literal+bind combo, with extra positional info in the bind attr hashref.
+  # This will allow us to match the order properly, and is so contrived
+  # because a user-supplied literal/bind (or something else specific to a
+  # resultsource and/or storage driver) can inject extra binds along the
+  # way, so one can't rely on "shift positions" ordering at all. Also we
+  # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+  # can be later matched up by address), because we want to supply a real
+  # value on which perhaps e.g. datatype checks will be performed
+  my ($proto_data, $serialized_bind_type_by_col_idx);
+  for my $col_idx (0..$#$cols) {
+    my $colname = $cols->[$col_idx];
+    if (ref $data->[0][$col_idx] eq 'SCALAR') {
+      # no bind value at all - no type
+
+      $proto_data->{$colname} = $data->[0][$col_idx];
+    }
+    elsif (ref $data->[0][$col_idx] eq 'REF' and ref ${$data->[0][$col_idx]} eq 'ARRAY' ) {
+      # repack, so we don't end up mangling the original \[]
+      my ($sql, @bind) = @${$data->[0][$col_idx]};
+
+      # normalization of user supplied stuff
+      my $resolved_bind = $self->_resolve_bindattrs(
+        $source, \@bind, $colinfos,
+      );
+
+      # store value-less (attrs only) bind info - we will be comparing all
+      # supplied binds against this for sanity
+      $serialized_bind_type_by_col_idx->{$col_idx} = serialize [ map { $_->[0] } @$resolved_bind ];
+
+      $proto_data->{$colname} = \[ $sql, map { [
+        # inject slice order to use for $proto_bind construction
+          { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $col_idx, _literal_bind_subindex => $_+1 }
+            =>
+          $resolved_bind->[$_][1]
+        ] } (0 .. $#bind)
+      ];
+    }
+    else {
+      $serialized_bind_type_by_col_idx->{$col_idx} = undef;
 
-      if ( $col_info->{auto_nextval} ) {
-        $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
-          'nextval',
-          $col_info->{sequence} ||
-            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
-        );
-      }
+      $proto_data->{$colname} = \[ '?', [
+        { dbic_colname => $colname, _bind_data_slice_idx => $col_idx }
+          =>
+        $data->[0][$col_idx]
+      ] ];
     }
   }
 
-  $self->_execute('insert' => [], $source, $bind_attributes, $to_insert);
-
-  return $updated_cols;
-}
-
-## Still not quite perfect, and EXPERIMENTAL
-## Currently it is assumed that all values passed will be "normal", i.e. not
-## scalar refs, or at least, all the same type as the first set, the statement is
-## only prepped once.
-sub insert_bulk {
-  my ($self, $source, $cols, $data) = @_;
-
-  my %colvalues;
-  @colvalues{@$cols} = (0..$#$cols);
-
-  for my $i (0..$#$cols) {
-    my $first_val = $data->[0][$i];
-    next unless ref $first_val eq 'SCALAR';
+  my ($sql, $proto_bind) = $self->_prep_for_execute (
+    'insert',
+    $source,
+    [ $proto_data ],
+  );
 
-    $colvalues{ $cols->[$i] } = $first_val;
+  if (! @$proto_bind and keys %$serialized_bind_type_by_col_idx) {
+    # if the bindlist is empty and we had some dynamic binds, this means the
+    # storage ate them away (e.g. the NoBindVars component) and interpolated
+    # them directly into the SQL. This obviously can't be good for multi-inserts
+    $self->throw_exception('Unable to invoke fast-path insert without storage placeholder support');
   }
 
-  # check for bad data and stringify stringifiable objects
-  my $bad_slice = sub {
-    my ($msg, $col_idx, $slice_idx) = @_;
+  # sanity checks
+  # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+  #
+  # use an error reporting closure for convenience (less to pass)
+  my $bad_slice_report_cref = sub {
+    my ($msg, $r_idx, $c_idx) = @_;
     $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
       $msg,
-      $cols->[$col_idx],
+      $cols->[$c_idx],
       do {
-        local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
-        Data::Dumper::Concise::Dumper({
-          map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
-        }),
+        local $Data::Dumper::Maxdepth = 5;
+        dump_value {
+          map { $cols->[$_] =>
+            $data->[$r_idx][$_]
+          } 0..$#$cols
+        };
       }
     );
   };
 
-  for my $datum_idx (0..$#$data) {
-    my $datum = $data->[$datum_idx];
+  for my $col_idx (0..$#$cols) {
+    my $reference_val = $data->[0][$col_idx];
 
-    for my $col_idx (0..$#$cols) {
-      my $val = $datum->[$col_idx];
-      my $sqla_bind = $colvalues{ $cols->[$col_idx] };
-      my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+    for my $row_idx (1..$#$data) {  # we are comparing against what we got from [0] above, hence start from 1
+      my $val = $data->[$row_idx][$col_idx];
 
-      if ($is_literal_sql) {
-        if (not ref $val) {
-          $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+      if (! exists $serialized_bind_type_by_col_idx->{$col_idx}) {  # literal no binds
+        if (ref $val ne 'SCALAR') {
+          $bad_slice_report_cref->(
+            "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
+            $row_idx,
+            $col_idx,
+          );
         }
-        elsif ((my $reftype = ref $val) ne 'SCALAR') {
-          $bad_slice->("$reftype reference found where literal SQL expected",
-            $col_idx, $datum_idx);
+        elsif ($$val ne $$reference_val) {
+          $bad_slice_report_cref->(
+            "Inconsistent literal SQL value (expecting \\'$$reference_val')",
+            $row_idx,
+            $col_idx,
+          );
         }
-        elsif ($$val ne $$sqla_bind){
-          $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
-            $col_idx, $datum_idx);
+      }
+      elsif (! defined $serialized_bind_type_by_col_idx->{$col_idx} ) {  # regular non-literal value
+        if (is_literal_value($val)) {
+          $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
         }
       }
-      elsif (my $reftype = ref $val) {
-        require overload;
-        if (overload::Method($val, '""')) {
-          $datum->[$col_idx] = "".$val;
+      else {  # binds from a \[], compare type and attrs
+        if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
+          $bad_slice_report_cref->(
+            "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
+            $row_idx,
+            $col_idx,
+          );
         }
-        else {
-          $bad_slice->("$reftype reference found where bind expected",
-            $col_idx, $datum_idx);
+        # start drilling down and bail out early on identical refs
+        elsif (
+          $reference_val != $val
+            or
+          $$reference_val != $$val
+        ) {
+          if (${$val}->[0] ne ${$reference_val}->[0]) {
+            $bad_slice_report_cref->(
+              "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+              $row_idx,
+              $col_idx,
+            );
+          }
+          # need to check the bind attrs - a bind will happen only once for
+          # the entire dataset, so any changes further down will be ignored.
+          elsif (
+            $serialized_bind_type_by_col_idx->{$col_idx}
+              ne
+            serialize [
+              map
+                { $_->[0] }
+                @{$self->_resolve_bindattrs(
+                  $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
+                )}
+            ]
+          ) {
+            $bad_slice_report_cref->(
+              'Differing bind attributes on literal/bind values not supported',
+              $row_idx,
+              $col_idx,
+            );
+          }
         }
       }
     }
   }
 
-  my ($sql, $bind) = $self->_prep_for_execute (
-    'insert', undef, $source, [\%colvalues]
-  );
-  my @bind = @$bind;
-
-  my $empty_bind = 1 if (not @bind) &&
-    (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
-
-  if ((not @bind) && (not $empty_bind)) {
-    $self->throw_exception(
-      'Cannot insert_bulk without support for placeholders'
-    );
-  }
-
-  $self->_query_start( $sql, ['__BULK__'] );
-  my $sth = $self->sth($sql);
+  # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
+  # are atomic (even if execute_for_fetch is a single call). Thus a safety
+  # scope guard
+  my $guard = $self->txn_scope_guard;
 
+  $self->_query_start( $sql, @$proto_bind ? [[ {} => '__BULK_INSERT__' ]] : () );
+  my $sth = $self->_prepare_sth($self->_dbh, $sql);
   my $rv = do {
-    if ($empty_bind) {
-      # bind_param_array doesn't work if there are no binds
-      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+    if (@$proto_bind) {
+      # proto bind contains the information on which pieces of $data to pull
+      # $cols is passed in only for prettier error-reporting
+      $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data );
     }
     else {
-#      @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
-      $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+      # bind_param_array doesn't work if there are no binds
+      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
     }
   };
 
-  $self->_query_end( $sql, ['__BULK__'] );
-
-  return (wantarray ? ($rv, $sth, @bind) : $rv);
-}
-
-sub _execute_array {
-  my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+  $self->_query_end( $sql, @$proto_bind ? [[ {} => '__BULK_INSERT__' ]] : () );
 
-  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+  $guard->commit;
 
-  ## This must be an arrayref, else nothing works!
-  my $tuple_status = [];
+  return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
+}
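In user-facing terms C<_insert_bulk> is the void-context C<populate> fast-path, where the first row fixes the shape every subsequent row must match - e.g. (resultset and columns illustrative):

    $schema->resultset('Artist')->populate([
      [ qw( name registered ) ],
      [ 'foo', \'now()' ],   # literal SQL must be identical in every row
      [ 'bar', \'now()' ],   # a plain value here instead would trip the slice sanity checks
    ]);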
 
-  ## Get the bind_attributes, if any exist
-  my $bind_attributes = $self->source_bind_attributes($source);
+# execute_for_fetch is capable of returning data just fine (it means it
+# can be used for INSERT...RETURNING and UPDATE...RETURNING). Since this
+# is the void-populate fast-path we will just ignore this altogether
+# for the time being.
+sub _dbh_execute_for_fetch {
+  my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;
 
-  ## Bind the values and execute
-  my $placeholder_index = 1;
+  # If we have any bind attributes to take care of, we will bind the
+  # proto-bind data (which will never be used by execute_for_fetch)
+  # However since column bindtypes are "sticky", this is sufficient
+  # to get the DBD to apply the bindtype to all values later on
+  my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);
 
-  foreach my $bound (@$bind) {
+  for my $i (0 .. $#$proto_bind) {
+    $sth->bind_param (
+      $i+1,  # DBI bind indexes are 1-based
+      $proto_bind->[$i][1],
+      $bind_attrs->[$i],
+    ) if defined $bind_attrs->[$i];
+  }
 
-    my $attributes = {};
-    my ($column_name, $data_index) = @$bound;
+  # At this point $data slots named in the _bind_data_slice_idx of
+  # each piece of $proto_bind are either \[]s or plain values to be
+  # passed in. Construct the dispensing coderef. *NOTE* the order
+  # of $data will differ from that of the ?s in the SQL (due to
+  # alphabetical ordering by colname). We actually do want to
+  # preserve this behavior so that prepare_cached has a better
+  # chance of matching on unrelated calls
+
+  my $fetch_row_idx = -1;  # saner loop this way
+  my $fetch_tuple = sub {
+    return undef if ++$fetch_row_idx > $#$data;
+
+    return [ map {
+      my $v = ! defined $_->{_literal_bind_subindex}
+
+        ? $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]
+
+        # There are no attributes to resolve here - we already did everything
+        # when we constructed proto_bind. However we still want to sanity-check
+        # what the user supplied, so pass stuff through to the resolver *anyway*
+        : $self->_resolve_bindattrs (
+            undef,  # a fake rsrc
+            [ ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}->[ $_->{_literal_bind_subindex} ] ],
+            {},  # a fake column_info bag
+          )->[0][1]
+      ;
+
+      # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD
+      # For the time being forcibly stringify whatever is stringifiable
+      my $vref;
+
+        ( !length ref $v or ! ($vref = is_plain_value $v) )   ? $v
+      : defined blessed( $$vref )                             ? "$$vref"
+                                                              : $$vref
+      ;
+    } map { $_->[0] } @$proto_bind ];
+  };
 
-    if( $bind_attributes ) {
-      $attributes = $bind_attributes->{$column_name}
-        if defined $bind_attributes->{$column_name};
-    }
   my $tuple_status = [];
+  my ($rv, $err);
+  dbic_internal_try {
+    $rv = $sth->execute_for_fetch(
+      $fetch_tuple,
+      $tuple_status,
+    );
+  }
+  catch {
+    $err = shift;
+  };
 
-    my @data = map { $_->[$data_index] } @$data;
+  # Not all DBDs are created equal. Some throw on error, some return
+  # an undef $rv, and some set $sth->err - try whatever we can
+  $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
+    ! defined $err
+      and
+    ( !defined $rv or $sth->err )
+  );
 
-    $sth->bind_param_array( $placeholder_index, [@data], $attributes );
-    $placeholder_index++;
+  # Statement must finish even if there was an exception.
+  dbic_internal_try {
+    $sth->finish
   }
-
-  my $rv = eval {
-    $self->_dbh_execute_array($sth, $tuple_status, @extra);
+  catch {
+    $err = shift unless defined $err
   };
-  my $err = $@ || $sth->errstr;
 
-# Statement must finish even if there was an exception.
-  eval { $sth->finish };
-  $err = $@ unless $err;
-
-  if ($err) {
+  if (defined $err) {
     my $i = 0;
     ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
 
     $self->throw_exception("Unexpected populate error: $err")
       if ($i > $#$tuple_status);
 
-    $self->throw_exception(sprintf "%s for populate slice:\n%s",
+    $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
      ($tuple_status->[$i][1] || $err),
-      Data::Dumper::Concise::Dumper({
-        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
-      }),
+      dump_value { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
    );
  }
 
-  $guard->commit if $guard;
-
   return $rv;
 }
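The underlying DBI interface being driven here, reduced to a standalone sketch (table and data hypothetical):

    my @rows = ( [ 1, 'foo' ], [ 2, 'bar' ] );
    my $idx  = -1;
    my $sth  = $dbh->prepare('INSERT INTO t (id, name) VALUES (?, ?)');
    my $rv   = $sth->execute_for_fetch(
      sub { ++$idx > $#rows ? undef : $rows[$idx] },  # tuple provider; undef ends the stream
      my $tuple_status = [],                          # filled with per-tuple success/error info
    );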
 
-sub _dbh_execute_array {
-  my ($self, $sth, $tuple_status, @extra) = @_;
-
-  return $sth->execute_array({ArrayTupleStatus => $tuple_status});
-}
-
 sub _dbh_execute_inserts_with_no_binds {
   my ($self, $sth, $count) = @_;
 
-  my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
-
-  eval {
+  my $err;
+  dbic_internal_try {
     my $dbh = $self->_get_dbh;
     local $dbh->{RaiseError} = 1;
     local $dbh->{PrintError} = 0;
 
     $sth->execute foreach 1..$count;
+  }
+  catch {
+    $err = shift;
   };
-  my $exception = $@;
 
-# Make sure statement is finished even if there was an exception.
-  eval { $sth->finish };
-  $exception = $@ unless $exception;
-
-  $self->throw_exception($exception) if $exception;
+  # Make sure statement is finished even if there was an exception.
+  dbic_internal_try {
+    $sth->finish
+  }
+  catch {
+    $err = shift unless defined $err;
+  };
 
-  $guard->commit if $guard;
+  $self->throw_exception($err) if defined $err;
 
   return $count;
 }
 
 sub update {
-  my ($self, $source, @args) = @_;
-
-  my $bind_attrs = $self->source_bind_attributes($source);
-
-  return $self->_execute('update' => [], $source, $bind_attrs, @args);
+  #my ($self, $source, @args) = @_;
+  shift->_execute('update', @_);
 }
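This wrapper (and the analogous C<delete> below) is normally reached through the resultset layer, e.g.:

    # both calls end up in $storage->update() / $storage->delete() respectively
    $rs->search({ rank => { '>' => 42 } })->update({ active => 0 });
    $rs->search({ active => 0 })->delete;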
 
 sub delete {
-  my ($self, $source, @args) = @_;
-
-  my $bind_attrs = $self->source_bind_attributes($source);
-
-  return $self->_execute('delete' => [], $source, $bind_attrs, @args);
-}
-
-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-
-  # quick check if we got a sane rs on our hands
-  my @pcols = $rsrc->primary_columns;
-  unless (@pcols) {
-    $self->throw_exception (
-      sprintf (
-        "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
-        $rsrc->source_name || $rsrc->from
-      )
-    );
-  }
-
-  my $sel = $rs->_resolved_attrs->{select};
-  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
-  if (
-    join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
-      ne
-    join ("\x00", sort @$sel )
-  ) {
-    $self->throw_exception (
-      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
-    );
-  }
-
-  if (@pcols == 1) {
-    return $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      { $pcols[0] => { -in => $rs->as_query } },
-    );
-  }
-
-  else {
-    return $self->_multipk_update_delete (@_);
-  }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
-  return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->primary_columns;
-
-  my $guard = $self->txn_scope_guard;
-
-  # emulate the return value of $sth->execute for non-selects
-  my $row_cnt = '0E0';
-
-  my $subrs_cur = $rs->cursor;
-  while (my @pks = $subrs_cur->next) {
-
-    my $cond;
-    for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks[$i];
-    }
-
-    $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      $cond,
-    );
-
-    $row_cnt++;
-  }
-
-  $guard->commit;
-
-  return $row_cnt;
+  #my ($self, $source, @args) = @_;
+  shift->_execute('delete', @_);
 }
 
 sub _select {
   my $self = shift;
-
-  # localization is neccessary as
-  #   1) there is no infrastructure to pass this around before SQLA2
-  #   2) _select_args sets it and _prep_for_execute consumes it
-  my $sql_maker = $self->sql_maker;
-  local $sql_maker->{_dbic_rs_attrs};
-
-  return $self->_execute($self->_select_args(@_));
+  $self->_execute($self->_select_args(@_));
 }
 
 sub _select_args_to_query {
   my $self = shift;
 
-  # localization is neccessary as
-  #   1) there is no infrastructure to pass this around before SQLA2
-  #   2) _select_args sets it and _prep_for_execute consumes it
-  my $sql_maker = $self->sql_maker;
-  local $sql_maker->{_dbic_rs_attrs};
+  $self->throw_exception(
+    "Unable to generate limited query representation with 'software_limit' enabled"
+  ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
 
-  # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+  # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
-  my ($op, $bind, $ident, $bind_attrs, @args) =
+  my ($op, $ident, @args) =
     $self->_select_args(@_);
 
-  # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
-  my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
-  $prepared_bind ||= [];
+  # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
+  my ($sql, $bind) = $self->_gen_sql_bind($op, $ident, \@args);
 
-  return wantarray
-    ? ($sql, $prepared_bind, $bind_attrs)
-    : \[ "($sql)", @$prepared_bind ]
-  ;
+  # reuse the bind arrayref
+  unshift @{$bind}, "($sql)";
+  \$bind;
 }
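The C<\[ '(SELECT ...)', @bind ]> representation built here is what user code sees via C<as_query>, making any resultset embeddable as a subquery:

    my $subq = $artist_rs->get_column('artistid')->as_query;
    # $$subq is [ '(SELECT me.artistid FROM artist me)', @bind_tuples ]
    my $cds = $schema->resultset('CD')->search({ artist => { -in => $subq } });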
 
 sub _select_args {
-  my ($self, $ident, $select, $where, $attrs) = @_;
-
-  my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
+  my ($self, $ident, $select, $where, $orig_attrs) = @_;
+
+  # FIXME - that kind of caching would be nice to have
+  # however currently we *may* pass the same $orig_attrs
+  # with different ident/select/where
+  # the whole interface needs to be rethought, since it
+  # was centered around the flawed SQLA API. We can do
+  # soooooo much better now. But that is also another
+  # battle...
+  #return (
+  #  'select', $orig_attrs->{!args_as_stored_at_the_end_of_this_method!}
+  #) if $orig_attrs->{!args_as_stored_at_the_end_of_this_method!};
 
   my $sql_maker = $self->sql_maker;
-  $sql_maker->{_dbic_rs_attrs} = {
-    %$attrs,
+
+  my $attrs = {
+    %$orig_attrs,
     select => $select,
     from => $ident,
     where => $where,
-    $rs_alias
-      ? ( _source_handle => $alias2source->{$rs_alias}->handle )
-      : ()
-    ,
   };
 
-  # calculate bind_attrs before possible $ident mangling
-  my $bind_attrs = {};
-  for my $alias (keys %$alias2source) {
-    my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
-    for my $col (keys %$bindtypes) {
-
-      my $fqcn = join ('.', $alias, $col);
-      $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
-
-      # Unqialified column names are nice, but at the same time can be
-      # rather ambiguous. What we do here is basically go along with
-      # the loop, adding an unqualified column slot to $bind_attrs,
-      # alongside the fully qualified name. As soon as we encounter
-      # another column by that name (which would imply another table)
-      # we unset the unqualified slot and never add any info to it
-      # to avoid erroneous type binding. If this happens the users
-      # only choice will be to fully qualify his column name
-
-      if (exists $bind_attrs->{$col}) {
-        $bind_attrs->{$col} = {};
-      }
-      else {
-        $bind_attrs->{$col} = $bind_attrs->{$fqcn};
-      }
-    }
-  }
+  # MySQL actually recommends this approach. I cringe.
+  $attrs->{rows} ||= $sql_maker->__max_int
+    if $attrs->{offset};
 
-  # adjust limits
-  if (
-    $attrs->{software_limit}
-      ||
-    $sql_maker->_default_limit_syntax eq "GenericSubQ"
+  # see if we will need to tear the prefetch apart to satisfy group_by == select
+  # this is *extremely tricky* to get right, I am still not sure I did
+  #
+  my ($prefetch_needs_subquery, @limit_args);
+
+  if ( $attrs->{_grouped_by_distinct} and $attrs->{collapse} ) {
+    # we already know there is a valid group_by (we made it) and we know it is
+    # intended to be based *only* on non-multi stuff
+    # short circuit the group_by parsing below
+    $prefetch_needs_subquery = 1;
+  }
+  elsif (
+    # The rationale is that even if we do *not* have collapse, we still
+    # need to wrap the core grouped select/group_by in a subquery
+    # so that databases that care about group_by/select equivalence
+    # are happy (this includes MySQL in strict_mode)
+    # If any of the other joined tables are referenced in the group_by
+    # however - the user is on their own
+    ( $prefetch_needs_subquery or ! $attrs->{_simple_passthrough_construction} )
+      and
+    $attrs->{group_by}
+      and
+    @{$attrs->{group_by}}
+      and
+    my $grp_aliases = dbic_internal_try {  # internal_try{} because $attrs->{from} may be unreadable
+      $self->_resolve_aliastypes_from_select_args({ from => $attrs->{from}, group_by => $attrs->{group_by} })
+    }
   ) {
-    $attrs->{software_limit} = 1;
+    # no aliases other than our own in group_by
+    # if there are - do not allow subquery even if limit is present
+    $prefetch_needs_subquery = ! scalar grep { $_ ne $attrs->{alias} } keys %{ $grp_aliases->{grouping} || {} };
   }
-  else {
-    $self->throw_exception("rows attribute must be positive if present")
-      if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
-
-    # MySQL actually recommends this approach. I cringe.
-    $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
+  elsif ( $attrs->{rows} && $attrs->{collapse} ) {
+    # active collapse with a limit - that one is a no-brainer unless
+    # overruled by a group_by above
+    $prefetch_needs_subquery = 1;
   }
 
-  my @limit;
+  if ($prefetch_needs_subquery) {
+    $attrs = $self->_adjust_select_args_for_complex_prefetch ($attrs);
+  }
+  elsif (! $attrs->{software_limit} ) {
+    push @limit_args, (
+      $attrs->{rows} || (),
+      $attrs->{offset} || (),
+    );
+  }
 
-  # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch)
-  # otherwise delegate the limiting to the storage, unless software limit was requested
+  # try to simplify the joinmap further (prune unreferenced type-single joins)
   if (
-    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
-       ||
-    ( $attrs->{group_by} && @{$attrs->{group_by}} &&
-      $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} )
+    ! $prefetch_needs_subquery  # already pruned
+      and
+    ref $attrs->{from}
+      and
+    reftype $attrs->{from} eq 'ARRAY'
+      and
+    @{$attrs->{from}} != 1
   ) {
-    ($ident, $select, $where, $attrs)
-      = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
-  }
-  elsif (! $attrs->{software_limit} ) {
-    push @limit, $attrs->{rows}, $attrs->{offset};
+    ($attrs->{from}, $attrs->{_aliastypes}) = $self->_prune_unused_joins ($attrs);
   }
 
+  # FIXME this is a gross, inefficient, largely incorrect and fragile hack
+  # during the result inflation stage we *need* to know what was the aliastype
+  # map as sqla saw it when the final pieces of SQL were being assembled
+  # Originally we simply carried around the entirety of $attrs, but this
+  # resulted in resultsets that are being reused growing continuously, as
+  # the hash in question grew deeper and deeper.
+  # Instead hand-pick what to take with us here (we actually don't need much
+  # at this point just the map itself)
+  $orig_attrs->{_last_sqlmaker_alias_map} = $attrs->{_aliastypes};
+
 ###
-  # This would be the point to deflate anything found in $where
+  # my $alias2source = $self->_resolve_ident_sources ($ident);
+  #
+  # This would be the point to deflate anything found in $attrs->{where}
   # (and leave $attrs->{bind} intact). Problem is - inflators historically
-  # expect a row object. And all we have is a resultsource (it is trivial
+  # expect a result object. And all we have is a resultsource (it is trivial
   # to extract deflator coderefs via $alias2source above).
   #
   # I don't see a way forward other than changing the way deflators are
   # invoked, and that's just bad...
 ###
 
-  my $order = { map
-    { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : ()  }
-    (qw/order_by group_by having/ )
-  };
-
-  return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+  return ( 'select', @{$attrs}{qw(from select where)}, $attrs, @limit_args );
 }
 
 # Returns a counting SELECT for a simple count
@@ -1810,35 +2603,6 @@ sub _count_select {
   return { count => '*' };
 }
 
-# Returns a SELECT which will end up in the subselect
-# There may or may not be a group_by, as the subquery
-# might have been called to accomodate a limit
-#
-# Most databases would be happy with whatever ends up
-# here, but some choke in various ways.
-#
-sub _subq_count_select {
-  my ($self, $source, $rs_attrs) = @_;
-  return $rs_attrs->{group_by} if $rs_attrs->{group_by};
-
-  my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
-  return @pcols ? \@pcols : [ 1 ];
-}
-
-sub source_bind_attributes {
-  my ($self, $source) = @_;
-
-  my $bind_attributes;
-  foreach my $column ($source->columns) {
-
-    my $data_type = $source->column_info($column)->{data_type} || '';
-    $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
-      if $data_type;
-  }
-
-  return $bind_attributes;
-}
-
 =head2 select
 
 =over 4
 
@@ -1870,44 +2634,22 @@ sub select_single {
   return @row;
 }
 
-=head2 sth
-
-=over 4
-
-=item Arguments: $sql
-
-=back
+=head2 sql_limit_dialect
 
-Returns a L<DBI> sth (statement handle) for the supplied SQL.
+This is an accessor for the default SQL limit dialect used by a particular
+storage driver. Can be overridden by supplying an explicit L</connect_info>
+to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
+see L<DBIx::Class::SQLMaker::LimitDialects>.
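For example, to force the portable (if slow) subquery-based emulation, one could set the stock C<GenericSubQ> dialect either on the storage or via connect-time attributes:

    $schema->storage->sql_limit_dialect('GenericSubQ');

    # or at connect time
    My::Schema->connect($dsn, $user, $pass, { limit_dialect => 'GenericSubQ' });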
 
 =cut
 
-sub _dbh_sth {
-  my ($self, $dbh, $sql) = @_;
-
-  # 3 is the if_active parameter which avoids active sth re-use
-  my $sth = $self->disable_sth_caching
-    ? $dbh->prepare($sql)
-    : $dbh->prepare_cached($sql, {}, 3);
-
-  # XXX You would think RaiseError would make this impossible,
-  # but apparently that's not true :(
-  $self->throw_exception($dbh->errstr) if !$sth;
-
-  $sth;
-}
-
-sub sth {
-  my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql); # retry over disconnects
-}
-
 sub _dbh_columns_info_for {
   my ($self, $dbh, $table) = @_;
 
-  if ($dbh->can('column_info')) {
-    my %result;
-    eval {
+  my %result;
+
+  if (! DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE and $dbh->can('column_info')) {
+    dbic_internal_try {
       my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
       my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
       $sth->execute();
@@ -1922,38 +2664,76 @@ sub _dbh_columns_info_for {
         $result{$col_name} = \%column_info;
       }
+    } catch {
+      %result = ();
     };
-    return \%result if !$@ && scalar keys %result;
+
+    return \%result if keys %result;
   }
 
-  my %result;
   my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
   $sth->execute;
-  my @columns = @{$sth->{NAME_lc}};
-  for my $i ( 0 .. $#columns ){
-    my %column_info;
-    $column_info{data_type} = $sth->{TYPE}->[$i];
-    $column_info{size} = $sth->{PRECISION}->[$i];
-    $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
-
-    if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
-      $column_info{data_type} = $1;
-      $column_info{size} = $2;
+
+### The acrobatics with lc names is necessary to support both the legacy
+### API that used NAME_lc exclusively, *AND* at the same time work properly
+### with column names differing in case only (thanks pg!)
+
+  my ($columns, $seen_lcs);
+
+  ++$seen_lcs->{lc($_)} and $columns->{$_} = {
+    idx => scalar keys %$columns,
+    name => $_,
+    lc_name => lc($_),
+  } for @{$sth->{NAME}};
+
+  $seen_lcs->{$_->{lc_name}} == 1
+    and
+  $_->{name} = $_->{lc_name}
+    for values %$columns;
+
+  for ( values %$columns ) {
+    my $inf = {
+      data_type => $sth->{TYPE}->[$_->{idx}],
+      size => $sth->{PRECISION}->[$_->{idx}],
+      is_nullable => $sth->{NULLABLE}->[$_->{idx}] ? 1 : 0,
+    };
+
+    if ($inf->{data_type} =~ m/^(.*?)\((.*?)\)$/) {
+      @{$inf}{qw( data_type size)} = ($1, $2);
     }
 
-    $result{$columns[$i]} = \%column_info;
+    $result{$_->{name}} = $inf;
   }
+
   $sth->finish;
 
-  foreach my $col (keys %result) {
-    my $colinfo = $result{$col};
-    my $type_num = $colinfo->{data_type};
-    my $type_name;
-    if(defined $type_num && $dbh->can('type_info')) {
-      my $type_info = $dbh->type_info($type_num);
-      $type_name = $type_info->{TYPE_NAME} if $type_info;
-      $colinfo->{data_type} = $type_name if $type_name;
+  if ($dbh->can('type_info')) {
+    for my $inf (values %result) {
+      next if ! defined $inf->{data_type};
+
+      $inf->{data_type} = (
+        (
+          (
+            $dbh->type_info( $inf->{data_type} )
+              ||
+            next
+          )
+            ||
+          next
+        )->{TYPE_NAME}
+          ||
+        next
+      );
+
+      # FIXME - this may be an artifact of the DBD::Pg implementation alone
+      # needs more testing in the future...
+      $inf->{size} -= 4 if (
        ( $inf->{size}||0 > 4 )
          and
        $inf->{data_type} =~ qr/^text$/i
      );
    }
+  }
 
   return \%result;
 }
 
@@ -1971,18 +2751,14 @@
 
 Return the row id of the last insert.
 
 =cut
 
 sub _dbh_last_insert_id {
-  # All Storage's need to register their own _dbh_last_insert_id
-  # the old SQLite-based method was highly inappropriate
+  my ($self, $dbh, $source, $col) = @_;
 
-  my $self = shift;
-  my $class = ref $self;
-  $self->throw_exception (<<EOE);
+  my $id = dbic_internal_try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+
+  return $id if defined $id;
-
-Since the method of obtaining the autoincrement id of the last insert -operation varies greatly between different databases, this method must be -individually implemented for every storage class. -EOE + my $class = ref $self; + $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed"); } sub last_insert_id { @@ -2022,33 +2798,39 @@ sub _native_data_type { } # Check if placeholders are supported at all -sub _placeholders_supported { +sub _determine_supports_placeholders { my $self = shift; my $dbh = $self->_get_dbh; # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported}) # but it is inaccurate more often than not - eval { + ( dbic_internal_try { local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 1; $dbh->do('select ?', {}, 1); - }; - return $@ ? 0 : 1; + 1; + } ) + ? 1 + : 0 + ; } # Check if placeholders bound to non-string types throw exceptions # -sub _typeless_placeholders_supported { +sub _determine_supports_typeless_placeholders { my $self = shift; my $dbh = $self->_get_dbh; - eval { + ( dbic_internal_try { local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 1; # this specifically tests a bind that is NOT a string $dbh->do('select 1 where 1 = ?', {}, 1); - }; - return $@ ? 0 : 1; + 1; + } ) + ? 1 + : 0 + ; } =head2 sqlt_type @@ -2067,7 +2849,10 @@ Given a datatype from column info, returns a database specific bind attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will let the database planner just handle it. -Generally only needed for special case column types, like bytea in postgres. +This method is always called after the driver has been determined and a DBI +connection has been established. Therefore you can refer to C +and/or C directly, without worrying about loading +the correct modules. =cut @@ -2086,21 +2871,21 @@ be performed instead of the usual C. =cut sub is_datatype_numeric { - my ($self, $dt) = @_; + #my ($self, $dt) = @_; - return 0 unless $dt; + return 0 unless $_[1]; - return $dt =~ /^ (?: + $_[1] =~ /^ (?: numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial ) $/ix; } -=head2 create_ddl_dir (EXPERIMENTAL) +=head2 create_ddl_dir =over 4 -=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args +=item Arguments: $schema, \@databases, $version, $directory, $preversion, \%sqlt_args =back @@ -2121,8 +2906,7 @@ $version in the name with "$preversion-$version". See L for a list of values for C<\%sqlt_args>. The most common value for this would be C<< { add_drop_table => 1 } >> to have the SQL produced include a C statement for each table -created. For quoting purposes supply C and -C. +created. For quoting purposes supply C. If no arguments are passed, then the following default values are assumed: @@ -2148,20 +2932,27 @@ hashref like the following { ignore_constraint_names => 0, # ... other options } -Note that this feature is currently EXPERIMENTAL and may not work correctly -across all databases, or fully handle complex relationships. - -WARNING: Please check all SQL files created, before applying them. +WARNING: You are strongly advised to check all SQL files created, before applying +them. 
 
 =cut
 
 sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
-  if(!$dir || !-d $dir) {
+  require DBIx::Class::Optional::Dependencies;
+  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without $missing");
+  }
+
+  if (!$dir) {
     carp "No directory given, using ./\n";
-    $dir = "./";
+    $dir = './';
+  }
+  else {
+    mkdir_p( $dir ) unless -d $dir;
   }
 
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
 
@@ -2172,12 +2963,10 @@
     add_drop_table => 1,
     ignore_constraint_names => 1,
     ignore_index_names => 1,
+    quote_identifiers => $self->sql_maker->_quoting_enabled,
     %{$sqltargs || {}}
   };
 
-  $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error)
-    if !$self->_sqlt_version_ok;
-
   my $sqlt = SQL::Translator->new( $sqltargs );
 
   $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
@@ -2265,10 +3054,21 @@
       unless $dest_schema->name;
   }
 
-  my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
-                                                $dest_schema,   $db,
-                                                $sqltargs
-                                               );
+  my $diff = do {
+    # FIXME - this is a terrible workaround for
+    # https://github.com/dbsrgits/sql-translator/commit/2d23c1e
+    # Fixing it in this sloppy manner so that we don't have to
+    # lockstep an SQLT release as well. Needs to be removed at
+    # some point, and SQLT dep bumped
+    local $SQL::Translator::Producer::SQLite::NO_QUOTES
+      if $SQL::Translator::Producer::SQLite::NO_QUOTES;
+
+    SQL::Translator::Diff::schema_diff($source_schema, $db,
+                                       $dest_schema,   $db,
+                                       $sqltargs
+                                      );
+  };
+
   if(!open $file, ">$difffile") {
     $self->throw_exception("Can't write to $difffile ($!)");
     next;
@@ -2286,7 +3086,8 @@
 
 =back
 
-Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
+Returns the statements used by L</deploy>
+and L<DBIx::Class::Schema/deploy>.
 
 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
 
@@ -2310,6 +3111,7 @@ sub deployment_statements {
   my $filename = $schema->ddl_filename($type, $version, $dir);
   if(-f $filename)
   {
+    # FIXME replace this block when a proper sane sql parser is available
    my $file;
    open($file, "<$filename")
      or $self->throw_exception("Can't open $filename ($!)");
    my @rows = <$file>;
    close($file);
    return join('', @rows);
  }
 
-  $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error )
-    if !$self->_sqlt_version_ok;
+  require DBIx::Class::Optional::Dependencies;
+  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ) {
+    $self->throw_exception("Can't deploy without a pregenerated 'ddl_dir' directory or $missing");
+  }
 
-  # sources needs to be a parser arg, but for simplicty allow at top level
+  # sources needs to be a parser arg, but for simplicity allow at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
      if exists $sqltargs->{sources};
 
+  $sqltargs->{quote_identifiers} = $self->sql_maker->_quoting_enabled
+    unless exists $sqltargs->{quote_identifiers};
+
  my $tr = SQL::Translator->new(
    producer => "SQL::Translator::Producer::${type}",
    %$sqltargs,
    parser => 'SQL::Translator::Parser::DBIx::Class',
    data => $schema,
  );
 
-  my $ret = $tr->translate
-    or $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error);
-
-  return $ret;
+  return preserve_context {
+    $tr->translate
+  } after => sub {
+    $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+      unless defined $_[0];
+  };
 }
 
+# FIXME deploy() currently does not accurately report sql errors
+# Will always return true while errors are warned
 sub deploy {
   my ($self, $schema, $type, $sqltargs, $dir) = @_;
   my $deploy = sub {
     my $line = shift;
-    return if($line =~ /^--/);
     return if(!$line);
+    return if($line =~ /^--/);
     # next if($line =~ /^DROP/m);
     return if($line =~ /^BEGIN TRANSACTION/m);
     return if($line =~ /^COMMIT/m);
     return if $line =~ /^\s+$/; # skip whitespace only
     $self->_query_start($line);
-    eval {
+    dbic_internal_try {
       # do a dbh_do cycle here, as we need some error checking in
       # place (even though we will ignore errors)
       $self->dbh_do (sub { $_[1]->do($line) });
+    } catch {
+      carp qq{$_ (running "${line}")};
     };
-    if ($@) {
-      carp qq{$@ (running "${line}")};
-    }
     $self->_query_end($line);
   };
-  my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
+  my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
   if (@statements > 1) {
     foreach my $statement (@statements) {
       $deploy->( $statement );
     }
   }
   elsif (@statements == 1) {
-    foreach my $line ( split(";\n", $statements[0])) {
+    # split on single line comments and end of statements
+    foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0])) {
       $deploy->( $line );
     }
   }
 }
 
@@ -2388,12 +3199,7 @@ sub datetime_parser {
 
 =head2 datetime_parser_type
 
-Defines (returns) the datetime parser class - currently hardwired to
-L<DateTime::Format::MySQL>
-
-=cut
-
-sub datetime_parser_type { "DateTime::Format::MySQL"; }
+Defines the datetime parser class - currently defaults to L<DateTime::Format::MySQL>
 
 =head2 build_datetime_parser
 
See L</datetime_parser>
 
 =cut
 
 sub build_datetime_parser {
   my $self = shift;
   my $type = $self->datetime_parser_type(@_);
-  $self->ensure_class_loaded ($type);
   return $type;
 }
 
@@ -2434,45 +3239,110 @@ sub lag_behind_master {
   return;
 }
 
-# SQLT version handling
-{
-  my $_sqlt_version_ok;     # private
-  my $_sqlt_version_error;  # private
-
-  sub _sqlt_version_ok {
-    if (!defined $_sqlt_version_ok) {
-      eval "use SQL::Translator $minimum_sqlt_version";
-      if ($@) {
-        $_sqlt_version_ok = 0;
-        $_sqlt_version_error = $@;
+=head2 relname_to_table_alias
+
+=over 4
+
+=item Arguments: $relname, $join_count
+
+=item Return Value: $alias
+
+=back
+
+L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
+queries.
+
+This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
+way these aliases are named.
+
+The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>,
+otherwise C<"$relname">.
+
+=cut
+
+sub relname_to_table_alias {
+  my ($self, $relname, $join_count) = @_;
+
+  my $alias = ($join_count && $join_count > 1 ?
    join('_', $relname, $join_count) : $relname);
+
+  return $alias;
+}
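A storage subclass could hook this to keep aliases within a tight identifier limit - a sketch (the 30-byte cap is illustrative):

    package My::Storage::ShortAliases;
    use base 'DBIx::Class::Storage::DBI';
    use mro 'c3';

    sub relname_to_table_alias {
      my $self  = shift;
      my $alias = $self->next::method(@_);
      return substr($alias, 0, 30);   # e.g. for engines with short identifier limits
    }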
+
+# The size in bytes to use for DBI's ->bind_param_inout, this is the generic
+# version and it may be necessary to amend or override it for a specific storage
+# if such binds are necessary.
+sub _max_column_bytesize {
+  my ($self, $attr) = @_;
+
+  my $max_size;
+
+  if ($attr->{sqlt_datatype}) {
+    my $data_type = lc($attr->{sqlt_datatype});
+
+    if ($attr->{sqlt_size}) {
+
+      # String/sized-binary types
+      if ($data_type =~ /^(?:
+          l? (?:var)? char(?:acter)? (?:\s*varying)?
+            |
+          (?:var)? binary (?:\s*varying)?
+            |
+          raw
+        )\b/x
+      ) {
+        $max_size = $attr->{sqlt_size};
      }
-      else {
-        $_sqlt_version_ok = 1;
+      # Other charset/unicode types, assume scale of 4
+      elsif ($data_type =~ /^(?:
+          national \s* character (?:\s*varying)?
+            |
+          nchar
+            |
+          univarchar
+            |
+          nvarchar
+        )\b/x
+      ) {
+        $max_size = $attr->{sqlt_size} * 4;
      }
    }
-      return $_sqlt_version_ok;
-  }
-
-  sub _sqlt_version_error {
-    shift->_sqlt_version_ok unless defined $_sqlt_version_ok;
-    return $_sqlt_version_error;
+    if (!$max_size and !$self->_is_lob_type($data_type)) {
+      $max_size = 100  # for all other (numeric?) datatypes
    }
  }
 
-  sub _sqlt_minimum_version { $minimum_sqlt_version };
-}
-
-sub DESTROY {
-  my $self = shift;
+  $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000;
}
 
-  $self->_verify_pid if $self->_dbh;
+# Determine if a data_type is some type of BLOB
+sub _is_lob_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i
+    || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary
+                                  |varchar|character\s*varying|nvarchar
+                                  |national\s*character\s*varying))?\z/xi);
+}
 
-  # some databases need this to stop spewing warnings
-  if (my $dbh = $self->_dbh) {
-    local $@;
-    eval { $dbh->disconnect };
-  }
+sub _is_binary_lob_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($data_type =~ /blob|bfile|image|bytea/i
+    || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi);
+}
 
-  $self->_dbh(undef);
+sub _is_text_lob_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($data_type =~ /^(?:clob|memo)\z/i
+    || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar
+                                  |national\s*character\s*varying))\z/xi);
+}
+
+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($self->_is_binary_lob_type($data_type)
+    || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
 }
 
 1;
 
=head1 USAGE NOTES
 
=head2 DBIx::Class and AutoCommit
 
 DBIx::Class can do some wonderful magic with handling exceptions,
 disconnections, and transactions when you use C<< AutoCommit => 1 >>
-(the default) combined with C<txn_do> for transaction support.
+(the default) combined with L<txn_do|DBIx::Class::Storage/txn_do> for
+transaction support.
 
 If you set C<< AutoCommit => 0 >> in your connect info, then you are always
 in an assumed transaction between commits, and you're telling us you'd
 like to manage that manually.  A lot of the magic protections offered by
 this module will go away.  We can't protect you from exceptions due to database
 disconnects because we don't know anything about how to restart your
 transactions.  You're on your own for handling all sorts of exceptional
 cases if you choose the C<< AutoCommit => 0 >> path, just as you would
 be with raw DBI.
 
+=head1 FURTHER QUESTIONS?
 
-=head1 AUTHORS
+Check the list of L<additional DBIC resources|DBIx::Class/GETTING HELP/SUPPORT>.
 
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+=head1 COPYRIGHT AND LICENSE
 
-Andy Grundman <andy@hybridized.org>
-
-=head1 LICENSE
+This module is free software L<copyright|DBIx::Class/COPYRIGHT AND LICENSE>
+by the L<DBIx::Class (DBIC) authors|DBIx::Class/AUTHORS>. You can
-You may distribute this code under the same terms as Perl itself.
+redistribute it and/or modify it under the same terms as the
+L<DBIx::Class library|DBIx::Class/COPYRIGHT AND LICENSE>.