X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=bdafe1f31d2fbe2cef3bbeea405e7aeb1debe0d2;hb=3d56e0269f018071841218af861bfa07df6bf01b;hp=6df0e1f73c51bfa40e03b13fdfcccf25da732cbb;hpb=e103821341284d4adddf43954dc4be7c92b46160;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 6df0e1f..bdafe1f 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -8,12 +8,12 @@ use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; use mro 'c3'; use DBIx::Class::Carp; -use DBIx::Class::Exception; use Scalar::Util qw/refaddr weaken reftype blessed/; use List::Util qw/first/; -use Sub::Name 'subname'; +use Context::Preserve 'preserve_context'; use Try::Tiny; -use overload (); +use SQL::Abstract qw(is_plain_value is_literal_value); +use DBIx::Class::_Util qw(quote_sub perlstring serialize detect_reinvoked_destructor); use namespace::clean; # default cursor class, overridable in connect_info attributes @@ -31,8 +31,9 @@ __PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default __PACKAGE__->sql_name_sep('.'); __PACKAGE__->mk_group_accessors('simple' => qw/ - _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined + _connect_info _dbic_connect_attributes _driver_determined _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit + _perform_autoinc_retrieval _autoinc_supplied_for_op /); # the values for these accessors are picked out (and deleted) from @@ -61,8 +62,12 @@ __PACKAGE__->mk_group_accessors('simple' => @storage_options); my @capabilities = (qw/ insert_returning insert_returning_bound + + multicolumn_in + placeholders typeless_placeholders + join_optimizer /); __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities ); @@ -73,25 +78,35 @@ __PACKAGE__->_use_join_optimizer (1); sub _determine_supports_join_optimizer { 1 }; # Each of these methods need _determine_driver called before itself -# in order to function reliably. This is a purely DRY optimization +# in order to function reliably. We also need to separate accessors +# from plain old method calls, since an accessor called as a setter +# does *not* need the driver determination loop fired (and in fact +# can produce hard to find bugs, like e.g. losing on_connect_* +# semantics on fresh connections) # -# get_(use)_dbms_capability need to be called on the correct Storage -# class, as _use_X may be hardcoded class-wide, and _supports_X calls -# _determine_supports_X which obv. needs a correct driver as well -my @rdbms_specific_methods = qw/ - deployment_statements +# The construct below is simply a parameterized around() +my $storage_accessor_idx = { map { $_ => 1 } qw( sqlt_type + datetime_parser_type + sql_maker + cursor_class +)}; +for my $meth (keys %$storage_accessor_idx, qw( + deployment_statements + build_datetime_parser - datetime_parser_type txn_begin + insert - insert_bulk update delete select select_single + + _insert_bulk + with_deferred_fk_checks get_use_dbms_capability @@ -99,37 +114,45 @@ my @rdbms_specific_methods = qw/ _server_info _get_server_version -/; - -for my $meth (@rdbms_specific_methods) { +)) { my $orig = __PACKAGE__->can ($meth) or die "$meth is not a ::Storage::DBI method!"; - no strict qw/refs/; - no warnings qw/redefine/; - *{__PACKAGE__ ."::$meth"} = subname $meth => sub { + my $is_getter = $storage_accessor_idx->{$meth} ? 
0 : 1; + + quote_sub + __PACKAGE__ ."::$meth", sprintf( <<'EOC', $is_getter, perlstring $meth ), { '$orig' => \$orig }; + if ( # only fire when invoked on an instance, a valid class-based invocation # would e.g. be setting a default for an inherited accessor ref $_[0] and - ! $_[0]->_driver_determined + ! $_[0]->{_driver_determined} and ! $_[0]->{_in_determine_driver} + and + # if this is a known *setter* - just set it, no need to connect + # and determine the driver + ( %1$s or @_ <= 1 ) + and + # Only try to determine stuff if we have *something* that either is or can + # provide a DSN. Allows for bare $schema's generated with a plain ->connect() + # to still be marginally useful + $_[0]->_dbi_connect_info->[0] ) { $_[0]->_determine_driver; - # This for some reason crashes and burns on perl 5.8.1 - # IFF the method ends up throwing an exception - #goto $_[0]->can ($meth); + # work around http://rt.perl.org/rt3/Public/Bug/Display.html?id=35878 + goto $_[0]->can(%2$s) unless DBIx::Class::_ENV_::BROKEN_GOTO; - my $cref = $_[0]->can ($meth); + my $cref = $_[0]->can(%2$s); goto $cref; } goto $orig; - }; +EOC } =head1 NAME @@ -170,7 +193,6 @@ sub new { $new->_sql_maker_opts({}); $new->_dbh_details({}); $new->{_in_do_block} = 0; - $new->{_dbh_gen} = 0; # read below to see what this does $new->_arm_global_destructor; @@ -190,16 +212,21 @@ sub new { my %seek_and_destroy; sub _arm_global_destructor { - my $self = shift; - my $key = refaddr ($self); - $seek_and_destroy{$key} = $self; - weaken ($seek_and_destroy{$key}); + + # quick "garbage collection" pass - prevents the registry + # from slowly growing with a bunch of undef-valued keys + defined $seek_and_destroy{$_} or delete $seek_and_destroy{$_} + for keys %seek_and_destroy; + + weaken ( + $seek_and_destroy{ refaddr($_[0]) } = $_[0] + ); } END { local $?; # just in case the DBI destructor changes it somehow - # destroy just the object if not native to this process/thread + # destroy just the object if not native to this process $_->_verify_pid for (grep { defined $_ } values %seek_and_destroy @@ -210,23 +237,28 @@ sub new { # As per DBI's recommendation, DBIC disconnects all handles as # soon as possible (DBIC will reconnect only on demand from within # the thread) - for (values %seek_and_destroy) { - next unless $_; - $_->{_dbh_gen}++; # so that existing cursors will drop as well + my @instances = grep { defined $_ } values %seek_and_destroy; + %seek_and_destroy = (); + + for (@instances) { $_->_dbh(undef); $_->transaction_depth(0); $_->savepoints([]); + + # properly renumber existing refs + $_->_arm_global_destructor } } } sub DESTROY { - my $self = shift; + return if &detect_reinvoked_destructor; + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; # some databases spew warnings on implicit disconnect local $SIG{__WARN__} = sub {}; - $self->_dbh(undef); + $_[0]->_dbh(undef); # this op is necessary, since the very last perl runtime statement # triggers a global destruction shootout, and the $SIG localization @@ -237,15 +269,14 @@ sub DESTROY { # handle pid changes correctly - do not destroy parent's connection sub _verify_pid { - my $self = shift; - my $pid = $self->_conn_pid; - if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) { + my $pid = $_[0]->_conn_pid; + + if( defined $pid and $pid != $$ and my $dbh = $_[0]->_dbh ) { $dbh->{InactiveDestroy} = 1; - $self->{_dbh_gen}++; - $self->_dbh(undef); - $self->transaction_depth(0); - $self->savepoints([]); + $_[0]->_dbh(undef); + $_[0]->transaction_depth(0); + 
$_[0]->savepoints([]); } return; @@ -325,8 +356,8 @@ for most DBDs. See L for details. =head3 DBIx::Class specific connection attributes -In addition to the standard L -L attributes, DBIx::Class recognizes +In addition to the standard L +L attributes, DBIx::Class recognizes the following connection options. These options can be mixed in with your other L connection attributes, or placed in a separate hashref (C<\%extra_attributes>) as shown above. @@ -593,23 +624,6 @@ sub connect_info { $info = $self->_normalize_connect_info($info) if ref $info eq 'ARRAY'; - for my $storage_opt (keys %{ $info->{storage_options} }) { - my $value = $info->{storage_options}{$storage_opt}; - - $self->$storage_opt($value); - } - - # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only - # the new set of options - $self->_sql_maker(undef); - $self->_sql_maker_opts({}); - - for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) { - my $value = $info->{sql_maker_options}{$sql_maker_opt}; - - $self->_sql_maker_opts->{$sql_maker_opt} = $value; - } - my %attrs = ( %{ $self->_default_dbi_connect_attributes || {} }, %{ $info->{attributes} || {} }, @@ -618,26 +632,68 @@ sub connect_info { my @args = @{ $info->{arguments} }; if (keys %attrs and ref $args[0] ne 'CODE') { - carp + carp_unique ( 'You provided explicit AutoCommit => 0 in your connection_info. ' . 'This is almost universally a bad idea (see the footnotes of ' . 'DBIx::Class::Storage::DBI for more info). If you still want to ' . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable ' . 'this warning.' - if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK}; + ) if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK}; push @args, \%attrs if keys %attrs; } + + # this is the authoritative "always an arrayref" thing fed to DBI->connect + # OR a single-element coderef-based $dbh factory $self->_dbi_connect_info(\@args); + # extract the individual storage options + for my $storage_opt (keys %{ $info->{storage_options} }) { + my $value = $info->{storage_options}{$storage_opt}; + + $self->$storage_opt($value); + } + + # Extract the individual sqlmaker options + # + # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only + # the new set of options + $self->_sql_maker(undef); + $self->_sql_maker_opts({}); + + for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) { + my $value = $info->{sql_maker_options}{$sql_maker_opt}; + + $self->_sql_maker_opts->{$sql_maker_opt} = $value; + } + # FIXME - dirty: - # save attributes them in a separate accessor so they are always + # save attributes in a separate accessor so they are always # introspectable, even in case of a CODE $dbhmaker $self->_dbic_connect_attributes (\%attrs); return $self->_connect_info; } +sub _dbi_connect_info { + my $self = shift; + + return $self->{_dbi_connect_info} = $_[0] + if @_; + + my $conninfo = $self->{_dbi_connect_info} || []; + + # last ditch effort to grab a DSN + if ( ! 
defined $conninfo->[0] and $ENV{DBI_DSN} ) { + my @new_conninfo = @$conninfo; + $new_conninfo[0] = $ENV{DBI_DSN}; + $conninfo = \@new_conninfo; + } + + return $conninfo; +} + + sub _normalize_connect_info { my ($self, $info_arg) = @_; my %info; @@ -773,37 +829,36 @@ Example: sub dbh_do { my $self = shift; - my $code = shift; - - my $dbh = $self->_get_dbh; - - return $self->$code($dbh, @_) - if ( $self->{_in_do_block} || $self->{transaction_depth} ); - - local $self->{_in_do_block} = 1; - - # take a ref instead of a copy, to preserve coderef @_ aliasing semantics - my $args = \@_; - - try { - $self->$code ($dbh, @$args); - } catch { - $self->throw_exception($_) if $self->connected; - - # We were not connected - reconnect and retry, but let any - # exception fall right through this time - carp "Retrying dbh_do($code) after catching disconnected exception: $_" - if $ENV{DBIC_STORAGE_RETRY_DEBUG}; + my $run_target = shift; # either a coderef or a method name - $self->_populate_dbh; - $self->$code($self->_dbh, @$args); - }; + # short circuit when we know there is no need for a runner + # + # FIXME - assumption may be wrong + # the rationale for the txn_depth check is that if this block is a part + # of a larger transaction, everything up to that point is screwed anyway + return $self->$run_target($self->_get_dbh, @_) + if $self->{_in_do_block} or $self->transaction_depth; + + # take a ref instead of a copy, to preserve @_ aliasing + # semantics within the coderef, but only if needed + # (pseudoforking doesn't like this trick much) + my $args = @_ ? \@_ : []; + + DBIx::Class::Storage::BlockRunner->new( + storage => $self, + wrap_txn => 0, + retry_handler => sub { + $_[0]->failed_attempt_count == 1 + and + ! $_[0]->storage->connected + }, + )->run(sub { + $self->$run_target ($self->_get_dbh, @$args ) + }); } sub txn_do { - # connects or reconnects on pid change, necessary to grab correct txn_depth - $_[0]->_get_dbh; - local $_[0]->{_in_do_block} = 1; + $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth shift->next::method(@_); } @@ -815,23 +870,20 @@ database is not in C mode. =cut sub disconnect { - my ($self) = @_; - - if( $self->_dbh ) { - my @actions; - push @actions, ( $self->on_disconnect_call || () ); - push @actions, $self->_parse_connect_do ('on_disconnect_do'); + if( my $dbh = $_[0]->_dbh ) { - $self->_do_connection_actions(disconnect_call_ => $_) for @actions; + $_[0]->_do_connection_actions(disconnect_call_ => $_) for ( + ( $_[0]->on_disconnect_call || () ), + $_[0]->_parse_connect_do ('on_disconnect_do') + ); # stops the "implicit rollback on disconnect" warning - $self->_exec_txn_rollback unless $self->_dbh_autocommit; + $_[0]->_exec_txn_rollback unless $_[0]->_dbh_autocommit; - %{ $self->_dbh->{CachedKids} } = (); - $self->_dbh->disconnect; - $self->_dbh(undef); - $self->{_dbh_gen}++; + %{ $dbh->{CachedKids} } = (); + $dbh->disconnect; + $_[0]->_dbh(undef); } } @@ -852,8 +904,8 @@ in MySQL's case disabled entirely. # Storage subclasses should override this sub with_deferred_fk_checks { - my ($self, $sub) = @_; - $sub->(); + #my ($self, $sub) = @_; + $_[1]->(); } =head2 connected @@ -873,40 +925,26 @@ answering, etc.) This method is used internally by L. 
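
For example, long-running code holding on to a C<$schema> (assumed here
to be an already-built DBIx::Class schema object) can revalidate its
handle before a batch of work - a minimal sketch:

  $schema->storage->ensure_connected
    unless $schema->storage->connected;

(C<ensure_connected> performs the same check internally, so the guard
above is illustrative rather than required.)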
=cut sub connected { - my $self = shift; - return 0 unless $self->_seems_connected; + return 0 unless $_[0]->_seems_connected; #be on the safe side - local $self->_dbh->{RaiseError} = 1; + local $_[0]->_dbh->{RaiseError} = 1; - return $self->_ping; + return $_[0]->_ping; } sub _seems_connected { - my $self = shift; - - $self->_verify_pid; + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; - my $dbh = $self->_dbh - or return 0; - - return $dbh->FETCH('Active'); + ($_[0]->_dbh || return 0)->FETCH('Active'); } sub _ping { - my $self = shift; - - my $dbh = $self->_dbh or return 0; - - return $dbh->ping; + ($_[0]->_dbh || return 0)->ping; } sub ensure_connected { - my ($self) = @_; - - unless ($self->connected) { - $self->_populate_dbh; - } + $_[0]->connected || ( $_[0]->_populate_dbh && 1 ); } =head2 dbh @@ -920,26 +958,26 @@ instead. =cut sub dbh { - my ($self) = @_; - - if (not $self->_dbh) { - $self->_populate_dbh; - } else { - $self->ensure_connected; - } - return $self->_dbh; + # maybe save a ping call + $_[0]->_dbh + ? ( $_[0]->ensure_connected and $_[0]->_dbh ) + : $_[0]->_populate_dbh + ; } # this is the internal "get dbh or connect (don't check)" method sub _get_dbh { - my $self = shift; - $self->_verify_pid; - $self->_populate_dbh unless $self->_dbh; - return $self->_dbh; + $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK; + $_[0]->_dbh || $_[0]->_populate_dbh; } +# *DELIBERATELY* not a setter (for the time being) +# Too intertwined with everything else for any kind of sanity sub sql_maker { - my ($self) = @_; + my $self = shift; + + $self->throw_exception('sql_maker() is not a setter method') if @_; + unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; @@ -951,13 +989,13 @@ sub sql_maker { || do { my $s_class = (ref $self) || $self; - carp ( + carp_unique ( "Your storage class ($s_class) does not set sql_limit_dialect and you " . 'have not supplied an explicit limit_dialect in your connection_info. ' . 'DBIC will attempt to use the GenericSubQ dialect, which works on most ' . 'databases but can be (and often is) painfully slow. ' - . "Please file an RT ticket against '$s_class' ." - ); + . "Please file an RT ticket against '$s_class'" + ) if $self->_dbi_connect_info->[0]; 'GenericSubQ'; } @@ -968,7 +1006,7 @@ sub sql_maker { if ($opts{quote_names}) { $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do { my $s_class = (ref $self) || $self; - carp ( + carp_unique ( "You requested 'quote_names' but your storage class ($s_class) does " . 'not explicitly define a default sql_quote_char and you have not ' . 'supplied a quote_char as part of your connection_info. 
DBIC will ' @@ -999,33 +1037,35 @@ sub _rebless {} sub _init {} sub _populate_dbh { - my ($self) = @_; - my @info = @{$self->_dbi_connect_info || []}; - $self->_dbh(undef); # in case ->connected failed we might get sent here - $self->_dbh_details({}); # reset everything we know + $_[0]->_dbh(undef); # in case ->connected failed we might get sent here + + $_[0]->_dbh_details({}); # reset everything we know - $self->_dbh($self->_connect(@info)); + # FIXME - this needs reenabling with the proper "no reset on same DSN" check + #$_[0]->_sql_maker(undef); # this may also end up being different - $self->_conn_pid($$) if $^O ne 'MSWin32'; # on win32 these are in fact threads + $_[0]->_dbh($_[0]->_connect); - $self->_determine_driver; + $_[0]->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads + + $_[0]->_determine_driver; # Always set the transaction depth on connect, since # there is no transaction in progress by definition - $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; + $_[0]->{transaction_depth} = $_[0]->_dbh_autocommit ? 0 : 1; + + $_[0]->_run_connection_actions unless $_[0]->{_in_determine_driver}; - $self->_run_connection_actions unless $self->{_in_determine_driver}; + $_[0]->_dbh; } sub _run_connection_actions { - my $self = shift; - my @actions; - - push @actions, ( $self->on_connect_call || () ); - push @actions, $self->_parse_connect_do ('on_connect_do'); - $self->_do_connection_actions(connect_call_ => $_) for @actions; + $_[0]->_do_connection_actions(connect_call_ => $_) for ( + ( $_[0]->on_connect_call || () ), + $_[0]->_parse_connect_do ('on_connect_do'), + ); } @@ -1075,7 +1115,16 @@ sub _server_info { $info = {}; - my $server_version = try { $self->_get_server_version }; + my $server_version = try { + $self->_get_server_version + } catch { + # driver determination *may* use this codepath + # in which case we must rethrow + $self->throw_exception($_) if $self->{_in_determine_driver}; + + # $server_version on failure + undef; + }; if (defined $server_version) { $info->{dbms_version} = $server_version; @@ -1107,13 +1156,92 @@ sub _server_info { } sub _get_server_version { - shift->_dbh_get_info(18); + shift->_dbh_get_info('SQL_DBMS_VER'); } sub _dbh_get_info { my ($self, $info) = @_; - return try { $self->_get_dbh->get_info($info) } || undef; + if ($info =~ /[^0-9]/) { + require DBI::Const::GetInfoType; + $info = $DBI::Const::GetInfoType::GetInfoType{$info}; + $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType") + unless defined $info; + } + + $self->_get_dbh->get_info($info); +} + +sub _describe_connection { + require DBI::Const::GetInfoReturn; + + my $self = shift; + + my $drv; + try { + $drv = $self->_extract_driver_from_connect_info; + $self->ensure_connected; + }; + + $drv = "DBD::$drv" if $drv; + + my $res = { + DBIC_DSN => $self->_dbi_connect_info->[0], + DBI_VER => DBI->VERSION, + DBIC_VER => DBIx::Class->VERSION, + DBIC_DRIVER => ref $self, + $drv ? 
( + DBD => $drv, + DBD_VER => try { $drv->VERSION }, + ) : (), + }; + + # try to grab data even if we never managed to connect + # will cover us in cases of an oddly broken half-connect + for my $inf ( + #keys %DBI::Const::GetInfoType::GetInfoType, + qw/ + SQL_CURSOR_COMMIT_BEHAVIOR + SQL_CURSOR_ROLLBACK_BEHAVIOR + SQL_CURSOR_SENSITIVITY + SQL_DATA_SOURCE_NAME + SQL_DBMS_NAME + SQL_DBMS_VER + SQL_DEFAULT_TXN_ISOLATION + SQL_DM_VER + SQL_DRIVER_NAME + SQL_DRIVER_ODBC_VER + SQL_DRIVER_VER + SQL_EXPRESSIONS_IN_ORDERBY + SQL_GROUP_BY + SQL_IDENTIFIER_CASE + SQL_IDENTIFIER_QUOTE_CHAR + SQL_MAX_CATALOG_NAME_LEN + SQL_MAX_COLUMN_NAME_LEN + SQL_MAX_IDENTIFIER_LEN + SQL_MAX_TABLE_NAME_LEN + SQL_MULTIPLE_ACTIVE_TXN + SQL_MULT_RESULT_SETS + SQL_NEED_LONG_DATA_LEN + SQL_NON_NULLABLE_COLUMNS + SQL_ODBC_VER + SQL_QUALIFIER_NAME_SEPARATOR + SQL_QUOTED_IDENTIFIER_CASE + SQL_TXN_CAPABLE + SQL_TXN_ISOLATION_OPTION + / + ) { + # some drivers barf on things they do not know about instead + # of returning undef + my $v = try { $self->_dbh_get_info($inf) }; + next unless defined $v; + + #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} ); + my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v); + $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? " ($expl)" : '' ); + } + + $res; } sub _determine_driver { @@ -1128,21 +1256,9 @@ sub _determine_driver { if ($self->_dbh) { # we are connected $driver = $self->_dbh->{Driver}{Name}; $started_connected = 1; - } else { - # if connect_info is a CODEREF, we have no choice but to connect - if (ref $self->_dbi_connect_info->[0] && - reftype $self->_dbi_connect_info->[0] eq 'CODE') { - $self->_populate_dbh; - $driver = $self->_dbh->{Driver}{Name}; - } - else { - # try to use dsn to not require being connected, the driver may still - # force a connection in _rebless to determine version - # (dsn may not be supplied at all if all we do is make a mock-schema) - my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || ''; - ($driver) = $dsn =~ /dbi:([^:]+):/i; - $driver ||= $ENV{DBI_DRIVER}; - } + } + else { + $driver = $self->_extract_driver_from_connect_info; } if ($driver) { @@ -1152,6 +1268,18 @@ sub _determine_driver { bless $self, $storage_class; $self->_rebless(); } + else { + $self->_warn_undetermined_driver( + 'This version of DBIC does not yet seem to supply a driver for ' + . "your particular RDBMS and/or connection method ('$driver')." + ); + } + } + else { + $self->_warn_undetermined_driver( + 'Unable to extract a driver name from connect info - this ' + . 'should not have happened.' + ); } } @@ -1159,6 +1287,15 @@ sub _determine_driver { Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO; + if ($self->can('source_bind_attributes')) { + $self->throw_exception( + "Your storage subclass @{[ ref $self ]} provides (or inherits) the method " + . 'source_bind_attributes() for which support has been removed as of Jan 2013. ' + . 'If you are not sure how to proceed please contact the development team via ' + . 
DBIx::Class::_ENV_::HELP_URL + ); + } + $self->_init; # run driver-specific initializations $self->_run_connection_actions @@ -1166,6 +1303,73 @@ sub _determine_driver { } } +sub _extract_driver_from_connect_info { + my $self = shift; + + my $drv; + + # if connect_info is a CODEREF, we have no choice but to connect + if ( + ref $self->_dbi_connect_info->[0] + and + reftype $self->_dbi_connect_info->[0] eq 'CODE' + ) { + $self->_populate_dbh; + $drv = $self->_dbh->{Driver}{Name}; + } + else { + # try to use dsn to not require being connected, the driver may still + # force a connection later in _rebless to determine version + # (dsn may not be supplied at all if all we do is make a mock-schema) + ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:([^:]+):/i; + $drv ||= $ENV{DBI_DRIVER}; + } + + return $drv; +} + +sub _determine_connector_driver { + my ($self, $conn) = @_; + + my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME'); + + if (not $dbtype) { + $self->_warn_undetermined_driver( + 'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your ' + . "$conn connector - this should not have happened." + ); + return; + } + + $dbtype =~ s/\W/_/gi; + + my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}"; + return if $self->isa($subclass); + + if ($self->load_optional_class($subclass)) { + bless $self, $subclass; + $self->_rebless; + } + else { + $self->_warn_undetermined_driver( + 'This version of DBIC does not yet seem to supply a driver for ' + . "your particular RDBMS and/or connection method ('$conn/$dbtype')." + ); + } +} + +sub _warn_undetermined_driver { + my ($self, $msg) = @_; + + require Data::Dumper::Concise; + + carp_once ($msg . ' While we will attempt to continue anyway, the results ' + . 'are likely to be underwhelming. Please upgrade DBIC, and if this message ' + . "does not go away, file a bugreport including the following info:\n" + . Data::Dumper::Concise::Dumper($self->_describe_connection) + ); +} + sub _do_connection_actions { my $self = shift; my $method_prefix = shift; @@ -1199,7 +1403,19 @@ sub disconnect_call_do_sql { $self->_do_query(@_); } -# override in db-specific backend when necessary +=head2 connect_call_datetime_setup + +A no-op stub method, provided so that one can always safely supply the +L + + on_connect_call => 'datetime_setup' + +This way one does not need to know in advance whether the underlying +storage requires any sort of hand-holding when dealing with calendar +data. 
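+
+For example - a sketch in which the schema class, DSN and credentials
+are all placeholders:
+
+  my $schema = My::Schema->connect(
+    $dsn, $user, $pass,
+    { on_connect_call => 'datetime_setup' },
+  );
+
+Storages that do require session setup for calendar data (for instance
+the Oracle driver) override this method, while everywhere else the
+call remains a harmless no-op.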
+ +=cut + sub connect_call_datetime_setup { 1 } sub _do_query { @@ -1219,40 +1435,71 @@ sub _do_query { my $attrs = shift @do_args; my @bind = map { [ undef, $_ ] } @do_args; - $self->_query_start($sql, \@bind); - $self->_get_dbh->do($sql, $attrs, @do_args); - $self->_query_end($sql, \@bind); + $self->dbh_do(sub { + $_[0]->_query_start($sql, \@bind); + $_[1]->do($sql, $attrs, @do_args); + $_[0]->_query_end($sql, \@bind); + }); } return $self; } sub _connect { - my ($self, @info) = @_; + my $self = shift; + + my $info = $self->_dbi_connect_info; - $self->throw_exception("You failed to provide any connection info") - if !@info; + $self->throw_exception("You did not provide any connection_info") + unless defined $info->[0]; my ($old_connect_via, $dbh); - if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) { - $old_connect_via = $DBI::connect_via; - $DBI::connect_via = 'connect'; - } + local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}; + + # this odd anonymous coderef dereference is in fact really + # necessary to avoid the unwanted effect described in perl5 + # RT#75792 + # + # in addition the coderef itself can't reside inside the try{} block below + # as it somehow triggers a leak under perl -d + my $dbh_error_handler_installer = sub { + weaken (my $weak_self = $_[0]); + + # the coderef is blessed so we can distinguish it from externally + # supplied handles (which must be preserved) + $_[1]->{HandleError} = bless sub { + if ($weak_self) { + $weak_self->throw_exception("DBI Exception: $_[0]"); + } + else { + # the handler may be invoked by something totally out of + # the scope of DBIC + DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); + } + }, '__DBIC__DBH__ERROR__HANDLER__'; + }; try { - if(ref $info[0] eq 'CODE') { - $dbh = $info[0]->(); + if(ref $info->[0] eq 'CODE') { + $dbh = $info->[0]->(); } else { require DBI; - $dbh = DBI->connect(@info); + $dbh = DBI->connect(@$info); } - if (!$dbh) { - die $DBI::errstr; - } + die $DBI::errstr unless $dbh; + die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. " + . 'This handle is disconnected as far as DBIC is concerned, and we can ' + . 'not continue', + ref $info->[0] eq 'CODE' + ? "Connection coderef $info->[0] returned a" + : 'DBI->connect($schema->storage->connect_info) resulted in a' + ) unless $dbh->FETCH('Active'); + + # sanity checks unless asked otherwise unless ($self->unsafe) { $self->throw_exception( @@ -1263,7 +1510,7 @@ sub _connect { # Default via _default_dbi_connect_attributes is 1, hence it was an explicit # request, or an external handle. Complain and set anyway unless ($dbh->{RaiseError}) { - carp( ref $info[0] eq 'CODE' + carp( ref $info->[0] eq 'CODE' ? "The 'RaiseError' of the externally supplied DBI handle is set to false. 
" ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect " @@ -1276,53 +1523,29 @@ sub _connect { $dbh->{RaiseError} = 1; } - # this odd anonymous coderef dereference is in fact really - # necessary to avoid the unwanted effect described in perl5 - # RT#75792 - sub { - my $weak_self = $_[0]; - weaken $weak_self; - - # the coderef is blessed so we can distinguish it from externally - # supplied handles (which must be preserved) - $_[1]->{HandleError} = bless sub { - if ($weak_self) { - $weak_self->throw_exception("DBI Exception: $_[0]"); - } - else { - # the handler may be invoked by something totally out of - # the scope of DBIC - DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); - } - }, '__DBIC__DBH__ERROR__HANDLER__'; - }->($self, $dbh); + $dbh_error_handler_installer->($self, $dbh); } } catch { $self->throw_exception("DBI Connection failed: $_") - } - finally { - $DBI::connect_via = $old_connect_via if $old_connect_via; }; $self->_dbh_autocommit($dbh->{AutoCommit}); - $dbh; + return $dbh; } sub txn_begin { - my $self = shift; - # this means we have not yet connected and do not know the AC status # (e.g. coderef $dbh), need a full-fledged connection check - if (! defined $self->_dbh_autocommit) { - $self->ensure_connected; + if (! defined $_[0]->_dbh_autocommit) { + $_[0]->ensure_connected; } # Otherwise simply connect or re-connect on pid changes else { - $self->_get_dbh; + $_[0]->_get_dbh; } - $self->next::method(@_); + shift->next::method(@_); } sub _exec_txn_begin { @@ -1343,9 +1566,8 @@ sub _exec_txn_begin { sub txn_commit { my $self = shift; - $self->_verify_pid if $self->_dbh; $self->throw_exception("Unable to txn_commit() on a disconnected storage") - unless $self->_dbh; + unless $self->_seems_connected; # esoteric case for folks using external $dbh handles if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { @@ -1374,9 +1596,8 @@ sub _exec_txn_commit { sub txn_rollback { my $self = shift; - $self->_verify_pid if $self->_dbh; $self->throw_exception("Unable to txn_rollback() on a disconnected storage") - unless $self->_dbh; + unless $self->_seems_connected; # esoteric case for folks using external $dbh handles if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) { @@ -1402,17 +1623,12 @@ sub _exec_txn_rollback { shift->_dbh->rollback; } -# generate some identical methods -for my $meth (qw/svp_begin svp_release svp_rollback/) { - no strict qw/refs/; - *{__PACKAGE__ ."::$meth"} = subname $meth => sub { - my $self = shift; - $self->_verify_pid if $self->_dbh; - $self->throw_exception("Unable to $meth() on a disconnected storage") - unless $self->_dbh; - $self->next::method(@_); - }; -} +# generate the DBI-specific stubs, which then fallback to ::Storage proper +quote_sub __PACKAGE__ . "::$_" => sprintf (<<'EOS', $_) for qw(svp_begin svp_release svp_rollback); + $_[0]->throw_exception('Unable to %s() on a disconnected storage') + unless $_[0]->_seems_connected; + shift->next::method(@_); +EOS # This used to be the top-half of _execute. It was split out to make it # easier to override in NoBindVars without duping the rest. It takes up @@ -1425,78 +1641,130 @@ sub _prep_for_execute { sub _gen_sql_bind { my ($self, $op, $ident, $args) = @_; - my ($sql, @bind) = $self->sql_maker->$op( - blessed($ident) ? 
$ident->from : $ident, - @$args, - ); - - my (@final_bind, $colinfos); - my $resolve_bindinfo = sub { - $colinfos ||= $self->_resolve_column_info($ident); - if (my $col = $_[1]->{dbic_colname}) { - $_[1]->{sqlt_datatype} ||= $colinfos->{$col}{data_type} - if $colinfos->{$col}{data_type}; - $_[1]->{sqlt_size} ||= $colinfos->{$col}{size} - if $colinfos->{$col}{size}; - } - $_[1]; - }; - - for my $e (@{$args->[2]{bind}||[]}, @bind) { - push @final_bind, [ do { - if (ref $e ne 'ARRAY') { - ({}, $e) - } - elsif (! defined $e->[0]) { - ({}, $e->[1]) - } - elsif (ref $e->[0] eq 'HASH') { - ( - (first { $e->[0]{$_} } qw/dbd_attrs sqlt_datatype/) ? $e->[0] : $self->$resolve_bindinfo($e->[0]), - $e->[1] - ) - } - elsif (ref $e->[0] eq 'SCALAR') { - ( { sqlt_datatype => ${$e->[0]} }, $e->[1] ) - } - else { - ( $self->$resolve_bindinfo({ dbic_colname => $e->[0] }), $e->[1] ) - } - }]; + my ($colinfos, $from); + if ( blessed($ident) ) { + $from = $ident->from; + $colinfos = $ident->columns_info; } - if ($op eq 'select' - && first { blessed($_->[1]) && $_->[1]->isa('DateTime') } @final_bind) { + my ($sql, $bind); + ($sql, @$bind) = $self->sql_maker->$op( ($from || $ident), @$args ); + + $bind = $self->_resolve_bindattrs( + $ident, [ @{$args->[2]{bind}||[]}, @$bind ], $colinfos + ); + if ( + ! $ENV{DBIC_DT_SEARCH_OK} + and + $op eq 'select' + and + first { + length ref $_->[1] + and + blessed($_->[1]) + and + $_->[1]->isa('DateTime') + } @$bind + ) { carp_unique 'DateTime objects passed to search() are not supported ' . 'properly (InflateColumn::DateTime formats and settings are not ' . 'respected.) See "Formatting DateTime objects in queries" in ' - . 'DBIx::Class::Manual::Cookbook'; + . 'DBIx::Class::Manual::Cookbook. To disable this warning for good ' + . 'set $ENV{DBIC_DT_SEARCH_OK} to true' } - ($sql, \@final_bind); + return( $sql, $bind ); } -sub _format_for_trace { - #my ($self, $bind) = @_; +sub _resolve_bindattrs { + my ($self, $ident, $bind, $colinfos) = @_; - ### Turn @bind from something like this: - ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) - ### to this: - ### ( "'1'", "'3'" ) + my $resolve_bindinfo = sub { + #my $infohash = shift; - map { - defined( $_ && $_->[1] ) - ? qq{'$_->[1]'} - : q{NULL} - } @{$_[1] || []}; -} + $colinfos ||= { %{ $self->_resolve_column_info($ident) } }; -sub _query_start { - my ( $self, $sql, $bind ) = @_; + my $ret; + if (my $col = $_[0]->{dbic_colname}) { + $ret = { %{$_[0]} }; - $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) - if $self->debug; + $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type} + if $colinfos->{$col}{data_type}; + + $ret->{sqlt_size} ||= $colinfos->{$col}{size} + if $colinfos->{$col}{size}; + } + + $ret || $_[0]; + }; + + return [ map { + my $resolved = + ( ref $_ ne 'ARRAY' or @$_ != 2 ) ? [ {}, $_ ] + : ( ! defined $_->[0] ) ? [ {}, $_->[1] ] + : (ref $_->[0] eq 'HASH') ? [( + ! keys %{$_->[0]} + or + exists $_->[0]{dbd_attrs} + or + $_->[0]{sqlt_datatype} + ) ? $_->[0] + : $resolve_bindinfo->($_->[0]) + , $_->[1] + ] + : (ref $_->[0] eq 'SCALAR') ? [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ] + : [ $resolve_bindinfo->( + { dbic_colname => $_->[0] } + ), $_->[1] ] + ; + + if ( + ! exists $resolved->[0]{dbd_attrs} + and + ! $resolved->[0]{sqlt_datatype} + and + length ref $resolved->[1] + and + ! 
is_plain_value $resolved->[1] + ) { + require Data::Dumper; + local $Data::Dumper::Maxdepth = 1; + local $Data::Dumper::Terse = 1; + local $Data::Dumper::Useqq = 1; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Pad = ' '; + $self->throw_exception( + 'You must supply a datatype/bindtype (see DBIx::Class::ResultSet/DBIC BIND VALUES) ' + . 'for non-scalar value '. Data::Dumper::Dumper ($resolved->[1]) + ); + } + + $resolved; + + } @$bind ]; +} + +sub _format_for_trace { + #my ($self, $bind) = @_; + + ### Turn @bind from something like this: + ### ( [ "artist", 1 ], [ \%attrs, 3 ] ) + ### to this: + ### ( "'1'", "'3'" ) + + map { + defined( $_ && $_->[1] ) + ? qq{'$_->[1]'} + : q{NULL} + } @{$_[1] || []}; +} + +sub _query_start { + my ( $self, $sql, $bind ) = @_; + + $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) ) + if $self->debug; } sub _query_end { @@ -1506,56 +1774,29 @@ sub _query_end { if $self->debug; } -my $sba_compat; sub _dbi_attrs_for_bind { - my ($self, $ident, $bind) = @_; + #my ($self, $ident, $bind) = @_; - if (! defined $sba_compat) { - $self->_determine_driver; - $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes - ? 0 - : 1 - ; - } + return [ map { - my $sba_attrs; - if ($sba_compat) { - my $class = ref $self; - carp_unique ( - "The source_bind_attributes() override in $class relies on a deprecated codepath. " - .'You are strongly advised to switch your code to override bind_attribute_by_datatype() ' - .'instead. This legacy compat shim will also disappear some time before DBIC 0.09' - ); + exists $_->{dbd_attrs} ? $_->{dbd_attrs} - my $sba_attrs = $self->source_bind_attributes - } + : ! $_->{sqlt_datatype} ? undef - my @attrs; + : do { - for (map { $_->[0] } @$bind) { - push @attrs, do { - if (exists $_->{dbd_attrs}) { - $_->{dbd_attrs} - } - elsif($_->{sqlt_datatype}) { - # cache the result in the dbh_details hash, as it can not change unless - # we connect to something else - my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {}; - if (not exists $cache->{$_->{sqlt_datatype}}) { - $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef; - } - $cache->{$_->{sqlt_datatype}}; - } - elsif ($sba_attrs and $_->{dbic_colname}) { - $sba_attrs->{$_->{dbic_colname}} || undef; - } - else { - undef; # always push something at this position - } - } - } + # cache the result in the dbh_details hash, as it (usually) can not change + # unless we connect to something else + # FIXME: for the time being Oracle is an exception, pending a rewrite of + # the LOB storage + my $cache = $_[0]->_dbh_details->{_datatype_map_cache} ||= {}; + + $cache->{$_->{sqlt_datatype}} = $_[0]->bind_attribute_by_data_type($_->{sqlt_datatype}) + if ! exists $cache->{$_->{sqlt_datatype}}; - return \@attrs; + $cache->{$_->{sqlt_datatype}}; + + } } map { $_->[0] } @{$_[2]} ]; } sub _execute { @@ -1563,19 +1804,68 @@ sub _execute { my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args); - shift->dbh_do( # retry over disconnects - '_dbh_execute', + # not even a PID check - we do not care about the state of the _dbh. 
+ # All we need is to get the appropriate drivers loaded if they aren't + # already so that the assumption in ad7c50fc26e holds + $self->_populate_dbh unless $self->_dbh; + + $self->dbh_do( _dbh_execute => # retry over disconnects $sql, $bind, - $self->_dbi_attrs_for_bind($ident, $bind) + $self->_dbi_attrs_for_bind($ident, $bind), ); } sub _dbh_execute { - my ($self, undef, $sql, $bind, $bind_attrs) = @_; + my ($self, $dbh, $sql, $bind, $bind_attrs) = @_; $self->_query_start( $sql, $bind ); - my $sth = $self->_sth($sql); + + my $sth = $self->_bind_sth_params( + $self->_prepare_sth($dbh, $sql), + $bind, + $bind_attrs, + ); + + # Can this fail without throwing an exception anyways??? + my $rv = $sth->execute(); + $self->throw_exception( + $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' + ) if !$rv; + + $self->_query_end( $sql, $bind ); + + return (wantarray ? ($rv, $sth, @$bind) : $rv); +} + +sub _prepare_sth { + my ($self, $dbh, $sql) = @_; + + # 3 is the if_active parameter which avoids active sth re-use + my $sth = $self->disable_sth_caching + ? $dbh->prepare($sql) + : $dbh->prepare_cached($sql, {}, 3); + + # XXX You would think RaiseError would make this impossible, + # but apparently that's not true :( + $self->throw_exception( + $dbh->errstr + || + sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without " + .'an exception and/or setting $dbh->errstr', + length ($sql) > 20 + ? substr($sql, 0, 20) . '...' + : $sql + , + 'DBD::' . $dbh->{Driver}{Name}, + ) + ) if !$sth; + + $sth; +} + +sub _bind_sth_params { + my ($self, $sth, $bind, $bind_attrs) = @_; for my $i (0 .. $#$bind) { if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts @@ -1587,32 +1877,26 @@ sub _dbh_execute { ); } else { + # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD + my $v = ( length ref $bind->[$i][1] and is_plain_value $bind->[$i][1] ) + ? "$bind->[$i][1]" + : $bind->[$i][1] + ; + $sth->bind_param( $i + 1, - (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""')) - ? "$bind->[$i][1]" - : $bind->[$i][1] - , + # The temp-var is CRUCIAL - DO NOT REMOVE IT, breaks older DBD::SQLite RT#79576 + $v, $bind_attrs->[$i], ); } } - # Can this fail without throwing an exception anyways??? - my $rv = $sth->execute(); - $self->throw_exception( - $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' - ) if !$rv; - - $self->_query_end( $sql, $bind ); - - return (wantarray ? ($rv, $sth, @$bind) : $rv); + $sth; } sub _prefetch_autovalues { - my ($self, $source, $to_insert) = @_; - - my $colinfo = $source->columns_info; + my ($self, $source, $colinfo, $to_insert) = @_; my %values; for my $col (keys %$colinfo) { @@ -1622,9 +1906,7 @@ sub _prefetch_autovalues { ( ! 
exists $to_insert->{$col} or - ref $to_insert->{$col} eq 'SCALAR' - or - (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY') + is_literal_value($to_insert->{$col}) ) ) { $values{$col} = $self->_sequence_fetch( @@ -1642,22 +1924,28 @@ sub _prefetch_autovalues { sub insert { my ($self, $source, $to_insert) = @_; - my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert); + my $col_infos = $source->columns_info; + + my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert); # fuse the values, but keep a separate list of prefetched_values so that # they can be fused once again with the final return $to_insert = { %$to_insert, %$prefetched_values }; - my $col_infos = $source->columns_info; + # FIXME - we seem to assume undef values as non-supplied. This is wrong. + # Investigate what does it take to s/defined/exists/ my %pcols = map { $_ => 1 } $source->primary_columns; - my %retrieve_cols; + my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col); for my $col ($source->columns) { + if ($col_infos->{$col}{is_auto_increment}) { + $autoinc_supplied ||= 1 if defined $to_insert->{$col}; + $retrieve_autoinc_col ||= $col unless $autoinc_supplied; + } + # nothing to retrieve when explicit values are supplied - next if (defined $to_insert->{$col} and ! ( - ref $to_insert->{$col} eq 'SCALAR' - or - (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY') - )); + next if ( + defined $to_insert->{$col} and ! is_literal_value($to_insert->{$col}) + ); # the 'scalar keys' is a trick to preserve the ->columns declaration order $retrieve_cols{$col} = scalar keys %retrieve_cols if ( @@ -1667,6 +1955,9 @@ sub insert { ); }; + local $self->{_autoinc_supplied_for_op} = $autoinc_supplied; + local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col; + my ($sqla_opts, @ir_container); if (%retrieve_cols and $self->_use_insert_returning) { $sqla_opts->{returning_container} = \@ir_container @@ -1703,7 +1994,7 @@ sub insert { unless (@pri_values == @missing_pri); @returned_cols{@missing_pri} = @pri_values; - delete $retrieve_cols{$_} for @missing_pri; + delete @retrieve_cols{@missing_pri}; } # if there is more left to pull @@ -1730,140 +2021,211 @@ sub insert { } sub insert_bulk { + carp_unique( + 'insert_bulk() should have never been exposed as a public method and ' + . 'calling it is depecated as of Aug 2014. If you believe having a genuine ' + . 'use for this method please contact the development team via ' + . DBIx::Class::_ENV_::HELP_URL + ); + + return '0E0' unless @{$_[3]||[]}; + + shift->_insert_bulk(@_); +} + +sub _insert_bulk { my ($self, $source, $cols, $data) = @_; - # FIXME - perhaps this is not even needed? does DBI stringify? + $self->throw_exception('Calling _insert_bulk without a dataset to process makes no sense') + unless @{$data||[]}; + + my $colinfos = $source->columns_info($cols); + + local $self->{_autoinc_supplied_for_op} = + (grep { $_->{is_auto_increment} } values %$colinfos) + ? 1 + : 0 + ; + + # get a slice type index based on first row of data + # a "column" in this context may refer to more than one bind value + # e.g. \[ '?, ?', [...], [...] ] # - # forcibly stringify whatever is stringifiable - for my $r (0 .. $#$data) { - for my $c (0 .. 
$#{$data->[$r]}) { - $data->[$r][$c] = "$data->[$r][$c]" - if ( ref $data->[$r][$c] and overload::Method($data->[$r][$c], '""') ); + # construct the value type index - a description of values types for every + # per-column slice of $data: + # + # nonexistent - nonbind literal + # 0 - regular value + # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo + # + # also construct the column hash to pass to the SQL generator. For plain + # (non literal) values - convert the members of the first row into a + # literal+bind combo, with extra positional info in the bind attr hashref. + # This will allow us to match the order properly, and is so contrived + # because a user-supplied literal/bind (or something else specific to a + # resultsource and/or storage driver) can inject extra binds along the + # way, so one can't rely on "shift positions" ordering at all. Also we + # can't just hand SQLA a set of some known "values" (e.g. hashrefs that + # can be later matched up by address), because we want to supply a real + # value on which perhaps e.g. datatype checks will be performed + my ($proto_data, $serialized_bind_type_by_col_idx); + for my $col_idx (0..$#$cols) { + my $colname = $cols->[$col_idx]; + if (ref $data->[0][$col_idx] eq 'SCALAR') { + # no bind value at all - no type + + $proto_data->{$colname} = $data->[0][$col_idx]; + } + elsif (ref $data->[0][$col_idx] eq 'REF' and ref ${$data->[0][$col_idx]} eq 'ARRAY' ) { + # repack, so we don't end up mangling the original \[] + my ($sql, @bind) = @${$data->[0][$col_idx]}; + + # normalization of user supplied stuff + my $resolved_bind = $self->_resolve_bindattrs( + $source, \@bind, $colinfos, + ); + + # store value-less (attrs only) bind info - we will be comparing all + # supplied binds against this for sanity + $serialized_bind_type_by_col_idx->{$col_idx} = serialize [ map { $_->[0] } @$resolved_bind ]; + + $proto_data->{$colname} = \[ $sql, map { [ + # inject slice order to use for $proto_bind construction + { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $col_idx, _literal_bind_subindex => $_+1 } + => + $resolved_bind->[$_][1] + ] } (0 .. $#bind) + ]; + } + else { + $serialized_bind_type_by_col_idx->{$col_idx} = undef; + + $proto_data->{$colname} = \[ '?', [ + { dbic_colname => $colname, _bind_data_slice_idx => $col_idx } + => + $data->[0][$col_idx] + ] ]; } } - # check the data for consistency - # report a sensible error on bad data + my ($sql, $proto_bind) = $self->_prep_for_execute ( + 'insert', + $source, + [ $proto_data ], + ); + + if (! @$proto_bind and keys %$serialized_bind_type_by_col_idx) { + # if the bindlist is empty and we had some dynamic binds, this means the + # storage ate them away (e.g. the NoBindVars component) and interpolated + # them directly into the SQL. 
This obviously can't be good for multi-inserts + $self->throw_exception('Unable to invoke fast-path insert without storage placeholder support'); + } + + # sanity checks + # FIXME - devise a flag "no babysitting" or somesuch to shut this off # - # also create a list of dynamic binds (ones that will be changing - # for each row) - my $dyn_bind_idx; - for my $col_idx (0..$#$cols) { + # use an error reporting closure for convenience (less to pass) + my $bad_slice_report_cref = sub { + my ($msg, $r_idx, $c_idx) = @_; + $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", + $msg, + $cols->[$c_idx], + do { + require Data::Dumper::Concise; + local $Data::Dumper::Maxdepth = 5; + Data::Dumper::Concise::Dumper ({ + map { $cols->[$_] => + $data->[$r_idx][$_] + } 0..$#$cols + }), + } + ); + }; - # the first "row" is used as a point of reference + for my $col_idx (0..$#$cols) { my $reference_val = $data->[0][$col_idx]; - my $is_literal = ref $reference_val eq 'SCALAR'; - my $is_literal_bind = ( !$is_literal and ( - ref $reference_val eq 'REF' - and - ref $$reference_val eq 'ARRAY' - ) ); - - $dyn_bind_idx->{$col_idx} = 1 - if (!$is_literal and !$is_literal_bind); - - # use a closure for convenience (less to pass) - my $bad_slice = sub { - my ($msg, $slice_idx) = @_; - $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", - $msg, - $cols->[$col_idx], - do { - require Data::Dumper::Concise; - local $Data::Dumper::Maxdepth = 2; - Data::Dumper::Concise::Dumper ({ - map { $cols->[$_] => - $data->[$slice_idx][$_] - } (0 .. $#$cols) - }), - } - ); - }; for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1 my $val = $data->[$row_idx][$col_idx]; - if ($is_literal) { + if (! exists $serialized_bind_type_by_col_idx->{$col_idx}) { # literal no binds if (ref $val ne 'SCALAR') { - $bad_slice->( + $bad_slice_report_cref->( "Incorrect value (expecting SCALAR-ref \\'$$reference_val')", - $row_idx + $row_idx, + $col_idx, ); } elsif ($$val ne $$reference_val) { - $bad_slice->( + $bad_slice_report_cref->( "Inconsistent literal SQL value (expecting \\'$$reference_val')", - $row_idx + $row_idx, + $col_idx, ); } } - elsif ($is_literal_bind) { + elsif (! defined $serialized_bind_type_by_col_idx->{$col_idx} ) { # regular non-literal value + if (is_literal_value($val)) { + $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx); + } + } + else { # binds from a \[], compare type and attrs if (ref $val ne 'REF' or ref $$val ne 'ARRAY') { - $bad_slice->( + $bad_slice_report_cref->( "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])", - $row_idx + $row_idx, + $col_idx, ); } - elsif (${$val}->[0] ne ${$reference_val}->[0]) { - $bad_slice->( - "Inconsistent literal SQL-bind value (expecting \\['${$reference_val}->[0]', ... ])", - $row_idx - ); - } - } - elsif (ref $val) { - if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) { - $bad_slice->("Literal SQL found where a plain bind value is expected", $row_idx); - } - else { - $bad_slice->("$val reference found where bind expected", $row_idx); + # start drilling down and bail out early on identical refs + elsif ( + $reference_val != $val + or + $$reference_val != $$val + ) { + if (${$val}->[0] ne ${$reference_val}->[0]) { + $bad_slice_report_cref->( + "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... 
])", + $row_idx, + $col_idx, + ); + } + # need to check the bind attrs - a bind will happen only once for + # the entire dataset, so any changes further down will be ignored. + elsif ( + $serialized_bind_type_by_col_idx->{$col_idx} + ne + serialize [ + map + { $_->[0] } + @{$self->_resolve_bindattrs( + $source, [ @{$$val}[1 .. $#$$val] ], $colinfos, + )} + ] + ) { + $bad_slice_report_cref->( + 'Differing bind attributes on literal/bind values not supported', + $row_idx, + $col_idx, + ); + } } } } } - # Get the sql with bind values interpolated where necessary. For dynamic - # binds convert the values of the first row into a literal+bind combo, with - # extra positional info in the bind attr hashref. This will allow us to match - # the order properly, and is so contrived because a user-supplied literal - # bind (or something else specific to a resultsource and/or storage driver) - # can inject extra binds along the way, so one can't rely on "shift - # positions" ordering at all. Also we can't just hand SQLA a set of some - # known "values" (e.g. hashrefs that can be later matched up by address), - # because we want to supply a real value on which perhaps e.g. datatype - # checks will be performed - my ($sql, $proto_bind) = $self->_prep_for_execute ( - 'insert', - $source, - [ { map { $cols->[$_] => $dyn_bind_idx->{$_} - ? \[ '?', [ - { dbic_colname => $cols->[$_], _bind_data_slice_idx => $_ } - => - $data->[0][$_] - ] ] - : $data->[0][$_] - } (0..$#$cols) } ], - ); - - if (! @$proto_bind and keys %$dyn_bind_idx) { - # if the bindlist is empty and we had some dynamic binds, this means the - # storage ate them away (e.g. the NoBindVars component) and interpolated - # them directly into the SQL. This obviosly can't be good for multi-inserts - $self->throw_exception('Cannot insert_bulk without support for placeholders'); - } - - # neither _execute_array, nor _execute_inserts_with_no_binds are - # atomic (even if _execute _array is a single call). Thus a safety + # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds + # are atomic (even if execute_for_fetch is a single call). Thus a safety # scope guard my $guard = $self->txn_scope_guard; $self->_query_start( $sql, @$proto_bind ? [[undef => '__BULK_INSERT__' ]] : () ); - my $sth = $self->_sth($sql); + my $sth = $self->_prepare_sth($self->_dbh, $sql); my $rv = do { if (@$proto_bind) { # proto bind contains the information on which pieces of $data to pull # $cols is passed in only for prettier error-reporting - $self->_execute_array( $source, $sth, $proto_bind, $cols, $data ); + $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data ); } else { # bind_param_array doesn't work if there are no binds @@ -1875,40 +2237,73 @@ sub insert_bulk { $guard->commit; - return (wantarray ? ($rv, $sth, @$proto_bind) : $rv); + return wantarray ? ($rv, $sth, @$proto_bind) : $rv; } -sub _execute_array { - my ($self, $source, $sth, $proto_bind, $cols, $data, @extra) = @_; - - ## This must be an arrayref, else nothing works! - my $tuple_status = []; +# execute_for_fetch is capable of returning data just fine (it means it +# can be used for INSERT...RETURNING and UPDATE...RETURNING. Since this +# is the void-populate fast-path we will just ignore this altogether +# for the time being. 
+sub _dbh_execute_for_fetch { + my ($self, $source, $sth, $proto_bind, $cols, $data) = @_; + # If we have any bind attributes to take care of, we will bind the + # proto-bind data (which will never be used by execute_for_fetch) + # However since column bindtypes are "sticky", this is sufficient + # to get the DBD to apply the bindtype to all values later on my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind); - # Bind the values by column slices for my $i (0 .. $#$proto_bind) { - my $data_slice_idx = ( - ref $proto_bind->[$i][0] eq 'HASH' - and - exists $proto_bind->[$i][0]{_bind_data_slice_idx} - ) ? $proto_bind->[$i][0]{_bind_data_slice_idx} : undef; - - $sth->bind_param_array( + $sth->bind_param ( $i+1, # DBI bind indexes are 1-based - defined $data_slice_idx - # either get a "column" of dynamic values, or just repeat the same - # bind over and over - ? [ map { $_->[$data_slice_idx] } @$data ] - : [ ($proto_bind->[$i][1]) x @$data ] - , - defined $bind_attrs->[$i] ? $bind_attrs->[$i] : (), # some DBDs throw up when given an undef - ); - } + $proto_bind->[$i][1], + $bind_attrs->[$i], + ) if defined $bind_attrs->[$i]; + } + + # At this point $data slots named in the _bind_data_slice_idx of + # each piece of $proto_bind are either \[]s or plain values to be + # passed in. Construct the dispensing coderef. *NOTE* the order + # of $data will differ from this of the ?s in the SQL (due to + # alphabetical ordering by colname). We actually do want to + # preserve this behavior so that prepare_cached has a better + # chance of matching on unrelated calls + + my $fetch_row_idx = -1; # saner loop this way + my $fetch_tuple = sub { + return undef if ++$fetch_row_idx > $#$data; + + return [ map { + my $v = ! defined $_->{_literal_bind_subindex} + + ? $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ] + + # There are no attributes to resolve here - we already did everything + # when we constructed proto_bind. However we still want to sanity-check + # what the user supplied, so pass stuff through to the resolver *anyway* + : $self->_resolve_bindattrs ( + undef, # a fake rsrc + [ ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}->[ $_->{_literal_bind_subindex} ] ], + {}, # a fake column_info bag + )->[0][1] + ; + + # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD + # For the time being forcibly stringify whatever is stringifiable + (length ref $v and is_plain_value $v) + ? "$v" + : $v + ; + } map { $_->[0] } @$proto_bind ]; + }; + my $tuple_status = []; my ($rv, $err); try { - $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra); + $rv = $sth->execute_for_fetch( + $fetch_tuple, + $tuple_status, + ); } catch { $err = shift; @@ -1938,7 +2333,7 @@ sub _execute_array { if ($i > $#$tuple_status); require Data::Dumper::Concise; - $self->throw_exception(sprintf "execute_array() aborted with '%s' at populate slice:\n%s", + $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s", ($tuple_status->[$i][1] || $err), Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. 
$#$cols) } ), ); @@ -1947,11 +2342,6 @@ sub _execute_array { return $rv; } -sub _dbh_execute_array { - #my ($self, $sth, $tuple_status, @extra) = @_; - return $_[1]->execute_array({ArrayTupleStatus => $_[2]}); -} - sub _dbh_execute_inserts_with_no_binds { my ($self, $sth, $count) = @_; @@ -1991,103 +2381,6 @@ sub delete { shift->_execute('delete', @_); } -# We were sent here because the $rs contains a complex search -# which will require a subquery to select the correct rows -# (i.e. joined or limited resultsets, or non-introspectable conditions) -# -# Generating a single PK column subquery is trivial and supported -# by all RDBMS. However if we have a multicolumn PK, things get ugly. -# Look at _multipk_update_delete() -sub _subq_update_delete { - my $self = shift; - my ($rs, $op, $values) = @_; - - my $rsrc = $rs->result_source; - - # quick check if we got a sane rs on our hands - my @pcols = $rsrc->_pri_cols; - - my $sel = $rs->_resolved_attrs->{select}; - $sel = [ $sel ] unless ref $sel eq 'ARRAY'; - - if ( - join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols) - ne - join ("\x00", sort @$sel ) - ) { - $self->throw_exception ( - '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys' - ); - } - - if (@pcols == 1) { - return $self->$op ( - $rsrc, - $op eq 'update' ? $values : (), - { $pcols[0] => { -in => $rs->as_query } }, - ); - } - - else { - return $self->_multipk_update_delete (@_); - } -} - -# ANSI SQL does not provide a reliable way to perform a multicol-PK -# resultset update/delete involving subqueries. So by default resort -# to simple (and inefficient) delete_all style per-row opearations, -# while allowing specific storages to override this with a faster -# implementation. -# -sub _multipk_update_delete { - return shift->_per_row_update_delete (@_); -} - -# This is the default loop used to delete/update rows for multi PK -# resultsets, and used by mysql exclusively (because it can't do anything -# else). -# -# We do not use $row->$op style queries, because resultset update/delete -# is not expected to cascade (this is what delete_all/update_all is for). -# -# There should be no race conditions as the entire operation is rolled -# in a transaction. -# -sub _per_row_update_delete { - my $self = shift; - my ($rs, $op, $values) = @_; - - my $rsrc = $rs->result_source; - my @pcols = $rsrc->_pri_cols; - - my $guard = $self->txn_scope_guard; - - # emulate the return value of $sth->execute for non-selects - my $row_cnt = '0E0'; - - my $subrs_cur = $rs->cursor; - my @all_pk = $subrs_cur->all; - for my $pks ( @all_pk) { - - my $cond; - for my $i (0.. $#pcols) { - $cond->{$pcols[$i]} = $pks->[$i]; - } - - $self->$op ( - $rsrc, - $op eq 'update' ? 
 sub _select {
   my $self = shift;
   $self->_execute($self->_select_args(@_));
@@ -2096,36 +2389,44 @@ sub _select {
 sub _select_args_to_query {
   my $self = shift;
 
+  $self->throw_exception(
+    "Unable to generate limited query representation with 'software_limit' enabled"
+  ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
+
   # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
   my ($op, $ident, @args) = $self->_select_args(@_);
 
   # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
-  my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, \@args);
-  $prepared_bind ||= [];
+  my ($sql, $bind) = $self->_gen_sql_bind($op, $ident, \@args);
 
-  return wantarray
-    ? ($sql, $prepared_bind)
-    : \[ "($sql)", @$prepared_bind ]
-  ;
+  # reuse the bind arrayref
+  unshift @{$bind}, "($sql)";
+  \$bind;
 }
 
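(The scalar returned above is a reference to the bind arrayref with the parenthesized SQL prepended, the same \[ $sql, @bind ] shape SQL::Abstract accepts as a literal. Illustrative only, against a hypothetical resultset:)

  my $query = $rs->as_query;   # bottoms out in _select_args_to_query

  # $$query now looks roughly like:
  #   [ '(SELECT me.id FROM artist me WHERE me.name = ?)', @bind_values ]

  # ...which is why a resultset can be embedded as a subquery:
  my $inner = $schema->resultset('CD')->search({ cdid => { -in => $query } });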
 sub _select_args {
-  my ($self, $ident, $select, $where, $attrs) = @_;
+  my ($self, $ident, $select, $where, $orig_attrs) = @_;
+
+  # FIXME - that kind of caching would be nice to have
+  # however currently we *may* pass the same $orig_attrs
+  # with different ident/select/where
+  # the whole interface needs to be rethought, since it
+  # was centered around the flawed SQLA API. We can do
+  # soooooo much better now. But that is also another
+  # battle...
+  #return (
+  #  'select', $orig_attrs->{!args_as_stored_at_the_end_of_this_method!}
+  #) if $orig_attrs->{!args_as_stored_at_the_end_of_this_method!};
 
   my $sql_maker = $self->sql_maker;
-  my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
 
-  $attrs = {
-    %$attrs,
+  my $attrs = {
+    %$orig_attrs,
     select => $select,
     from => $ident,
     where => $where,
-    $rs_alias && $alias2source->{$rs_alias}
-      ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
-      : ()
-    ,
   };
 
   # Sanity check the attributes (SQLMaker does it too, but
@@ -2144,46 +2445,90 @@ sub _select_args {
     $attrs->{rows} = $sql_maker->__max_int;
   }
 
-  my @limit;
-
-  # see if we need to tear the prefetch apart otherwise delegate the limiting to the
-  # storage, unless software limit was requested
-  if (
-    #limited has_many
-    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
-       ||
-    # grouped prefetch (to satisfy group_by == select)
-    ( $attrs->{group_by}
-        &&
-      @{$attrs->{group_by}}
-        &&
-      $attrs->{_prefetch_selector_range}
-    )
+  # see if we will need to tear the prefetch apart to satisfy group_by == select
+  # this is *extremely tricky* to get right, I am still not sure I did
+  #
+  my ($prefetch_needs_subquery, @limit_args);
+
+  if ( $attrs->{_grouped_by_distinct} and $attrs->{collapse} ) {
+    # we already know there is a valid group_by (we made it) and we know it is
+    # intended to be based *only* on non-multi stuff
+    # short circuit the group_by parsing below
+    $prefetch_needs_subquery = 1;
+  }
+  elsif (
+    # The rationale is that even if we do *not* have collapse, we still
+    # need to wrap the core grouped select/group_by in a subquery
+    # so that databases that care about group_by/select equivalence
+    # are happy (this includes MySQL in strict_mode)
+    # If any of the other joined tables are referenced in the group_by
+    # however - the user is on their own
+    ( $prefetch_needs_subquery or ! $attrs->{_simple_passthrough_construction} )
+      and
+    $attrs->{group_by}
+      and
+    @{$attrs->{group_by}}
+      and
+    my $grp_aliases = try { # try{} because $attrs->{from} may be unreadable
+      $self->_resolve_aliastypes_from_select_args({ from => $attrs->{from}, group_by => $attrs->{group_by} })
+    }
   ) {
-    ($ident, $select, $where, $attrs)
-      = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
+    # no aliases other than our own in group_by
+    # if there are - do not allow subquery even if limit is present
+    $prefetch_needs_subquery = ! scalar grep { $_ ne $attrs->{alias} } keys %{ $grp_aliases->{grouping} || {} };
+  }
+  elsif ( $attrs->{rows} && $attrs->{collapse} ) {
+    # active collapse with a limit - that one is a no-brainer unless
+    # overruled by a group_by above
+    $prefetch_needs_subquery = 1;
   }
+
+  if ($prefetch_needs_subquery) {
+    $attrs = $self->_adjust_select_args_for_complex_prefetch ($attrs);
+  }
   elsif (! $attrs->{software_limit} ) {
-    push @limit, (
+    push @limit_args, (
       $attrs->{rows} || (),
       $attrs->{offset} || (),
     );
   }
 
   # try to simplify the joinmap further (prune unreferenced type-single joins)
-  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+  if (
+    ! $prefetch_needs_subquery  # already pruned
+      and
+    ref $attrs->{from}
+      and
+    reftype $attrs->{from} eq 'ARRAY'
+      and
+    @{$attrs->{from}} != 1
+  ) {
+    ($attrs->{from}, $attrs->{_aliastypes}) = $self->_prune_unused_joins ($attrs);
+  }
+
+  # FIXME this is a gross, inefficient, largely incorrect and fragile hack
+  # during the result inflation stage we *need* to know what was the aliastype
+  # map as sqla saw it when the final pieces of SQL were being assembled
+  # Originally we simply carried around the entirety of $attrs, but this
+  # resulted in resultsets that are being reused growing continuously, as
  # the hash in question grew deeper and deeper.
+  # Instead hand-pick what to take with us here (we actually don't need much
+  # at this point just the map itself)
+  $orig_attrs->{_last_sqlmaker_alias_map} = $attrs->{_aliastypes};
 
 ###
-  # This would be the point to deflate anything found in $where
+  #   my $alias2source = $self->_resolve_ident_sources ($ident);
+  #
+  # This would be the point to deflate anything found in $attrs->{where}
   # (and leave $attrs->{bind} intact). Problem is - inflators historically
-  # expect a row object. And all we have is a resultsource (it is trivial
+  # expect a result object. And all we have is a resultsource (it is trivial
   # to extract deflator coderefs via $alias2source above).
   #
   # I don't see a way forward other than changing the way deflators are
   # invoked, and that's just bad...
 ###
 
-  return ('select', $ident, $select, $where, $attrs, @limit);
+  return ( 'select', @{$attrs}{qw(from select where)}, $attrs, @limit_args );
 }
 
 # Returns a counting SELECT for a simple count
@@ -2195,15 +2540,6 @@ sub _count_select {
   return { count => '*' };
 }
 
-sub source_bind_attributes {
-  shift->throw_exception(
-    'source_bind_attributes() was never meant to be a callable public method - '
-   .'please contact the DBIC dev-team and describe your use case so that a reasonable '
-   .'solution can be provided'
-   ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT"
-  );
-}
-
 =head2 select
 
 =over 4
 
@@ -2242,60 +2578,14 @@ storage driver.
 Can be overridden by supplying an explicit L</connect_info>
 to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
 see L<DBIx::Class::SQLMaker::LimitDialects>.
 
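(A sketch of such an override at connect() time; the attribute name and dialect below are from memory and should be double-checked against the connect_info documentation:)

  my $schema = MyApp::Schema->connect(
    $dsn, $user, $pass,
    { limit_dialect => 'GenericSubQ' },  # force a specific limit emulation
  );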
-=head2 sth
-
-=over 4
-
-=item Arguments: $sql
-
-=back
-
-Returns a L<DBI> sth (statement handle) for the supplied SQL.
-
 =cut
 
-sub _dbh_sth {
-  my ($self, $dbh, $sql) = @_;
-
-  # 3 is the if_active parameter which avoids active sth re-use
-  my $sth = $self->disable_sth_caching
-    ? $dbh->prepare($sql)
-    : $dbh->prepare_cached($sql, {}, 3);
-
-  # XXX You would think RaiseError would make this impossible,
-  #  but apparently that's not true :(
-  $self->throw_exception(
-    $dbh->errstr
-      ||
-    sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without "
-            .'an exception and/or setting $dbh->errstr',
-      length ($sql) > 20
-        ? substr($sql, 0, 20) . '...'
-        : $sql
-      ,
-      'DBD::' . $dbh->{Driver}{Name},
-    )
-  ) if !$sth;
-
-  $sth;
-}
-
-sub sth {
-  carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)';
-  shift->_sth(@_);
-}
-
-sub _sth {
-  my ($self, $sql) = @_;
-  $self->dbh_do('_dbh_sth', $sql);  # retry over disconnects
-}
-
 sub _dbh_columns_info_for {
   my ($self, $dbh, $table) = @_;
 
-  if ($dbh->can('column_info')) {
-    my %result;
-    my $caught;
+  my %result;
+
+  if (! DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE and $dbh->can('column_info')) {
     try {
       my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
       my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
@@ -2312,39 +2602,75 @@ sub _dbh_columns_info_for {
         $result{$col_name} = \%column_info;
       }
     } catch {
-      $caught = 1;
+      %result = ();
     };
-    return \%result if !$caught && scalar keys %result;
+
+    return \%result if keys %result;
   }
 
-  my %result;
   my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
   $sth->execute;
-  my @columns = @{$sth->{NAME_lc}};
-  for my $i ( 0 .. $#columns ){
-    my %column_info;
-    $column_info{data_type} = $sth->{TYPE}->[$i];
-    $column_info{size} = $sth->{PRECISION}->[$i];
-    $column_info{is_nullable} = $sth->{NULLABLE}->[$i] ? 1 : 0;
-
-    if ($column_info{data_type} =~ m/^(.*?)\((.*?)\)$/) {
-      $column_info{data_type} = $1;
-      $column_info{size} = $2;
+
+### The acrobatics with lc names is necessary to support both the legacy
+### API that used NAME_lc exclusively, *AND* at the same time work properly
+### with column names differing in case only (thanks pg!)
+
+  my ($columns, $seen_lcs);
+
+  ++$seen_lcs->{lc($_)} and $columns->{$_} = {
+    idx => scalar keys %$columns,
+    name => $_,
+    lc_name => lc($_),
+  } for @{$sth->{NAME}};
+
+  $seen_lcs->{$_->{lc_name}} == 1
+    and
+  $_->{name} = $_->{lc_name}
+    for values %$columns;
+
+  for ( values %$columns ) {
+    my $inf = {
+      data_type => $sth->{TYPE}->[$_->{idx}],
+      size => $sth->{PRECISION}->[$_->{idx}],
+      is_nullable => $sth->{NULLABLE}->[$_->{idx}] ? 1 : 0,
+    };
+
+    if ($inf->{data_type} =~ m/^(.*?)\((.*?)\)$/) {
+      @{$inf}{qw( data_type size )} = ($1, $2);
     }
-    $result{$columns[$i]} = \%column_info;
+
+    $result{$_->{name}} = $inf;
   }
+
   $sth->finish;
 
-  foreach my $col (keys %result) {
-    my $colinfo = $result{$col};
-    my $type_num = $colinfo->{data_type};
-    my $type_name;
-    if(defined $type_num && $dbh->can('type_info')) {
-      my $type_info = $dbh->type_info($type_num);
-      $type_name = $type_info->{TYPE_NAME} if $type_info;
-      $colinfo->{data_type} = $type_name if $type_name;
+  if ($dbh->can('type_info')) {
+    for my $inf (values %result) {
+      next if ! defined $inf->{data_type};
+
+      $inf->{data_type} = (
+        (
+          (
+            $dbh->type_info( $inf->{data_type} )
+              ||
+            next
+          )
+            ||
+          next
+        )->{TYPE_NAME}
+          ||
+        next
+      );
+
+      # FIXME - this may be an artifact of the DBD::Pg implementation alone
+      # needs more testing in the future...
+      $inf->{size} -= 4 if (
+        ( $inf->{size}||0 > 4 )
+          and
+        $inf->{data_type} =~ qr/^text$/i
      );
    }
+
  }
 
  return \%result;
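(Either branch above settles on the same return shape: a hashref keyed by column name, whose values carry the three keys assembled in this hunk. A sketch with hypothetical columns:)

  # return value of _dbh_columns_info_for / columns_info_for:
  {
    artistid => { data_type => 'integer', size => 4,  is_nullable => 0 },
    name     => { data_type => 'varchar', size => 96, is_nullable => 1 },
  }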
@@ -2460,7 +2786,10 @@ Given a datatype from column info, returns a database specific bind
 attribute for C<< $dbh->bind_param($val,$attribute) >> or nothing if we will
 let the database planner just handle it.
 
-Generally only needed for special case column types, like bytea in postgres.
+This method is always called after the driver has been determined and a DBI
+connection has been established. Therefore you can refer to C<DBI::$constant>
+and/or C<DBD::$driver::$constant> directly, without worrying about loading
+the correct modules.
 
 =cut
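(What the guarantee documented above enables: a driver-specific override can use DBD constants without loading anything by hand. A sketch of the classic bytea case; the subclass name is made up and the shipped Pg storage has its own, more complete version:)

  package My::Storage::Pg;
  use base 'DBIx::Class::Storage::DBI::Pg';
  use mro 'c3';

  sub bind_attribute_by_data_type {
    my ($self, $data_type) = @_;

    # DBD::Pg is guaranteed to be loaded by the time this runs
    return { pg_type => DBD::Pg::PG_BYTEA() }
      if $data_type eq 'bytea';

    return $self->next::method($data_type);
  }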
@@ -2493,7 +2822,7 @@ sub is_datatype_numeric {
 
 =over 4
 
-=item Arguments: $schema \@databases, $version, $directory, $preversion, \%sqlt_args
+=item Arguments: $schema, \@databases, $version, $directory, $preversion, \%sqlt_args
 
 =back
 
@@ -2514,8 +2843,7 @@ $version in the name with "$preversion-$version".
 
 See L<SQL::Translator/METHODS> for a list of values for C<\%sqlt_args>.
 The most common value for this would be C<< { add_drop_table => 1 } >>
 to have the SQL produced include a C<DROP TABLE> statement for each table
-created. For quoting purposes supply C<quote_table_names> and
-C<quote_field_names>.
+created. For quoting purposes supply C<quote_identifiers>.
 
 If no arguments are passed, then the following default values are assumed:
 
@@ -2555,7 +2883,7 @@ sub create_ddl_dir {
   }
   else {
     -d $dir
       or
-    (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (i.e. Path::Class::Dir)
+    (require File::Path and File::Path::mkpath (["$dir"])) # mkpath does not like objects (i.e. Path::Class::Dir)
       or
     $self->throw_exception(
       "Failed to create '$dir': " . ($! || $@ || 'error unknown')
@@ -2574,11 +2902,12 @@ sub create_ddl_dir {
     add_drop_table => 1,
     ignore_constraint_names => 1,
     ignore_index_names => 1,
+    quote_identifiers => $self->sql_maker->_quoting_enabled,
     %{$sqltargs || {}}
   };
 
-  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) {
-    $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without $missing");
   }
 
   my $sqlt = SQL::Translator->new( $sqltargs );
@@ -2668,10 +2997,21 @@ sub create_ddl_dir {
         unless $dest_schema->name;
     }
 
-    my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db,
-                                                  $dest_schema,   $db,
-                                                  $sqltargs
-                                                 );
+    my $diff = do {
+      # FIXME - this is a terrible workaround for
+      # https://github.com/dbsrgits/sql-translator/commit/2d23c1e
+      # Fixing it in this sloppy manner so that we don't have to
+      # lockstep an SQLT release as well. Needs to be removed at
+      # some point, and SQLT dep bumped
+      local $SQL::Translator::Producer::SQLite::NO_QUOTES
+        if $SQL::Translator::Producer::SQLite::NO_QUOTES;
+
+      SQL::Translator::Diff::schema_diff($source_schema, $db,
+                                         $dest_schema,   $db,
+                                         $sqltargs
+                                        );
+    };
+
     if(!open $file, ">$difffile") {
       $self->throw_exception("Can't write to $difffile ($!)");
       next;
@@ -2689,7 +3029,8 @@ sub create_ddl_dir {
 
 =back
 
-Returns the statements used by L</deploy> and L<DBIx::Class::Schema/deploy>.
+Returns the statements used by L<deploy|DBIx::Class::Storage/deploy>
+and L<DBIx::Class::Schema/deploy>.
 
 The L<SQL::Translator> (not L<DBI>) database driver name can be explicitly
 provided in C<$type>, otherwise the result of L</sqlt_type> is used as default.
 
@@ -2722,15 +3063,18 @@ sub deployment_statements {
     return join('', @rows);
   }
 
-  unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) {
-    $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') );
+  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ) {
+    $self->throw_exception("Can't deploy without a pregenerated 'ddl_dir' directory or $missing");
   }
 
-  # sources needs to be a parser arg, but for simplicty allow at top level
+  # sources needs to be a parser arg, but for simplicity allow at top level
   # coming in
   $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
       if exists $sqltargs->{sources};
 
+  $sqltargs->{quote_identifiers} = $self->sql_maker->_quoting_enabled
+    unless exists $sqltargs->{quote_identifiers};
+
   my $tr = SQL::Translator->new(
     producer => "SQL::Translator::Producer::${type}",
     %$sqltargs,
@@ -2738,18 +3082,12 @@ sub deployment_statements {
     parser => 'SQL::Translator::Parser::DBIx::Class',
     data => $schema,
   );
 
-  my @ret;
-  if (wantarray) {
-    @ret = $tr->translate;
-  }
-  else {
-    $ret[0] = $tr->translate;
-  }
-
-  $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
-    unless (@ret && defined $ret[0]);
-
-  return wantarray ? @ret : $ret[0];
+  return preserve_context {
+    $tr->translate
+  } after => sub {
+    $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+      unless defined $_[0];
+  };
 }
 
 # FIXME deploy() currently does not accurately report sql errors
@@ -2849,6 +3187,8 @@ sub lag_behind_master {
 
 =item Arguments: $relname, $join_count
 
+=item Return Value: $alias
+
 =back
 
 L<DBIx::Class> uses L<DBIx::Class::Relationship> names as table aliases in
 queries.
 
@@ -2940,6 +3280,13 @@ sub _is_text_lob_type {
     |national\s*character\s*varying))\z/xi);
 }
 
+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($self->_is_binary_lob_type($data_type)
+    || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
+}
+
 1;
 
 =head1 USAGE NOTES
 
@@ -2960,15 +3307,13 @@ transactions.
 You're on your own for handling all sorts of exceptional cases if you choose
 the C<< AutoCommit => 0 >> path, just as you would be with raw DBI.
 
+=head1 FURTHER QUESTIONS?
 
-=head1 AUTHORS
-
-Matt S. Trout <mst@shadowcatsystems.co.uk>
-
-Andy Grundman <andy@hybridized.org>
+Check the list of L<additional DBIC resources|DBIx::Class/GETTING HELP/SUPPORT>.
 
-=head1 LICENSE
+=head1 COPYRIGHT AND LICENSE
 
-You may distribute this code under the same terms as Perl itself.
-
-=cut
+This module is free software L<copyright|DBIx::Class/COPYRIGHT AND LICENSE>
+by the L<DBIx::Class (DBIC) authors|DBIx::Class/AUTHORS>. You can
+redistribute it and/or modify it under the same terms as the
+L<DBIx::Class library|DBIx::Class/COPYRIGHT AND LICENSE>.